From 67f96fb2d0cce03501695f9aa7f2bcad8d03535c Mon Sep 17 00:00:00 2001 From: j-rafique Date: Mon, 4 May 2026 21:09:12 +0000 Subject: [PATCH] feat(lep6): finalize integration observability and tests --- .github/actions/setup-env/action.yml | 29 +- .github/workflows/build&release.yml | 8 +- .github/workflows/tests.yml | 34 +- .gitignore | 2 + Makefile | 43 +- docs/lep6-supernode-runbook.md | 111 ++ gen/supernode/service.pb.go | 345 +++- gen/supernode/service.pb.gw.go | 644 +++++--- gen/supernode/service.swagger.json | 125 ++ gen/supernode/service_grpc.pb.go | 2 +- gen/supernode/status.pb.go | 1390 +++++++++++++---- gen/supernode/storage_challenge.swagger.json | 67 + pkg/lumera/modules/audit/impl_test.go | 13 + pkg/lumera/modules/audit_msg/impl_test.go | 46 + pkg/metrics/lep6/metrics.go | 263 ++++ pkg/metrics/lep6/metrics_test.go | 92 ++ pkg/storage/queries/recheck.go | 82 +- pkg/storage/queries/recheck_interface.go | 7 + pkg/storage/queries/recheck_test.go | 25 + pkg/storage/queries/self_healing_lep6.go | 83 +- pkg/storage/queries/self_healing_lep6_test.go | 42 +- pkg/storage/queries/sqlite.go | 39 +- pkg/storage/queries/sqlite_open_test.go | 27 + proto/supernode/status.proto | 33 + supernode/cmd/helpers.go | 9 +- supernode/cmd/start.go | 14 +- supernode/config.yml | 28 + supernode/config/config.go | 36 +- supernode/config/config_lep6_test.go | 244 +++ supernode/config/defaults.go | 21 + supernode/config/lep6.go | 191 +++ supernode/config/save.go | 25 + supernode/recheck/attestor.go | 18 +- supernode/recheck/attestor_test.go | 4 +- supernode/recheck/finder_service_test.go | 15 + supernode/recheck/service.go | 38 +- supernode/recheck/test_helpers_test.go | 30 +- supernode/recheck/types.go | 14 +- supernode/self_healing/finalizer.go | 3 + supernode/self_healing/healer.go | 50 +- supernode/self_healing/mocks_test.go | 9 + supernode/self_healing/service.go | 67 +- supernode/self_healing/service_test.go | 37 + supernode/self_healing/verifier.go | 57 +- 
supernode/status/service.go | 42 + supernode/status/service_test.go | 36 + supernode/storage_challenge/lep6_dispatch.go | 11 + supernode/storage_challenge/result_buffer.go | 5 +- .../storage_challenge/ticket_provider.go | 27 + .../storage_challenge/ticket_provider_test.go | 32 +- tests/scripts/setup-supernodes.sh | 2 +- tests/system/config.lep6-1.yml | 59 + tests/system/config.lep6-2.yml | 60 + tests/system/config.lep6-3.yml | 60 + tests/system/e2e_lep6_helpers_test.go | 964 ++++++++++++ tests/system/e2e_lep6_runtime_test.go | 515 ++++++ tests/system/e2e_lep6_test.go | 60 + tests/system/genesis_io.go | 22 + tests/system/go.mod | 6 +- tests/system/go.sum | 4 +- tests/system/supernode-utils.go | 18 +- 61 files changed, 5620 insertions(+), 765 deletions(-) create mode 100644 docs/lep6-supernode-runbook.md create mode 100644 pkg/lumera/modules/audit/impl_test.go create mode 100644 pkg/lumera/modules/audit_msg/impl_test.go create mode 100644 pkg/metrics/lep6/metrics.go create mode 100644 pkg/metrics/lep6/metrics_test.go create mode 100644 pkg/storage/queries/sqlite_open_test.go create mode 100644 supernode/config/config_lep6_test.go create mode 100644 supernode/config/lep6.go create mode 100644 tests/system/config.lep6-1.yml create mode 100644 tests/system/config.lep6-2.yml create mode 100644 tests/system/config.lep6-3.yml create mode 100644 tests/system/e2e_lep6_helpers_test.go create mode 100644 tests/system/e2e_lep6_runtime_test.go create mode 100644 tests/system/e2e_lep6_test.go diff --git a/.github/actions/setup-env/action.yml b/.github/actions/setup-env/action.yml index 41e49b25..7b6d0cf7 100644 --- a/.github/actions/setup-env/action.yml +++ b/.github/actions/setup-env/action.yml @@ -1,11 +1,11 @@ name: Setup Environment description: Sets up Go (dynamically from go.mod) and installs system dependencies -inputs: {} -# bust_lumera_retag: -# description: "One-time: remove lumera sums after retag" -# required: false -# default: 'false' +inputs: + bust_lumera_retag: + 
description: "One-time: remove cached Lumera module artifacts after a retag/checksum refresh" + required: false + default: 'false' outputs: go-version: description: "Go version parsed from go.mod" @@ -33,17 +33,14 @@ runs: sudo apt-get update sudo apt-get install -y libwebp-dev make - # - name: One-time reset retagged lumera checksums - # if: ${{ inputs.bust_lumera_retag == 'true' }} - # shell: bash - # run: | - # echo "Busting go.sum entries for github.com/LumeraProtocol/lumera v1.11.0-rc (one-time)" - # # Remove stale checksums in all local modules - # find . -name 'go.sum' -maxdepth 3 -print0 | xargs -0 -I{} sed -i \ - # '/github.com\/LumeraProtocol\/lumera v1.11.0-rc/d' {} - # # Clear module/build caches to avoid cached zips - # go clean -modcache || true - # rm -rf "$(go env GOCACHE)" || true + - name: Bust cached Lumera module artifacts + if: ${{ inputs.bust_lumera_retag == 'true' }} + shell: bash + run: | + echo "Busting cached Lumera module artifacts before go mod download" + go clean -modcache || true + rm -rf "$(go env GOCACHE)" || true + rm -rf "$(go env GOPATH)/pkg/mod/cache/download/github.com/!lumera!protocol/lumera" || true - name: Set Go Private Modules shell: bash diff --git a/.github/workflows/build&release.yml b/.github/workflows/build&release.yml index cd99058b..74f9483c 100644 --- a/.github/workflows/build&release.yml +++ b/.github/workflows/build&release.yml @@ -27,8 +27,8 @@ jobs: - name: Setup Go and dependencies uses: ./.github/actions/setup-env - # with: - # bust_lumera_retag: 'true' + with: + bust_lumera_retag: 'true' - name: Build binaries run: | @@ -74,8 +74,8 @@ jobs: - name: Setup Go and dependencies uses: ./.github/actions/setup-env - # with: - # bust_lumera_retag: 'true' + with: + bust_lumera_retag: 'true' - name: Prepare Release Variables id: vars diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 26796204..d7d6e2d9 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -17,8 +17,8 
@@ jobs: uses: actions/checkout@v6.0.1 - name: Setup Go and system deps uses: ./.github/actions/setup-env - # with: - # bust_lumera_retag: 'true' + with: + bust_lumera_retag: 'true' - name: Go mod tidy run: go mod tidy @@ -35,8 +35,8 @@ jobs: - name: Setup Go and system deps uses: ./.github/actions/setup-env - # with: - # bust_lumera_retag: 'true' + with: + bust_lumera_retag: 'true' - name: Go mod tidy run: go mod tidy @@ -54,8 +54,8 @@ jobs: - name: Setup Go and system deps uses: ./.github/actions/setup-env - # with: - # bust_lumera_retag: 'true' + with: + bust_lumera_retag: 'true' - name: Go mod tidy run: go mod tidy @@ -70,6 +70,28 @@ jobs: - name: Run cascade e2e tests run: make test-cascade + lep6-e2e-tests: + name: lep6-e2e-tests + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v6.0.1 + + - name: Setup Go and system deps + uses: ./.github/actions/setup-env + with: + bust_lumera_retag: 'true' + + - name: Go mod tidy + run: go mod tidy + + - name: Install Lumera + run: make install-lumera + + - name: Run LEP-6 e2e tests + run: make test-lep6 + # sn-manager-e2e-tests: # name: sn-manager-e2e-tests # runs-on: ubuntu-latest diff --git a/.gitignore b/.gitignore index 39296abf..9d32cd07 100644 --- a/.gitignore +++ b/.gitignore @@ -22,6 +22,8 @@ go.work go.work.sum tests/system/testnet tests/system/**/supernode-data* +tests/system/supernode-lep6-data*/ +.lep6-wip-backup/ tests/system/data tests/system/1 # env file diff --git a/Makefile b/Makefile index 9445e724..26de039d 100644 --- a/Makefile +++ b/Makefile @@ -120,9 +120,9 @@ release: ################################################### ### Tests and Simulation ### ################################################### -.PHONY: test-e2e test-unit test-integration test-system test-cascade test-sn-manager -.PHONY: install-lumera setup-supernodes system-test-setup install-deps -.PHONY: gen-cascade gen-supernode +.PHONY: test-e2e test-unit test-integration test-system test-cascade 
test-lep6 test-sn-manager +.PHONY: install-lumera setup-supernodes setup-lep6-supernodes system-test-setup install-deps +.PHONY: gen-cascade gen-supernode audit-mod-clean lep6-reset-dedup lep6-validate-config test-unit: ${GO} test -v ./... @@ -159,9 +159,15 @@ SUPERNODE_SRC=supernode/main.go DATA_DIR=tests/system/supernode-data1 DATA_DIR2=tests/system/supernode-data2 DATA_DIR3=tests/system/supernode-data3 +LEP6_DATA_DIR=tests/system/supernode-lep6-data1 +LEP6_DATA_DIR2=tests/system/supernode-lep6-data2 +LEP6_DATA_DIR3=tests/system/supernode-lep6-data3 CONFIG_FILE=tests/system/config.test-1.yml CONFIG_FILE2=tests/system/config.test-2.yml CONFIG_FILE3=tests/system/config.test-3.yml +LEP6_CONFIG_FILE=tests/system/config.lep6-1.yml +LEP6_CONFIG_FILE2=tests/system/config.lep6-2.yml +LEP6_CONFIG_FILE3=tests/system/config.lep6-3.yml # Setup script SETUP_SCRIPT=tests/scripts/setup-supernodes.sh @@ -186,6 +192,12 @@ setup-supernodes: @chmod +x $(SETUP_SCRIPT) @bash $(SETUP_SCRIPT) all $(SUPERNODE_SRC) $(DATA_DIR) $(CONFIG_FILE) $(DATA_DIR2) $(CONFIG_FILE2) $(DATA_DIR3) $(CONFIG_FILE3) +setup-lep6-supernodes: + @echo "Setting up isolated LEP-6 supernode environments..." + @rm -rf tests/system/heal-staging + @chmod +x $(SETUP_SCRIPT) + @bash $(SETUP_SCRIPT) all $(SUPERNODE_SRC) $(LEP6_DATA_DIR) $(LEP6_CONFIG_FILE) $(LEP6_DATA_DIR2) $(LEP6_CONFIG_FILE2) $(LEP6_DATA_DIR3) $(LEP6_CONFIG_FILE3) + # Complete system test setup (Lumera + Supernodes) system-test-setup: install-lumera setup-supernodes @echo "System test environment setup complete." @@ -201,6 +213,31 @@ test-cascade: @echo "Running cascade e2e tests..." @cd tests/system && ${GO} mod tidy && ${GO} test -tags=system_test -v -run TestCascadeE2E . +# Run LEP-6 e2e tests only against the real lumerad/local-chain system harness. +# The runtime test uses isolated supernode-lep6-data* directories so per-node +# SQLite history/dedup state is not shared with Cascade fixtures or other nodes. 
+test-lep6: setup-lep6-supernodes + @echo "Running LEP-6 e2e tests..." + @cd tests/system && ${GO} mod tidy && ${GO} test -tags=system_test -timeout=900s -v -run '^TestLEP6' . + +# Validate LEP-6 local config/default/fixture coverage without starting a network. +lep6-validate-config: + @echo "Validating LEP-6 supernode config fixtures..." + @${GO} test ./supernode/config -run 'TestLoadConfig_LEP6|TestCreateDefaultConfig_IncludesExplicitLEP6Blocks|TestSystemConfigFixturesIncludeLEP6' -count=1 + +# Recover from stale Lumera module checksum/cache issues during local PR-6 work. +audit-mod-clean: + @echo "Cleaning Go module cache and re-resolving modules..." + @${GO} clean -modcache + @${GO} mod download + +# Reset local LEP-6 dedup/reconciliation tables. Requires DB=/absolute/path/to/local.db. +lep6-reset-dedup: + @if [ -z "$(DB)" ]; then echo "DB=/absolute/path/to/local.db is required"; exit 2; fi + @test -f "$(DB)" || (echo "DB does not exist: $(DB)"; exit 2) + @echo "Resetting LEP-6 local dedup tables in $(DB): heal_claims_submitted, heal_verifications_submitted, storage_recheck_submissions, recheck_attempt_failures" + @sqlite3 "$(DB)" "DELETE FROM heal_claims_submitted; DELETE FROM heal_verifications_submitted; DELETE FROM storage_recheck_submissions; DELETE FROM recheck_attempt_failures;" + # Run sn-manager e2e tests only test-sn-manager: @echo "Running sn-manager e2e tests..." diff --git a/docs/lep6-supernode-runbook.md b/docs/lep6-supernode-runbook.md new file mode 100644 index 00000000..1662ce2e --- /dev/null +++ b/docs/lep6-supernode-runbook.md @@ -0,0 +1,111 @@ +# LEP-6 Supernode Release Runbook + +This runbook covers the Supernode-side LEP-6 storage-truth enforcement support introduced across the LEP-6 PR stack and finalized in PR-6. 
+ +## Scope + +Supernode LEP-6 provides runtime support for Lumera `v1.12.0` audit/storage-truth APIs: + +- storage challenge ticket discovery and transcript/evidence submission; +- storage recheck candidate discovery, local retry budget, and `MsgSubmitStorageRecheckEvidence` submission; +- self-healing heal-op dispatch, healer claim submission, verifier attestation submission, and finalizer publication only after chain-verified heal success; +- repo-native in-process observability snapshots plus structured `logtrace` events. + +The chain remains the source of truth for heal-op scheduling, verifier assignment, verification quorum, rejected/failed/expired status, and scoring/probation changes. + +## Release prerequisites + +1. Supernode must depend on Lumera `v1.12.0` APIs. +2. Operators must run against a Lumera chain whose audit module includes LEP-6 storage-truth endpoints. +3. Supernode local SQLite storage must be writable; PR-6 adds local idempotency state for pending/submitted heal and recheck txs. +4. Existing Supernode status/log collection should be enabled so LEP-6 snapshot counters and structured logs are visible through the same operator workflow used by storage challenge, Cascade, and supernode metrics. + +## Local validation commands + +From the supernode repository root: + +```bash +export PATH=/usr/local/go/bin:$PATH # adjust to your local Go toolchain install +go test $(go list ./... | grep -v '/tests') +``` + +For the real-chain LEP-6 system test: + +```bash +make system-test-setup +make test-lep6 +``` + +`make test-lep6` runs `tests/system/TestLEP6RealChainIntegration` using the same real `lumerad`/local-chain harness as Cascade e2e. It does not use chain mocks. + +## Observability + +LEP-6 uses the repo-native Supernode observability pattern: in-process atomic snapshots plus structured `logtrace` fields. PR-6 does **not** add a LEP-6-only Prometheus endpoint. 
+ +LEP-6 snapshot signals include: + +- challenge dispatch results by chain result class; +- challenge dispatch throttling drops by reason; +- challenge dispatch epoch duration totals/counts by role; +- ticket discovery outcomes; +- no-ticket-provider-active state; +- recheck candidates discovered and current pending candidate gauge; +- recheck submissions by result class/result; +- recheck already-submitted dedupe count; +- recheck failure counts by stage; +- heal claims by result; +- heal claim reconciliation count; +- heal verifications by result/vote; +- heal verification already-recorded dedupe count; +- self-healing pending claim gauge; +- self-healing staging bytes gauge; +- finalizer publish count; +- finalizer cleanup count by terminal chain status. + +Suggested alerts/signals from snapshots/logs: + +- sustained heal-claim `submit_error` or `stage_error` increases; +- sustained heal-verification `submit_error` or `stage_error` increases; +- sustained recheck failure increases by stage; +- challenge dispatch throttling drops approaching the chain cap; +- no-ticket-provider-active remaining true after candidate-producing epochs; +- self-healing staging bytes increasing without matching finalizer publish/cleanup progress; +- rejected/failed/expired finalizer cleanup spikes after a release. + +## Operational behavior + +### Successful healing + +1. Chain schedules a heal-op and assigns a healer/verifiers. +2. Healer stages recovered data locally and pre-stages a local dedup row. +3. Healer submits `MsgClaimHealComplete`. +4. On chain acceptance, Supernode marks the local row as submitted. +5. Verifiers fetch and verify the staged manifest/hash, pre-stage local dedup rows, and submit `MsgSubmitHealVerification`. +6. Once chain marks the heal-op verified, the finalizer publishes the healed artifact to the P2P layer. + +Important: the healed file is not published as durable P2P recovery output before successful chain verification. 
+ +### Rejected healing + +If verifier quorum rejects the heal, the chain marks the heal-op rejected/failed according to Lumera `v1.12.0` keeper rules. Supernode does not publish the healer output as recovered data. + +### Healer cannot heal / no-show + +If the healer cannot produce a valid manifest or misses the deadline, the chain eventually expires/fails the heal-op and applies LEP-6 scoring/probation rules. Supernode records errors and retry/backoff state locally where applicable, but does not override chain status. + +### Restart/idempotency + +PR-6 closes the submit-success/persist-crash window by pre-staging local pending rows before chain tx submission for: + +- heal claims; +- heal verifications; +- recheck evidence submissions. + +Pending rows dedup retries after restart; successful txs are marked submitted after chain acceptance. Submit failures remove the pending row so the operation can retry later. + +## Troubleshooting + +- If duplicate tx errors appear after restart, inspect local SQLite `status` values for LEP-6 pending/submitted tables and compare with chain heal/recheck state. +- If recheck candidates stop processing, inspect `recheck_attempt_failures`; failures expire after the configured TTL and successful submissions clear the failure budget. +- If LEP-6 counters are flat while work is expected, inspect service startup/configuration first, then check structured `logtrace` events for the challenge, recheck, and self-healing services. +- If `make test-lep6` fails before tests start, run `make system-test-setup` and confirm `lumerad version` matches the Lumera dependency version. diff --git a/gen/supernode/service.pb.go b/gen/supernode/service.pb.go index f74c97f0..90990d10 100644 --- a/gen/supernode/service.pb.go +++ b/gen/supernode/service.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.9 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc v4.25.1 // source: supernode/service.proto package supernode @@ -12,7 +12,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" - unsafe "unsafe" ) const ( @@ -23,16 +22,18 @@ const ( ) type ListServicesRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *ListServicesRequest) Reset() { *x = ListServicesRequest{} - mi := &file_supernode_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_supernode_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ListServicesRequest) String() string { @@ -43,7 +44,7 @@ func (*ListServicesRequest) ProtoMessage() {} func (x *ListServicesRequest) ProtoReflect() protoreflect.Message { mi := &file_supernode_service_proto_msgTypes[0] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -59,18 +60,21 @@ func (*ListServicesRequest) Descriptor() ([]byte, []int) { } type ListServicesResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Services []*ServiceInfo `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"` - Count int32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Services []*ServiceInfo `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"` + Count int32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` } 
func (x *ListServicesResponse) Reset() { *x = ListServicesResponse{} - mi := &file_supernode_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_supernode_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ListServicesResponse) String() string { @@ -81,7 +85,7 @@ func (*ListServicesResponse) ProtoMessage() {} func (x *ListServicesResponse) ProtoReflect() protoreflect.Message { mi := &file_supernode_service_proto_msgTypes[1] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -111,18 +115,21 @@ func (x *ListServicesResponse) GetCount() int32 { } type ServiceInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Methods []string `protobuf:"bytes,2,rep,name=methods,proto3" json:"methods,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Methods []string `protobuf:"bytes,2,rep,name=methods,proto3" json:"methods,omitempty"` } func (x *ServiceInfo) Reset() { *x = ServiceInfo{} - mi := &file_supernode_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_supernode_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ServiceInfo) String() string { @@ -133,7 +140,7 @@ func (*ServiceInfo) ProtoMessage() {} func (x *ServiceInfo) ProtoReflect() protoreflect.Message { mi := &file_supernode_service_proto_msgTypes[2] - if x != nil { + if 
protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -164,17 +171,20 @@ func (x *ServiceInfo) GetMethods() []string { // Raw pprof request/response messages type RawPprofRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Debug int32 `protobuf:"varint,1,opt,name=debug,proto3" json:"debug,omitempty"` // Debug level (0 for binary, >0 for text) - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Debug int32 `protobuf:"varint,1,opt,name=debug,proto3" json:"debug,omitempty"` // Debug level (0 for binary, >0 for text) } func (x *RawPprofRequest) Reset() { *x = RawPprofRequest{} - mi := &file_supernode_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_supernode_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *RawPprofRequest) String() string { @@ -185,7 +195,7 @@ func (*RawPprofRequest) ProtoMessage() {} func (x *RawPprofRequest) ProtoReflect() protoreflect.Message { mi := &file_supernode_service_proto_msgTypes[3] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -208,17 +218,20 @@ func (x *RawPprofRequest) GetDebug() int32 { } type RawPprofCpuRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Seconds int32 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` // CPU profile duration in seconds (default 30) - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Seconds int32 `protobuf:"varint,1,opt,name=seconds,proto3" 
json:"seconds,omitempty"` // CPU profile duration in seconds (default 30) } func (x *RawPprofCpuRequest) Reset() { *x = RawPprofCpuRequest{} - mi := &file_supernode_service_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_supernode_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *RawPprofCpuRequest) String() string { @@ -229,7 +242,7 @@ func (*RawPprofCpuRequest) ProtoMessage() {} func (x *RawPprofCpuRequest) ProtoReflect() protoreflect.Message { mi := &file_supernode_service_proto_msgTypes[4] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -252,17 +265,20 @@ func (x *RawPprofCpuRequest) GetSeconds() int32 { } type RawPprofResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` // Raw pprof data exactly as returned by runtime/pprof - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` // Raw pprof data exactly as returned by runtime/pprof } func (x *RawPprofResponse) Reset() { *x = RawPprofResponse{} - mi := &file_supernode_service_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_supernode_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *RawPprofResponse) String() string { @@ -273,7 +289,7 @@ func (*RawPprofResponse) ProtoMessage() {} func (x *RawPprofResponse) ProtoReflect() protoreflect.Message { mi := &file_supernode_service_proto_msgTypes[5] - if 
x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -297,45 +313,141 @@ func (x *RawPprofResponse) GetData() []byte { var File_supernode_service_proto protoreflect.FileDescriptor -const file_supernode_service_proto_rawDesc = "" + - "\n" + - "\x17supernode/service.proto\x12\tsupernode\x1a\x16supernode/status.proto\x1a\x1cgoogle/api/annotations.proto\"\x15\n" + - "\x13ListServicesRequest\"`\n" + - "\x14ListServicesResponse\x122\n" + - "\bservices\x18\x01 \x03(\v2\x16.supernode.ServiceInfoR\bservices\x12\x14\n" + - "\x05count\x18\x02 \x01(\x05R\x05count\";\n" + - "\vServiceInfo\x12\x12\n" + - "\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" + - "\amethods\x18\x02 \x03(\tR\amethods\"'\n" + - "\x0fRawPprofRequest\x12\x14\n" + - "\x05debug\x18\x01 \x01(\x05R\x05debug\".\n" + - "\x12RawPprofCpuRequest\x12\x18\n" + - "\aseconds\x18\x01 \x01(\x05R\aseconds\"&\n" + - "\x10RawPprofResponse\x12\x12\n" + - "\x04data\x18\x01 \x01(\fR\x04data2\xec\v\n" + - "\x10SupernodeService\x12X\n" + - "\tGetStatus\x12\x18.supernode.StatusRequest\x1a\x19.supernode.StatusResponse\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/api/v1/status\x12i\n" + - "\fListServices\x12\x1e.supernode.ListServicesRequest\x1a\x1f.supernode.ListServicesResponse\"\x18\x82\xd3\xe4\x93\x02\x12\x12\x10/api/v1/services\x12g\n" + - "\vGetRawPprof\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"\x1f\x82\xd3\xe4\x93\x02\x19\x12\x17/api/v1/debug/raw/pprof\x12p\n" + - "\x0fGetRawPprofHeap\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"$\x82\xd3\xe4\x93\x02\x1e\x12\x1c/api/v1/debug/raw/pprof/heap\x12z\n" + - "\x14GetRawPprofGoroutine\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\")\x82\xd3\xe4\x93\x02#\x12!/api/v1/debug/raw/pprof/goroutine\x12t\n" + - 
"\x11GetRawPprofAllocs\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"&\x82\xd3\xe4\x93\x02 \x12\x1e/api/v1/debug/raw/pprof/allocs\x12r\n" + - "\x10GetRawPprofBlock\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"%\x82\xd3\xe4\x93\x02\x1f\x12\x1d/api/v1/debug/raw/pprof/block\x12r\n" + - "\x10GetRawPprofMutex\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"%\x82\xd3\xe4\x93\x02\x1f\x12\x1d/api/v1/debug/raw/pprof/mutex\x12\x80\x01\n" + - "\x17GetRawPprofThreadcreate\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\",\x82\xd3\xe4\x93\x02&\x12$/api/v1/debug/raw/pprof/threadcreate\x12y\n" + - "\x12GetRawPprofProfile\x12\x1d.supernode.RawPprofCpuRequest\x1a\x1b.supernode.RawPprofResponse\"'\x82\xd3\xe4\x93\x02!\x12\x1f/api/v1/debug/raw/pprof/profile\x12v\n" + - "\x12GetRawPprofCmdline\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"'\x82\xd3\xe4\x93\x02!\x12\x1f/api/v1/debug/raw/pprof/cmdline\x12t\n" + - "\x11GetRawPprofSymbol\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"&\x82\xd3\xe4\x93\x02 \x12\x1e/api/v1/debug/raw/pprof/symbol\x12r\n" + - "\x10GetRawPprofTrace\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"%\x82\xd3\xe4\x93\x02\x1f\x12\x1d/api/v1/debug/raw/pprof/traceB6Z4github.com/LumeraProtocol/supernode/v2/gen/supernodeb\x06proto3" +var file_supernode_service_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x75, 0x70, 0x65, 0x72, + 0x6e, 0x6f, 0x64, 0x65, 0x1a, 0x16, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x22, 0x15, 0x0a, 0x13, 0x4c, 0x69, + 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x60, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x75, + 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x14, 0x0a, + 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x22, 0x3b, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, + 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, + 0x22, 0x27, 0x0a, 0x0f, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, 0x22, 0x2e, 0x0a, 0x12, 0x52, 0x61, 0x77, + 0x50, 0x70, 0x72, 0x6f, 0x66, 0x43, 0x70, 0x75, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, 0x26, 0x0a, 0x10, 0x52, 0x61, 0x77, + 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, + 0x61, 0x32, 0xec, 0x0b, 0x0a, 0x10, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 
0x64, 0x65, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, + 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, + 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x69, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x12, 0x1e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x12, 0x10, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x76, 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x67, 0x0a, 0x0b, 0x47, + 0x65, 0x74, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x12, 0x1a, 0x2e, 0x73, 0x75, 0x70, + 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x1f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, 0x61, 0x77, 0x2f, 0x70, + 0x70, 0x72, 0x6f, 0x66, 0x12, 0x70, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x50, 0x70, + 0x72, 0x6f, 0x66, 
0x48, 0x65, 0x61, 0x70, 0x12, 0x1a, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, + 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, 0x61, 0x77, 0x2f, 0x70, 0x70, 0x72, 0x6f, + 0x66, 0x2f, 0x68, 0x65, 0x61, 0x70, 0x12, 0x7a, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, + 0x50, 0x70, 0x72, 0x6f, 0x66, 0x47, 0x6f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x65, 0x12, 0x1a, + 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, + 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, + 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, + 0x21, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, + 0x61, 0x77, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x67, 0x6f, 0x72, 0x6f, 0x75, 0x74, 0x69, + 0x6e, 0x65, 0x12, 0x74, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, + 0x66, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x73, 0x12, 0x1a, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x12, 0x1e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, + 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, 
0x61, 0x77, 0x2f, 0x70, 0x70, 0x72, 0x6f, + 0x66, 0x2f, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x73, 0x12, 0x72, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x52, + 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x1a, 0x2e, 0x73, + 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, + 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1f, 0x12, 0x1d, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, 0x61, 0x77, + 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x72, 0x0a, 0x10, + 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4d, 0x75, 0x74, 0x65, 0x78, + 0x12, 0x1a, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, + 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, + 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, + 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x1f, 0x12, 0x1d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, + 0x2f, 0x72, 0x61, 0x77, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x6d, 0x75, 0x74, 0x65, 0x78, + 0x12, 0x80, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, + 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x2e, 0x73, + 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, + 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 
0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x26, 0x12, 0x24, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, 0x61, 0x77, + 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x12, 0x79, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, + 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x1d, 0x2e, 0x73, 0x75, 0x70, 0x65, + 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x43, 0x70, + 0x75, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, 0x61, 0x77, + 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x76, + 0x0a, 0x12, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x43, 0x6d, 0x64, + 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x1a, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, + 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, + 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, 0x61, 0x77, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x63, + 0x6d, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x74, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, + 0x50, 0x70, 0x72, 0x6f, 0x66, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x1a, 0x2e, 0x73, 0x75, + 0x70, 0x65, 0x72, 0x6e, 0x6f, 
0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x12, 0x1e, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x72, 0x61, 0x77, 0x2f, + 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x72, 0x0a, 0x10, + 0x47, 0x65, 0x74, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x72, 0x61, 0x63, 0x65, + 0x12, 0x1a, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, + 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, + 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x61, 0x77, 0x50, 0x70, 0x72, 0x6f, + 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x1f, 0x12, 0x1d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, + 0x2f, 0x72, 0x61, 0x77, 0x2f, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, + 0x75, 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, 0x75, + 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x73, + 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} var ( file_supernode_service_proto_rawDescOnce sync.Once - file_supernode_service_proto_rawDescData []byte + file_supernode_service_proto_rawDescData = file_supernode_service_proto_rawDesc ) func file_supernode_service_proto_rawDescGZIP() []byte { file_supernode_service_proto_rawDescOnce.Do(func() { - file_supernode_service_proto_rawDescData = 
protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_supernode_service_proto_rawDesc), len(file_supernode_service_proto_rawDesc))) + file_supernode_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_supernode_service_proto_rawDescData) }) return file_supernode_service_proto_rawDescData } @@ -392,11 +504,85 @@ func file_supernode_service_proto_init() { return } file_supernode_status_proto_init() + if !protoimpl.UnsafeEnabled { + file_supernode_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*ListServicesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_supernode_service_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ListServicesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_supernode_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ServiceInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_supernode_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*RawPprofRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_supernode_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*RawPprofCpuRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_supernode_service_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*RawPprofResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } type x struct{} out := protoimpl.TypeBuilder{ File: 
protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_supernode_service_proto_rawDesc), len(file_supernode_service_proto_rawDesc)), + RawDescriptor: file_supernode_service_proto_rawDesc, NumEnums: 0, NumMessages: 6, NumExtensions: 0, @@ -407,6 +593,7 @@ func file_supernode_service_proto_init() { MessageInfos: file_supernode_service_proto_msgTypes, }.Build() File_supernode_service_proto = out.File + file_supernode_service_proto_rawDesc = nil file_supernode_service_proto_goTypes = nil file_supernode_service_proto_depIdxs = nil } diff --git a/gen/supernode/service.pb.gw.go b/gen/supernode/service.pb.gw.go index 89e6ca78..93983b0f 100644 --- a/gen/supernode/service.pb.gw.go +++ b/gen/supernode/service.pb.gw.go @@ -10,7 +10,6 @@ package supernode import ( "context" - "errors" "io" "net/http" @@ -25,470 +24,478 @@ import ( ) // Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = metadata.Join + var ( - _ codes.Code - _ io.Reader - _ status.Status - _ = errors.New - _ = runtime.String - _ = utilities.NewDoubleArray - _ = metadata.Join + filter_SupernodeService_GetStatus_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} ) -var filter_SupernodeService_GetStatus_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} - func request_SupernodeService_GetStatus_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq StatusRequest - metadata runtime.ServerMetadata - ) - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } + var protoReq StatusRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetStatus_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := client.GetStatus(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err + } func local_request_SupernodeService_GetStatus_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq StatusRequest - metadata runtime.ServerMetadata - ) + var protoReq StatusRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetStatus_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := server.GetStatus(ctx, &protoReq) return msg, metadata, err + } func request_SupernodeService_ListServices_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq ListServicesRequest - metadata runtime.ServerMetadata - ) - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } + var protoReq ListServicesRequest + var metadata runtime.ServerMetadata + msg, err := client.ListServices(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err + } func local_request_SupernodeService_ListServices_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq ListServicesRequest - metadata 
runtime.ServerMetadata - ) + var protoReq ListServicesRequest + var metadata runtime.ServerMetadata + msg, err := server.ListServices(ctx, &protoReq) return msg, metadata, err + } -var filter_SupernodeService_GetRawPprof_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +var ( + filter_SupernodeService_GetRawPprof_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) func request_SupernodeService_GetRawPprof_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprof_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := client.GetRawPprof(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err + } func local_request_SupernodeService_GetRawPprof_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprof_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", 
err) } + msg, err := server.GetRawPprof(ctx, &protoReq) return msg, metadata, err + } -var filter_SupernodeService_GetRawPprofHeap_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +var ( + filter_SupernodeService_GetRawPprofHeap_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) func request_SupernodeService_GetRawPprofHeap_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofHeap_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := client.GetRawPprofHeap(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err + } func local_request_SupernodeService_GetRawPprofHeap_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofHeap_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := server.GetRawPprofHeap(ctx, &protoReq) return 
msg, metadata, err + } -var filter_SupernodeService_GetRawPprofGoroutine_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +var ( + filter_SupernodeService_GetRawPprofGoroutine_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) func request_SupernodeService_GetRawPprofGoroutine_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofGoroutine_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := client.GetRawPprofGoroutine(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err + } func local_request_SupernodeService_GetRawPprofGoroutine_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofGoroutine_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := server.GetRawPprofGoroutine(ctx, &protoReq) return msg, metadata, err + } 
-var filter_SupernodeService_GetRawPprofAllocs_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +var ( + filter_SupernodeService_GetRawPprofAllocs_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) func request_SupernodeService_GetRawPprofAllocs_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofAllocs_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := client.GetRawPprofAllocs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err + } func local_request_SupernodeService_GetRawPprofAllocs_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofAllocs_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := server.GetRawPprofAllocs(ctx, &protoReq) return msg, metadata, err + } -var 
filter_SupernodeService_GetRawPprofBlock_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +var ( + filter_SupernodeService_GetRawPprofBlock_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) func request_SupernodeService_GetRawPprofBlock_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofBlock_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := client.GetRawPprofBlock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err + } func local_request_SupernodeService_GetRawPprofBlock_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofBlock_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := server.GetRawPprofBlock(ctx, &protoReq) return msg, metadata, err + } -var filter_SupernodeService_GetRawPprofMutex_0 = 
&utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +var ( + filter_SupernodeService_GetRawPprofMutex_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) func request_SupernodeService_GetRawPprofMutex_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofMutex_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := client.GetRawPprofMutex(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err + } func local_request_SupernodeService_GetRawPprofMutex_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofMutex_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := server.GetRawPprofMutex(ctx, &protoReq) return msg, metadata, err + } -var filter_SupernodeService_GetRawPprofThreadcreate_0 = &utilities.DoubleArray{Encoding: 
map[string]int{}, Base: []int(nil), Check: []int(nil)} +var ( + filter_SupernodeService_GetRawPprofThreadcreate_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) func request_SupernodeService_GetRawPprofThreadcreate_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofThreadcreate_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := client.GetRawPprofThreadcreate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err + } func local_request_SupernodeService_GetRawPprofThreadcreate_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofThreadcreate_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := server.GetRawPprofThreadcreate(ctx, &protoReq) return msg, metadata, err + } -var filter_SupernodeService_GetRawPprofProfile_0 = &utilities.DoubleArray{Encoding: 
map[string]int{}, Base: []int(nil), Check: []int(nil)} +var ( + filter_SupernodeService_GetRawPprofProfile_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) func request_SupernodeService_GetRawPprofProfile_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofCpuRequest - metadata runtime.ServerMetadata - ) - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } + var protoReq RawPprofCpuRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofProfile_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := client.GetRawPprofProfile(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err + } func local_request_SupernodeService_GetRawPprofProfile_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofCpuRequest - metadata runtime.ServerMetadata - ) + var protoReq RawPprofCpuRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofProfile_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := server.GetRawPprofProfile(ctx, &protoReq) return msg, metadata, err + } -var filter_SupernodeService_GetRawPprofCmdline_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: 
[]int(nil), Check: []int(nil)} +var ( + filter_SupernodeService_GetRawPprofCmdline_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) func request_SupernodeService_GetRawPprofCmdline_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofCmdline_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := client.GetRawPprofCmdline(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err + } func local_request_SupernodeService_GetRawPprofCmdline_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofCmdline_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := server.GetRawPprofCmdline(ctx, &protoReq) return msg, metadata, err + } -var filter_SupernodeService_GetRawPprofSymbol_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +var ( + 
filter_SupernodeService_GetRawPprofSymbol_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) func request_SupernodeService_GetRawPprofSymbol_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofSymbol_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := client.GetRawPprofSymbol(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err + } func local_request_SupernodeService_GetRawPprofSymbol_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofSymbol_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := server.GetRawPprofSymbol(ctx, &protoReq) return msg, metadata, err + } -var filter_SupernodeService_GetRawPprofTrace_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +var ( + filter_SupernodeService_GetRawPprofTrace_0 = 
&utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) func request_SupernodeService_GetRawPprofTrace_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofTrace_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := client.GetRawPprofTrace(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err + } func local_request_SupernodeService_GetRawPprofTrace_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq RawPprofRequest - metadata runtime.ServerMetadata - ) + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofTrace_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + msg, err := server.GetRawPprofTrace(ctx, &protoReq) return msg, metadata, err + } // RegisterSupernodeServiceHandlerServer registers the http handlers for service SupernodeService to "mux". // UnaryRPC :call SupernodeServiceServer directly. 
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. // Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterSupernodeServiceHandlerFromEndpoint instead. -// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call. func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server SupernodeServiceServer) error { - mux.Handle(http.MethodGet, pattern_SupernodeService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetStatus", runtime.WithHTTPPathPattern("/api/v1/status")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetStatus", runtime.WithHTTPPathPattern("/api/v1/status")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -500,15 +507,20 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/ListServices", runtime.WithHTTPPathPattern("/api/v1/services")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/ListServices", runtime.WithHTTPPathPattern("/api/v1/services")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -520,15 +532,20 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_ListServices_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprof_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprof_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprof", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprof", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -540,15 +557,20 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprof_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofHeap", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/heap")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofHeap", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/heap")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -560,15 +582,20 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofHeap_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofGoroutine_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofGoroutine_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofGoroutine", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/goroutine")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofGoroutine", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/goroutine")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -580,15 +607,20 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofGoroutine_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofAllocs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofAllocs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofAllocs", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/allocs")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofAllocs", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/allocs")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -600,15 +632,20 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofAllocs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofBlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofBlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofBlock", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/block")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofBlock", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/block")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -620,15 +657,20 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofBlock_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofMutex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofMutex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofMutex", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/mutex")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofMutex", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/mutex")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -640,15 +682,20 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofMutex_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofThreadcreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofThreadcreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofThreadcreate", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/threadcreate")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofThreadcreate", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/threadcreate")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -660,15 +707,20 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofThreadcreate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofProfile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofProfile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofProfile", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/profile")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofProfile", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/profile")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -680,15 +732,20 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofProfile_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofCmdline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofCmdline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofCmdline", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/cmdline")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofCmdline", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/cmdline")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -700,15 +757,20 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofCmdline_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofSymbol_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofSymbol_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofSymbol", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/symbol")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofSymbol", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/symbol")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -720,15 +782,20 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofSymbol_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofTrace", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/trace")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofTrace", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/trace")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -740,7 +807,9 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofTrace_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) return nil @@ -767,6 +836,7 @@ func RegisterSupernodeServiceHandlerFromEndpoint(ctx context.Context, mux *runti } }() }() + return RegisterSupernodeServiceHandler(ctx, mux, conn) } @@ -780,13 +850,16 @@ func RegisterSupernodeServiceHandler(ctx context.Context, mux *runtime.ServeMux, // to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "SupernodeServiceClient". // Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "SupernodeServiceClient" // doesn't go through the normal gRPC flow (creating a gRPC client etc.) 
then it will be up to the passed in -// "SupernodeServiceClient" to call the correct interceptors. This client ignores the HTTP middlewares. +// "SupernodeServiceClient" to call the correct interceptors. func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client SupernodeServiceClient) error { - mux.Handle(http.MethodGet, pattern_SupernodeService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetStatus", runtime.WithHTTPPathPattern("/api/v1/status")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetStatus", runtime.WithHTTPPathPattern("/api/v1/status")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -797,13 +870,18 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/ListServices", runtime.WithHTTPPathPattern("/api/v1/services")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/ListServices", runtime.WithHTTPPathPattern("/api/v1/services")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -814,13 +892,18 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_ListServices_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprof_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprof_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprof", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprof", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -831,13 +914,18 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprof_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofHeap", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/heap")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofHeap", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/heap")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -848,13 +936,18 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofHeap_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofGoroutine_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofGoroutine_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofGoroutine", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/goroutine")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofGoroutine", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/goroutine")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -865,13 +958,18 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofGoroutine_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofAllocs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofAllocs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofAllocs", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/allocs")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofAllocs", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/allocs")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -882,13 +980,18 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofAllocs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofBlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofBlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofBlock", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/block")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofBlock", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/block")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -899,13 +1002,18 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofBlock_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofMutex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofMutex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofMutex", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/mutex")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofMutex", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/mutex")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -916,13 +1024,18 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofMutex_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofThreadcreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofThreadcreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofThreadcreate", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/threadcreate")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofThreadcreate", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/threadcreate")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -933,13 +1046,18 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofThreadcreate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofProfile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofProfile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofProfile", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/profile")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofProfile", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/profile")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -950,13 +1068,18 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofProfile_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofCmdline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofCmdline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofCmdline", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/cmdline")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofCmdline", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/cmdline")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -967,13 +1090,18 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofCmdline_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofSymbol_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofSymbol_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofSymbol", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/symbol")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofSymbol", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/symbol")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -984,13 +1112,18 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofSymbol_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) - mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofTrace", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/trace")) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofTrace", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/trace")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -1001,39 +1134,66 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } + forward_SupernodeService_GetRawPprofTrace_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + return nil } var ( - pattern_SupernodeService_GetStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "status"}, "")) - pattern_SupernodeService_ListServices_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "services"}, "")) - pattern_SupernodeService_GetRawPprof_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "raw", "pprof"}, "")) - pattern_SupernodeService_GetRawPprofHeap_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "heap"}, "")) - pattern_SupernodeService_GetRawPprofGoroutine_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "goroutine"}, "")) - pattern_SupernodeService_GetRawPprofAllocs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "allocs"}, "")) - pattern_SupernodeService_GetRawPprofBlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "block"}, "")) - pattern_SupernodeService_GetRawPprofMutex_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "mutex"}, "")) + pattern_SupernodeService_GetStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "status"}, "")) + + pattern_SupernodeService_ListServices_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "services"}, "")) + + pattern_SupernodeService_GetRawPprof_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "raw", "pprof"}, "")) + + 
pattern_SupernodeService_GetRawPprofHeap_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "heap"}, "")) + + pattern_SupernodeService_GetRawPprofGoroutine_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "goroutine"}, "")) + + pattern_SupernodeService_GetRawPprofAllocs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "allocs"}, "")) + + pattern_SupernodeService_GetRawPprofBlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "block"}, "")) + + pattern_SupernodeService_GetRawPprofMutex_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "mutex"}, "")) + pattern_SupernodeService_GetRawPprofThreadcreate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "threadcreate"}, "")) - pattern_SupernodeService_GetRawPprofProfile_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "profile"}, "")) - pattern_SupernodeService_GetRawPprofCmdline_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "cmdline"}, "")) - pattern_SupernodeService_GetRawPprofSymbol_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "symbol"}, "")) - pattern_SupernodeService_GetRawPprofTrace_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "trace"}, "")) + + 
pattern_SupernodeService_GetRawPprofProfile_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "profile"}, "")) + + pattern_SupernodeService_GetRawPprofCmdline_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "cmdline"}, "")) + + pattern_SupernodeService_GetRawPprofSymbol_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "symbol"}, "")) + + pattern_SupernodeService_GetRawPprofTrace_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "trace"}, "")) ) var ( - forward_SupernodeService_GetStatus_0 = runtime.ForwardResponseMessage - forward_SupernodeService_ListServices_0 = runtime.ForwardResponseMessage - forward_SupernodeService_GetRawPprof_0 = runtime.ForwardResponseMessage - forward_SupernodeService_GetRawPprofHeap_0 = runtime.ForwardResponseMessage - forward_SupernodeService_GetRawPprofGoroutine_0 = runtime.ForwardResponseMessage - forward_SupernodeService_GetRawPprofAllocs_0 = runtime.ForwardResponseMessage - forward_SupernodeService_GetRawPprofBlock_0 = runtime.ForwardResponseMessage - forward_SupernodeService_GetRawPprofMutex_0 = runtime.ForwardResponseMessage + forward_SupernodeService_GetStatus_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_ListServices_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprof_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofHeap_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofGoroutine_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofAllocs_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofBlock_0 = runtime.ForwardResponseMessage + + 
forward_SupernodeService_GetRawPprofMutex_0 = runtime.ForwardResponseMessage + forward_SupernodeService_GetRawPprofThreadcreate_0 = runtime.ForwardResponseMessage - forward_SupernodeService_GetRawPprofProfile_0 = runtime.ForwardResponseMessage - forward_SupernodeService_GetRawPprofCmdline_0 = runtime.ForwardResponseMessage - forward_SupernodeService_GetRawPprofSymbol_0 = runtime.ForwardResponseMessage - forward_SupernodeService_GetRawPprofTrace_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofProfile_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofCmdline_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofSymbol_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofTrace_0 = runtime.ForwardResponseMessage ) diff --git a/gen/supernode/service.swagger.json b/gen/supernode/service.swagger.json index 523499b8..8be81666 100644 --- a/gen/supernode/service.swagger.json +++ b/gen/supernode/service.swagger.json @@ -668,6 +668,128 @@ } } }, + "StatusResponseLEP6Metrics": { + "type": "object", + "properties": { + "dispatchResultsTotal": { + "type": "object", + "additionalProperties": { + "type": "string", + "format": "uint64" + }, + "description": "Storage challenge / dispatch signals." 
+ }, + "dispatchThrottledTotal": { + "type": "object", + "additionalProperties": { + "type": "string", + "format": "uint64" + } + }, + "dispatchEpochDurationMillisTotal": { + "type": "object", + "additionalProperties": { + "type": "string", + "format": "uint64" + } + }, + "dispatchEpochDurationMillisMax": { + "type": "object", + "additionalProperties": { + "type": "string", + "format": "uint64" + } + }, + "dispatchEpochDurationCount": { + "type": "object", + "additionalProperties": { + "type": "string", + "format": "uint64" + } + }, + "ticketDiscoveryTotal": { + "type": "object", + "additionalProperties": { + "type": "string", + "format": "uint64" + } + }, + "noTicketProviderActive": { + "type": "string", + "format": "int64" + }, + "healClaimsSubmittedTotal": { + "type": "object", + "additionalProperties": { + "type": "string", + "format": "uint64" + }, + "description": "Self-healing signals." + }, + "healClaimsReconciledTotal": { + "type": "string", + "format": "uint64" + }, + "healVerificationsSubmittedTotal": { + "type": "object", + "additionalProperties": { + "type": "string", + "format": "uint64" + } + }, + "healVerificationsAlreadyExistsTotal": { + "type": "string", + "format": "uint64" + }, + "healFinalizePublishesTotal": { + "type": "string", + "format": "uint64" + }, + "healFinalizeCleanupsTotal": { + "type": "object", + "additionalProperties": { + "type": "string", + "format": "uint64" + } + }, + "selfHealingPendingClaims": { + "type": "string", + "format": "int64" + }, + "selfHealingStagingBytes": { + "type": "string", + "format": "int64" + }, + "recheckCandidatesFoundTotal": { + "type": "string", + "format": "uint64", + "description": "Storage recheck signals." 
+ }, + "recheckEvidenceSubmittedTotal": { + "type": "object", + "additionalProperties": { + "type": "string", + "format": "uint64" + } + }, + "recheckEvidenceAlreadySubmittedTotal": { + "type": "string", + "format": "uint64" + }, + "recheckExecutionFailuresTotal": { + "type": "object", + "additionalProperties": { + "type": "string", + "format": "uint64" + } + }, + "recheckPendingCandidates": { + "type": "string", + "format": "int64" + } + }, + "description": "LEP-6 storage-truth runtime metrics and diagnostics. These are in-memory\ncounters/gauges reset on process restart, matching the existing typed\nstatus-snapshot pattern used for P2P metrics." + }, "StatusResponseNetwork": { "type": "object", "properties": { @@ -876,6 +998,9 @@ }, "p2pMetrics": { "$ref": "#/definitions/StatusResponseP2PMetrics" + }, + "lep6Metrics": { + "$ref": "#/definitions/StatusResponseLEP6Metrics" } }, "title": "The StatusResponse represents system status with clear organization" diff --git a/gen/supernode/service_grpc.pb.go b/gen/supernode/service_grpc.pb.go index 42857bf2..2b905062 100644 --- a/gen/supernode/service_grpc.pb.go +++ b/gen/supernode/service_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.5.1 -// - protoc v3.21.12 +// - protoc v4.25.1 // source: supernode/service.proto package supernode diff --git a/gen/supernode/status.pb.go b/gen/supernode/status.pb.go index 8b6a75d3..7d79b536 100644 --- a/gen/supernode/status.pb.go +++ b/gen/supernode/status.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.9 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc v4.25.1 // source: supernode/status.proto package supernode @@ -11,7 +11,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" - unsafe "unsafe" ) const ( @@ -23,19 +22,22 @@ const ( // StatusRequest controls optional metrics in the status response type StatusRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Optional: include detailed P2P metrics in the response // Maps to query param via grpc-gateway: /api/v1/status?include_p2p_metrics=true IncludeP2PMetrics bool `protobuf:"varint,1,opt,name=include_p2p_metrics,json=includeP2pMetrics,proto3" json:"include_p2p_metrics,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *StatusRequest) Reset() { *x = StatusRequest{} - mi := &file_supernode_status_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_supernode_status_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *StatusRequest) String() string { @@ -46,7 +48,7 @@ func (*StatusRequest) ProtoMessage() {} func (x *StatusRequest) ProtoReflect() protoreflect.Message { mi := &file_supernode_status_proto_msgTypes[0] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -70,7 +72,10 @@ func (x *StatusRequest) GetIncludeP2PMetrics() bool { // The StatusResponse represents system status with clear organization type StatusResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + 
Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` // Supernode version UptimeSeconds uint64 `protobuf:"varint,2,opt,name=uptime_seconds,json=uptimeSeconds,proto3" json:"uptime_seconds,omitempty"` // Uptime in seconds Resources *StatusResponse_Resources `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"` @@ -80,15 +85,16 @@ type StatusResponse struct { Rank int32 `protobuf:"varint,7,opt,name=rank,proto3" json:"rank,omitempty"` // Rank in the top supernodes list (0 if not in top list) IpAddress string `protobuf:"bytes,8,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` // Supernode IP address with port (e.g., "192.168.1.1:4445") P2PMetrics *StatusResponse_P2PMetrics `protobuf:"bytes,9,opt,name=p2p_metrics,json=p2pMetrics,proto3" json:"p2p_metrics,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Lep6Metrics *StatusResponse_LEP6Metrics `protobuf:"bytes,10,opt,name=lep6_metrics,json=lep6Metrics,proto3" json:"lep6_metrics,omitempty"` } func (x *StatusResponse) Reset() { *x = StatusResponse{} - mi := &file_supernode_status_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_supernode_status_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *StatusResponse) String() string { @@ -99,7 +105,7 @@ func (*StatusResponse) ProtoMessage() {} func (x *StatusResponse) ProtoReflect() protoreflect.Message { mi := &file_supernode_status_proto_msgTypes[1] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -177,22 +183,32 @@ func (x *StatusResponse) GetP2PMetrics() *StatusResponse_P2PMetrics { return nil } +func (x *StatusResponse) GetLep6Metrics() *StatusResponse_LEP6Metrics { + if x != nil { + 
return x.Lep6Metrics + } + return nil +} + // System resource information type StatusResponse_Resources struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Cpu *StatusResponse_Resources_CPU `protobuf:"bytes,1,opt,name=cpu,proto3" json:"cpu,omitempty"` Memory *StatusResponse_Resources_Memory `protobuf:"bytes,2,opt,name=memory,proto3" json:"memory,omitempty"` StorageVolumes []*StatusResponse_Resources_Storage `protobuf:"bytes,3,rep,name=storage_volumes,json=storageVolumes,proto3" json:"storage_volumes,omitempty"` HardwareSummary string `protobuf:"bytes,4,opt,name=hardware_summary,json=hardwareSummary,proto3" json:"hardware_summary,omitempty"` // Formatted hardware summary (e.g., "8 cores / 32GB RAM") - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *StatusResponse_Resources) Reset() { *x = StatusResponse_Resources{} - mi := &file_supernode_status_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_supernode_status_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *StatusResponse_Resources) String() string { @@ -203,7 +219,7 @@ func (*StatusResponse_Resources) ProtoMessage() {} func (x *StatusResponse_Resources) ProtoReflect() protoreflect.Message { mi := &file_supernode_status_proto_msgTypes[2] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -248,19 +264,22 @@ func (x *StatusResponse_Resources) GetHardwareSummary() string { // ServiceTasks contains task information for a specific service type StatusResponse_ServiceTasks struct { - state protoimpl.MessageState `protogen:"open.v1"` - ServiceName string 
`protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` - TaskIds []string `protobuf:"bytes,2,rep,name=task_ids,json=taskIds,proto3" json:"task_ids,omitempty"` - TaskCount int32 `protobuf:"varint,3,opt,name=task_count,json=taskCount,proto3" json:"task_count,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + TaskIds []string `protobuf:"bytes,2,rep,name=task_ids,json=taskIds,proto3" json:"task_ids,omitempty"` + TaskCount int32 `protobuf:"varint,3,opt,name=task_count,json=taskCount,proto3" json:"task_count,omitempty"` } func (x *StatusResponse_ServiceTasks) Reset() { *x = StatusResponse_ServiceTasks{} - mi := &file_supernode_status_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_supernode_status_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *StatusResponse_ServiceTasks) String() string { @@ -271,7 +290,7 @@ func (*StatusResponse_ServiceTasks) ProtoMessage() {} func (x *StatusResponse_ServiceTasks) ProtoReflect() protoreflect.Message { mi := &file_supernode_status_proto_msgTypes[3] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -309,18 +328,21 @@ func (x *StatusResponse_ServiceTasks) GetTaskCount() int32 { // Network information type StatusResponse_Network struct { - state protoimpl.MessageState `protogen:"open.v1"` - PeersCount int32 `protobuf:"varint,1,opt,name=peers_count,json=peersCount,proto3" json:"peers_count,omitempty"` // Number of connected peers in P2P network - PeerAddresses []string 
`protobuf:"bytes,2,rep,name=peer_addresses,json=peerAddresses,proto3" json:"peer_addresses,omitempty"` // List of connected peer addresses (optional, may be empty for privacy) - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PeersCount int32 `protobuf:"varint,1,opt,name=peers_count,json=peersCount,proto3" json:"peers_count,omitempty"` // Number of connected peers in P2P network + PeerAddresses []string `protobuf:"bytes,2,rep,name=peer_addresses,json=peerAddresses,proto3" json:"peer_addresses,omitempty"` // List of connected peer addresses (optional, may be empty for privacy) } func (x *StatusResponse_Network) Reset() { *x = StatusResponse_Network{} - mi := &file_supernode_status_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_supernode_status_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *StatusResponse_Network) String() string { @@ -331,7 +353,7 @@ func (*StatusResponse_Network) ProtoMessage() {} func (x *StatusResponse_Network) ProtoReflect() protoreflect.Message { mi := &file_supernode_status_proto_msgTypes[4] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -362,22 +384,25 @@ func (x *StatusResponse_Network) GetPeerAddresses() []string { // P2P metrics and diagnostics (additive field) type StatusResponse_P2PMetrics struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + DhtMetrics *StatusResponse_P2PMetrics_DhtMetrics `protobuf:"bytes,1,opt,name=dht_metrics,json=dhtMetrics,proto3" json:"dht_metrics,omitempty"` - NetworkHandleMetrics 
map[string]*StatusResponse_P2PMetrics_HandleCounters `protobuf:"bytes,2,rep,name=network_handle_metrics,json=networkHandleMetrics,proto3" json:"network_handle_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - ConnPoolMetrics map[string]int64 `protobuf:"bytes,3,rep,name=conn_pool_metrics,json=connPoolMetrics,proto3" json:"conn_pool_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + NetworkHandleMetrics map[string]*StatusResponse_P2PMetrics_HandleCounters `protobuf:"bytes,2,rep,name=network_handle_metrics,json=networkHandleMetrics,proto3" json:"network_handle_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ConnPoolMetrics map[string]int64 `protobuf:"bytes,3,rep,name=conn_pool_metrics,json=connPoolMetrics,proto3" json:"conn_pool_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` BanList []*StatusResponse_P2PMetrics_BanEntry `protobuf:"bytes,4,rep,name=ban_list,json=banList,proto3" json:"ban_list,omitempty"` Database *StatusResponse_P2PMetrics_DatabaseStats `protobuf:"bytes,5,opt,name=database,proto3" json:"database,omitempty"` Disk *StatusResponse_P2PMetrics_DiskStatus `protobuf:"bytes,6,opt,name=disk,proto3" json:"disk,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *StatusResponse_P2PMetrics) Reset() { *x = StatusResponse_P2PMetrics{} - mi := &file_supernode_status_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_supernode_status_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *StatusResponse_P2PMetrics) String() string { @@ -388,7 +413,7 @@ func (*StatusResponse_P2PMetrics) ProtoMessage() {} func (x *StatusResponse_P2PMetrics) ProtoReflect() 
protoreflect.Message { mi := &file_supernode_status_proto_msgTypes[5] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -445,19 +470,227 @@ func (x *StatusResponse_P2PMetrics) GetDisk() *StatusResponse_P2PMetrics_DiskSta return nil } -type StatusResponse_Resources_CPU struct { - state protoimpl.MessageState `protogen:"open.v1"` - UsagePercent float64 `protobuf:"fixed64,1,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // CPU usage percentage (0-100) - Cores int32 `protobuf:"varint,2,opt,name=cores,proto3" json:"cores,omitempty"` // Number of CPU cores +// LEP-6 storage-truth runtime metrics and diagnostics. These are in-memory +// counters/gauges reset on process restart, matching the existing typed +// status-snapshot pattern used for P2P metrics. +type StatusResponse_LEP6Metrics struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + // Storage challenge / dispatch signals. 
+ DispatchResultsTotal map[string]uint64 `protobuf:"bytes,1,rep,name=dispatch_results_total,json=dispatchResultsTotal,proto3" json:"dispatch_results_total,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + DispatchThrottledTotal map[string]uint64 `protobuf:"bytes,2,rep,name=dispatch_throttled_total,json=dispatchThrottledTotal,proto3" json:"dispatch_throttled_total,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + DispatchEpochDurationMillisTotal map[string]uint64 `protobuf:"bytes,3,rep,name=dispatch_epoch_duration_millis_total,json=dispatchEpochDurationMillisTotal,proto3" json:"dispatch_epoch_duration_millis_total,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + DispatchEpochDurationMillisMax map[string]uint64 `protobuf:"bytes,4,rep,name=dispatch_epoch_duration_millis_max,json=dispatchEpochDurationMillisMax,proto3" json:"dispatch_epoch_duration_millis_max,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + DispatchEpochDurationCount map[string]uint64 `protobuf:"bytes,5,rep,name=dispatch_epoch_duration_count,json=dispatchEpochDurationCount,proto3" json:"dispatch_epoch_duration_count,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + TicketDiscoveryTotal map[string]uint64 `protobuf:"bytes,6,rep,name=ticket_discovery_total,json=ticketDiscoveryTotal,proto3" json:"ticket_discovery_total,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + NoTicketProviderActive int64 `protobuf:"varint,7,opt,name=no_ticket_provider_active,json=noTicketProviderActive,proto3" json:"no_ticket_provider_active,omitempty"` + // Self-healing signals. 
+ HealClaimsSubmittedTotal map[string]uint64 `protobuf:"bytes,8,rep,name=heal_claims_submitted_total,json=healClaimsSubmittedTotal,proto3" json:"heal_claims_submitted_total,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + HealClaimsReconciledTotal uint64 `protobuf:"varint,9,opt,name=heal_claims_reconciled_total,json=healClaimsReconciledTotal,proto3" json:"heal_claims_reconciled_total,omitempty"` + HealVerificationsSubmittedTotal map[string]uint64 `protobuf:"bytes,10,rep,name=heal_verifications_submitted_total,json=healVerificationsSubmittedTotal,proto3" json:"heal_verifications_submitted_total,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + HealVerificationsAlreadyExistsTotal uint64 `protobuf:"varint,11,opt,name=heal_verifications_already_exists_total,json=healVerificationsAlreadyExistsTotal,proto3" json:"heal_verifications_already_exists_total,omitempty"` + HealFinalizePublishesTotal uint64 `protobuf:"varint,12,opt,name=heal_finalize_publishes_total,json=healFinalizePublishesTotal,proto3" json:"heal_finalize_publishes_total,omitempty"` + HealFinalizeCleanupsTotal map[string]uint64 `protobuf:"bytes,13,rep,name=heal_finalize_cleanups_total,json=healFinalizeCleanupsTotal,proto3" json:"heal_finalize_cleanups_total,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + SelfHealingPendingClaims int64 `protobuf:"varint,14,opt,name=self_healing_pending_claims,json=selfHealingPendingClaims,proto3" json:"self_healing_pending_claims,omitempty"` + SelfHealingStagingBytes int64 `protobuf:"varint,15,opt,name=self_healing_staging_bytes,json=selfHealingStagingBytes,proto3" json:"self_healing_staging_bytes,omitempty"` + // Storage recheck signals. 
+ RecheckCandidatesFoundTotal uint64 `protobuf:"varint,16,opt,name=recheck_candidates_found_total,json=recheckCandidatesFoundTotal,proto3" json:"recheck_candidates_found_total,omitempty"` + RecheckEvidenceSubmittedTotal map[string]uint64 `protobuf:"bytes,17,rep,name=recheck_evidence_submitted_total,json=recheckEvidenceSubmittedTotal,proto3" json:"recheck_evidence_submitted_total,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + RecheckEvidenceAlreadySubmittedTotal uint64 `protobuf:"varint,18,opt,name=recheck_evidence_already_submitted_total,json=recheckEvidenceAlreadySubmittedTotal,proto3" json:"recheck_evidence_already_submitted_total,omitempty"` + RecheckExecutionFailuresTotal map[string]uint64 `protobuf:"bytes,19,rep,name=recheck_execution_failures_total,json=recheckExecutionFailuresTotal,proto3" json:"recheck_execution_failures_total,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + RecheckPendingCandidates int64 `protobuf:"varint,20,opt,name=recheck_pending_candidates,json=recheckPendingCandidates,proto3" json:"recheck_pending_candidates,omitempty"` +} + +func (x *StatusResponse_LEP6Metrics) Reset() { + *x = StatusResponse_LEP6Metrics{} + if protoimpl.UnsafeEnabled { + mi := &file_supernode_status_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatusResponse_LEP6Metrics) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_LEP6Metrics) ProtoMessage() {} + +func (x *StatusResponse_LEP6Metrics) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
StatusResponse_LEP6Metrics.ProtoReflect.Descriptor instead. +func (*StatusResponse_LEP6Metrics) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 4} +} + +func (x *StatusResponse_LEP6Metrics) GetDispatchResultsTotal() map[string]uint64 { + if x != nil { + return x.DispatchResultsTotal + } + return nil +} + +func (x *StatusResponse_LEP6Metrics) GetDispatchThrottledTotal() map[string]uint64 { + if x != nil { + return x.DispatchThrottledTotal + } + return nil +} + +func (x *StatusResponse_LEP6Metrics) GetDispatchEpochDurationMillisTotal() map[string]uint64 { + if x != nil { + return x.DispatchEpochDurationMillisTotal + } + return nil +} + +func (x *StatusResponse_LEP6Metrics) GetDispatchEpochDurationMillisMax() map[string]uint64 { + if x != nil { + return x.DispatchEpochDurationMillisMax + } + return nil +} + +func (x *StatusResponse_LEP6Metrics) GetDispatchEpochDurationCount() map[string]uint64 { + if x != nil { + return x.DispatchEpochDurationCount + } + return nil +} + +func (x *StatusResponse_LEP6Metrics) GetTicketDiscoveryTotal() map[string]uint64 { + if x != nil { + return x.TicketDiscoveryTotal + } + return nil +} + +func (x *StatusResponse_LEP6Metrics) GetNoTicketProviderActive() int64 { + if x != nil { + return x.NoTicketProviderActive + } + return 0 +} + +func (x *StatusResponse_LEP6Metrics) GetHealClaimsSubmittedTotal() map[string]uint64 { + if x != nil { + return x.HealClaimsSubmittedTotal + } + return nil +} + +func (x *StatusResponse_LEP6Metrics) GetHealClaimsReconciledTotal() uint64 { + if x != nil { + return x.HealClaimsReconciledTotal + } + return 0 +} + +func (x *StatusResponse_LEP6Metrics) GetHealVerificationsSubmittedTotal() map[string]uint64 { + if x != nil { + return x.HealVerificationsSubmittedTotal + } + return nil +} + +func (x *StatusResponse_LEP6Metrics) GetHealVerificationsAlreadyExistsTotal() uint64 { + if x != nil { + return x.HealVerificationsAlreadyExistsTotal + } + return 0 +} + +func (x 
*StatusResponse_LEP6Metrics) GetHealFinalizePublishesTotal() uint64 { + if x != nil { + return x.HealFinalizePublishesTotal + } + return 0 +} + +func (x *StatusResponse_LEP6Metrics) GetHealFinalizeCleanupsTotal() map[string]uint64 { + if x != nil { + return x.HealFinalizeCleanupsTotal + } + return nil +} + +func (x *StatusResponse_LEP6Metrics) GetSelfHealingPendingClaims() int64 { + if x != nil { + return x.SelfHealingPendingClaims + } + return 0 +} + +func (x *StatusResponse_LEP6Metrics) GetSelfHealingStagingBytes() int64 { + if x != nil { + return x.SelfHealingStagingBytes + } + return 0 +} + +func (x *StatusResponse_LEP6Metrics) GetRecheckCandidatesFoundTotal() uint64 { + if x != nil { + return x.RecheckCandidatesFoundTotal + } + return 0 +} + +func (x *StatusResponse_LEP6Metrics) GetRecheckEvidenceSubmittedTotal() map[string]uint64 { + if x != nil { + return x.RecheckEvidenceSubmittedTotal + } + return nil +} + +func (x *StatusResponse_LEP6Metrics) GetRecheckEvidenceAlreadySubmittedTotal() uint64 { + if x != nil { + return x.RecheckEvidenceAlreadySubmittedTotal + } + return 0 +} + +func (x *StatusResponse_LEP6Metrics) GetRecheckExecutionFailuresTotal() map[string]uint64 { + if x != nil { + return x.RecheckExecutionFailuresTotal + } + return nil +} + +func (x *StatusResponse_LEP6Metrics) GetRecheckPendingCandidates() int64 { + if x != nil { + return x.RecheckPendingCandidates + } + return 0 +} + +type StatusResponse_Resources_CPU struct { + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + UsagePercent float64 `protobuf:"fixed64,1,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // CPU usage percentage (0-100) + Cores int32 `protobuf:"varint,2,opt,name=cores,proto3" json:"cores,omitempty"` // Number of CPU cores } func (x *StatusResponse_Resources_CPU) Reset() { *x = StatusResponse_Resources_CPU{} - mi := &file_supernode_status_proto_msgTypes[6] - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_supernode_status_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *StatusResponse_Resources_CPU) String() string { @@ -467,8 +700,8 @@ func (x *StatusResponse_Resources_CPU) String() string { func (*StatusResponse_Resources_CPU) ProtoMessage() {} func (x *StatusResponse_Resources_CPU) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[6] - if x != nil { + mi := &file_supernode_status_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -498,20 +731,23 @@ func (x *StatusResponse_Resources_CPU) GetCores() int32 { } type StatusResponse_Resources_Memory struct { - state protoimpl.MessageState `protogen:"open.v1"` - TotalGb float64 `protobuf:"fixed64,1,opt,name=total_gb,json=totalGb,proto3" json:"total_gb,omitempty"` // Total memory in GB - UsedGb float64 `protobuf:"fixed64,2,opt,name=used_gb,json=usedGb,proto3" json:"used_gb,omitempty"` // Used memory in GB - AvailableGb float64 `protobuf:"fixed64,3,opt,name=available_gb,json=availableGb,proto3" json:"available_gb,omitempty"` // Available memory in GB - UsagePercent float64 `protobuf:"fixed64,4,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // Memory usage percentage (0-100) - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TotalGb float64 `protobuf:"fixed64,1,opt,name=total_gb,json=totalGb,proto3" json:"total_gb,omitempty"` // Total memory in GB + UsedGb float64 `protobuf:"fixed64,2,opt,name=used_gb,json=usedGb,proto3" json:"used_gb,omitempty"` // Used memory in GB + AvailableGb float64 `protobuf:"fixed64,3,opt,name=available_gb,json=availableGb,proto3" 
json:"available_gb,omitempty"` // Available memory in GB + UsagePercent float64 `protobuf:"fixed64,4,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // Memory usage percentage (0-100) } func (x *StatusResponse_Resources_Memory) Reset() { *x = StatusResponse_Resources_Memory{} - mi := &file_supernode_status_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_supernode_status_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *StatusResponse_Resources_Memory) String() string { @@ -521,8 +757,8 @@ func (x *StatusResponse_Resources_Memory) String() string { func (*StatusResponse_Resources_Memory) ProtoMessage() {} func (x *StatusResponse_Resources_Memory) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[7] - if x != nil { + mi := &file_supernode_status_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -566,21 +802,24 @@ func (x *StatusResponse_Resources_Memory) GetUsagePercent() float64 { } type StatusResponse_Resources_Storage struct { - state protoimpl.MessageState `protogen:"open.v1"` - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` // Storage path being monitored - TotalBytes uint64 `protobuf:"varint,2,opt,name=total_bytes,json=totalBytes,proto3" json:"total_bytes,omitempty"` - UsedBytes uint64 `protobuf:"varint,3,opt,name=used_bytes,json=usedBytes,proto3" json:"used_bytes,omitempty"` - AvailableBytes uint64 `protobuf:"varint,4,opt,name=available_bytes,json=availableBytes,proto3" json:"available_bytes,omitempty"` - UsagePercent float64 `protobuf:"fixed64,5,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // Storage usage percentage (0-100) - unknownFields 
protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` // Storage path being monitored + TotalBytes uint64 `protobuf:"varint,2,opt,name=total_bytes,json=totalBytes,proto3" json:"total_bytes,omitempty"` + UsedBytes uint64 `protobuf:"varint,3,opt,name=used_bytes,json=usedBytes,proto3" json:"used_bytes,omitempty"` + AvailableBytes uint64 `protobuf:"varint,4,opt,name=available_bytes,json=availableBytes,proto3" json:"available_bytes,omitempty"` + UsagePercent float64 `protobuf:"fixed64,5,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // Storage usage percentage (0-100) } func (x *StatusResponse_Resources_Storage) Reset() { *x = StatusResponse_Resources_Storage{} - mi := &file_supernode_status_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_supernode_status_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *StatusResponse_Resources_Storage) String() string { @@ -590,8 +829,8 @@ func (x *StatusResponse_Resources_Storage) String() string { func (*StatusResponse_Resources_Storage) ProtoMessage() {} func (x *StatusResponse_Resources_Storage) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[8] - if x != nil { + mi := &file_supernode_status_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -643,20 +882,23 @@ func (x *StatusResponse_Resources_Storage) GetUsagePercent() float64 { // Rolling DHT metrics snapshot type StatusResponse_P2PMetrics_DhtMetrics struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + 
sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + StoreSuccessRecent []*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint `protobuf:"bytes,1,rep,name=store_success_recent,json=storeSuccessRecent,proto3" json:"store_success_recent,omitempty"` BatchRetrieveRecent []*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint `protobuf:"bytes,2,rep,name=batch_retrieve_recent,json=batchRetrieveRecent,proto3" json:"batch_retrieve_recent,omitempty"` HotPathBannedSkips int64 `protobuf:"varint,3,opt,name=hot_path_banned_skips,json=hotPathBannedSkips,proto3" json:"hot_path_banned_skips,omitempty"` // counter HotPathBanIncrements int64 `protobuf:"varint,4,opt,name=hot_path_ban_increments,json=hotPathBanIncrements,proto3" json:"hot_path_ban_increments,omitempty"` // counter - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *StatusResponse_P2PMetrics_DhtMetrics) Reset() { *x = StatusResponse_P2PMetrics_DhtMetrics{} - mi := &file_supernode_status_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_supernode_status_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *StatusResponse_P2PMetrics_DhtMetrics) String() string { @@ -666,8 +908,8 @@ func (x *StatusResponse_P2PMetrics_DhtMetrics) String() string { func (*StatusResponse_P2PMetrics_DhtMetrics) ProtoMessage() {} func (x *StatusResponse_P2PMetrics_DhtMetrics) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[9] - if x != nil { + mi := &file_supernode_status_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -712,20 +954,23 @@ func (x *StatusResponse_P2PMetrics_DhtMetrics) GetHotPathBanIncrements() int64 { // Per-handler counters from network layer type 
StatusResponse_P2PMetrics_HandleCounters struct { - state protoimpl.MessageState `protogen:"open.v1"` - Total int64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` - Success int64 `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` - Failure int64 `protobuf:"varint,3,opt,name=failure,proto3" json:"failure,omitempty"` - Timeout int64 `protobuf:"varint,4,opt,name=timeout,proto3" json:"timeout,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Total int64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` + Success int64 `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` + Failure int64 `protobuf:"varint,3,opt,name=failure,proto3" json:"failure,omitempty"` + Timeout int64 `protobuf:"varint,4,opt,name=timeout,proto3" json:"timeout,omitempty"` } func (x *StatusResponse_P2PMetrics_HandleCounters) Reset() { *x = StatusResponse_P2PMetrics_HandleCounters{} - mi := &file_supernode_status_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_supernode_status_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *StatusResponse_P2PMetrics_HandleCounters) String() string { @@ -735,8 +980,8 @@ func (x *StatusResponse_P2PMetrics_HandleCounters) String() string { func (*StatusResponse_P2PMetrics_HandleCounters) ProtoMessage() {} func (x *StatusResponse_P2PMetrics_HandleCounters) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[10] - if x != nil { + mi := &file_supernode_status_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -781,22 +1026,25 @@ func (x 
*StatusResponse_P2PMetrics_HandleCounters) GetTimeout() int64 { // Ban list entry type StatusResponse_P2PMetrics_BanEntry struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // printable ID - Ip string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` // last seen IP - Port uint32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` // last seen port - Count int32 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` // failure count - CreatedAtUnix int64 `protobuf:"varint,5,opt,name=created_at_unix,json=createdAtUnix,proto3" json:"created_at_unix,omitempty"` // first ban time (unix seconds) - AgeSeconds int64 `protobuf:"varint,6,opt,name=age_seconds,json=ageSeconds,proto3" json:"age_seconds,omitempty"` // age in seconds - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // printable ID + Ip string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` // last seen IP + Port uint32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` // last seen port + Count int32 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` // failure count + CreatedAtUnix int64 `protobuf:"varint,5,opt,name=created_at_unix,json=createdAtUnix,proto3" json:"created_at_unix,omitempty"` // first ban time (unix seconds) + AgeSeconds int64 `protobuf:"varint,6,opt,name=age_seconds,json=ageSeconds,proto3" json:"age_seconds,omitempty"` // age in seconds } func (x *StatusResponse_P2PMetrics_BanEntry) Reset() { *x = StatusResponse_P2PMetrics_BanEntry{} - mi := &file_supernode_status_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_supernode_status_proto_msgTypes[12] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *StatusResponse_P2PMetrics_BanEntry) String() string { @@ -806,8 +1054,8 @@ func (x *StatusResponse_P2PMetrics_BanEntry) String() string { func (*StatusResponse_P2PMetrics_BanEntry) ProtoMessage() {} func (x *StatusResponse_P2PMetrics_BanEntry) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[11] - if x != nil { + mi := &file_supernode_status_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -866,18 +1114,21 @@ func (x *StatusResponse_P2PMetrics_BanEntry) GetAgeSeconds() int64 { // DB stats type StatusResponse_P2PMetrics_DatabaseStats struct { - state protoimpl.MessageState `protogen:"open.v1"` - P2PDbSizeMb float64 `protobuf:"fixed64,1,opt,name=p2p_db_size_mb,json=p2pDbSizeMb,proto3" json:"p2p_db_size_mb,omitempty"` - P2PDbRecordsCount int64 `protobuf:"varint,2,opt,name=p2p_db_records_count,json=p2pDbRecordsCount,proto3" json:"p2p_db_records_count,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + P2PDbSizeMb float64 `protobuf:"fixed64,1,opt,name=p2p_db_size_mb,json=p2pDbSizeMb,proto3" json:"p2p_db_size_mb,omitempty"` + P2PDbRecordsCount int64 `protobuf:"varint,2,opt,name=p2p_db_records_count,json=p2pDbRecordsCount,proto3" json:"p2p_db_records_count,omitempty"` } func (x *StatusResponse_P2PMetrics_DatabaseStats) Reset() { *x = StatusResponse_P2PMetrics_DatabaseStats{} - mi := &file_supernode_status_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_supernode_status_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x 
*StatusResponse_P2PMetrics_DatabaseStats) String() string { @@ -887,8 +1138,8 @@ func (x *StatusResponse_P2PMetrics_DatabaseStats) String() string { func (*StatusResponse_P2PMetrics_DatabaseStats) ProtoMessage() {} func (x *StatusResponse_P2PMetrics_DatabaseStats) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[12] - if x != nil { + mi := &file_supernode_status_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -919,19 +1170,22 @@ func (x *StatusResponse_P2PMetrics_DatabaseStats) GetP2PDbRecordsCount() int64 { // Disk status type StatusResponse_P2PMetrics_DiskStatus struct { - state protoimpl.MessageState `protogen:"open.v1"` - AllMb float64 `protobuf:"fixed64,1,opt,name=all_mb,json=allMb,proto3" json:"all_mb,omitempty"` - UsedMb float64 `protobuf:"fixed64,2,opt,name=used_mb,json=usedMb,proto3" json:"used_mb,omitempty"` - FreeMb float64 `protobuf:"fixed64,3,opt,name=free_mb,json=freeMb,proto3" json:"free_mb,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AllMb float64 `protobuf:"fixed64,1,opt,name=all_mb,json=allMb,proto3" json:"all_mb,omitempty"` + UsedMb float64 `protobuf:"fixed64,2,opt,name=used_mb,json=usedMb,proto3" json:"used_mb,omitempty"` + FreeMb float64 `protobuf:"fixed64,3,opt,name=free_mb,json=freeMb,proto3" json:"free_mb,omitempty"` } func (x *StatusResponse_P2PMetrics_DiskStatus) Reset() { *x = StatusResponse_P2PMetrics_DiskStatus{} - mi := &file_supernode_status_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_supernode_status_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *StatusResponse_P2PMetrics_DiskStatus) String() 
string { @@ -941,8 +1195,8 @@ func (x *StatusResponse_P2PMetrics_DiskStatus) String() string { func (*StatusResponse_P2PMetrics_DiskStatus) ProtoMessage() {} func (x *StatusResponse_P2PMetrics_DiskStatus) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[13] - if x != nil { + mi := &file_supernode_status_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -979,20 +1233,23 @@ func (x *StatusResponse_P2PMetrics_DiskStatus) GetFreeMb() float64 { } type StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint struct { - state protoimpl.MessageState `protogen:"open.v1"` - TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` // event time (unix seconds) - Requests int32 `protobuf:"varint,2,opt,name=requests,proto3" json:"requests,omitempty"` // total node RPCs attempted - Successful int32 `protobuf:"varint,3,opt,name=successful,proto3" json:"successful,omitempty"` // successful node RPCs - SuccessRate float64 `protobuf:"fixed64,4,opt,name=success_rate,json=successRate,proto3" json:"success_rate,omitempty"` // percentage (0-100) - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` // event time (unix seconds) + Requests int32 `protobuf:"varint,2,opt,name=requests,proto3" json:"requests,omitempty"` // total node RPCs attempted + Successful int32 `protobuf:"varint,3,opt,name=successful,proto3" json:"successful,omitempty"` // successful node RPCs + SuccessRate float64 `protobuf:"fixed64,4,opt,name=success_rate,json=successRate,proto3" json:"success_rate,omitempty"` // percentage (0-100) } func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) Reset() { *x = 
StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint{} - mi := &file_supernode_status_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_supernode_status_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) String() string { @@ -1002,8 +1259,8 @@ func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) String() string func (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) ProtoMessage() {} func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[16] - if x != nil { + mi := &file_supernode_status_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1047,22 +1304,25 @@ func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetSuccessRate( } type StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint struct { - state protoimpl.MessageState `protogen:"open.v1"` - TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` // event time (unix seconds) - Keys int32 `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` // keys requested - Required int32 `protobuf:"varint,3,opt,name=required,proto3" json:"required,omitempty"` // required count - FoundLocal int32 `protobuf:"varint,4,opt,name=found_local,json=foundLocal,proto3" json:"found_local,omitempty"` // found locally - FoundNetwork int32 `protobuf:"varint,5,opt,name=found_network,json=foundNetwork,proto3" json:"found_network,omitempty"` // found on network - DurationMs int64 `protobuf:"varint,6,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"` // duration in milliseconds - unknownFields 
protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` // event time (unix seconds) + Keys int32 `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` // keys requested + Required int32 `protobuf:"varint,3,opt,name=required,proto3" json:"required,omitempty"` // required count + FoundLocal int32 `protobuf:"varint,4,opt,name=found_local,json=foundLocal,proto3" json:"found_local,omitempty"` // found locally + FoundNetwork int32 `protobuf:"varint,5,opt,name=found_network,json=foundNetwork,proto3" json:"found_network,omitempty"` // found on network + DurationMs int64 `protobuf:"varint,6,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"` // duration in milliseconds } func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) Reset() { *x = StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint{} - mi := &file_supernode_status_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_supernode_status_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) String() string { @@ -1072,8 +1332,8 @@ func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) String() strin func (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) ProtoMessage() {} func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) ProtoReflect() protoreflect.Message { - mi := &file_supernode_status_proto_msgTypes[17] - if x != nil { + mi := &file_supernode_status_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1132,125 +1392,428 @@ func 
(x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetDurationMs( var File_supernode_status_proto protoreflect.FileDescriptor -const file_supernode_status_proto_rawDesc = "" + - "\n" + - "\x16supernode/status.proto\x12\tsupernode\"?\n" + - "\rStatusRequest\x12.\n" + - "\x13include_p2p_metrics\x18\x01 \x01(\bR\x11includeP2pMetrics\"\x84\x19\n" + - "\x0eStatusResponse\x12\x18\n" + - "\aversion\x18\x01 \x01(\tR\aversion\x12%\n" + - "\x0euptime_seconds\x18\x02 \x01(\x04R\ruptimeSeconds\x12A\n" + - "\tresources\x18\x03 \x01(\v2#.supernode.StatusResponse.ResourcesR\tresources\x12K\n" + - "\rrunning_tasks\x18\x04 \x03(\v2&.supernode.StatusResponse.ServiceTasksR\frunningTasks\x12/\n" + - "\x13registered_services\x18\x05 \x03(\tR\x12registeredServices\x12;\n" + - "\anetwork\x18\x06 \x01(\v2!.supernode.StatusResponse.NetworkR\anetwork\x12\x12\n" + - "\x04rank\x18\a \x01(\x05R\x04rank\x12\x1d\n" + - "\n" + - "ip_address\x18\b \x01(\tR\tipAddress\x12E\n" + - "\vp2p_metrics\x18\t \x01(\v2$.supernode.StatusResponse.P2PMetricsR\n" + - "p2pMetrics\x1a\x82\x05\n" + - "\tResources\x129\n" + - "\x03cpu\x18\x01 \x01(\v2'.supernode.StatusResponse.Resources.CPUR\x03cpu\x12B\n" + - "\x06memory\x18\x02 \x01(\v2*.supernode.StatusResponse.Resources.MemoryR\x06memory\x12T\n" + - "\x0fstorage_volumes\x18\x03 \x03(\v2+.supernode.StatusResponse.Resources.StorageR\x0estorageVolumes\x12)\n" + - "\x10hardware_summary\x18\x04 \x01(\tR\x0fhardwareSummary\x1a@\n" + - "\x03CPU\x12#\n" + - "\rusage_percent\x18\x01 \x01(\x01R\fusagePercent\x12\x14\n" + - "\x05cores\x18\x02 \x01(\x05R\x05cores\x1a\x84\x01\n" + - "\x06Memory\x12\x19\n" + - "\btotal_gb\x18\x01 \x01(\x01R\atotalGb\x12\x17\n" + - "\aused_gb\x18\x02 \x01(\x01R\x06usedGb\x12!\n" + - "\favailable_gb\x18\x03 \x01(\x01R\vavailableGb\x12#\n" + - "\rusage_percent\x18\x04 \x01(\x01R\fusagePercent\x1a\xab\x01\n" + - "\aStorage\x12\x12\n" + - "\x04path\x18\x01 \x01(\tR\x04path\x12\x1f\n" + - "\vtotal_bytes\x18\x02 \x01(\x04R\n" + - 
"totalBytes\x12\x1d\n" + - "\n" + - "used_bytes\x18\x03 \x01(\x04R\tusedBytes\x12'\n" + - "\x0favailable_bytes\x18\x04 \x01(\x04R\x0eavailableBytes\x12#\n" + - "\rusage_percent\x18\x05 \x01(\x01R\fusagePercent\x1ak\n" + - "\fServiceTasks\x12!\n" + - "\fservice_name\x18\x01 \x01(\tR\vserviceName\x12\x19\n" + - "\btask_ids\x18\x02 \x03(\tR\ataskIds\x12\x1d\n" + - "\n" + - "task_count\x18\x03 \x01(\x05R\ttaskCount\x1aQ\n" + - "\aNetwork\x12\x1f\n" + - "\vpeers_count\x18\x01 \x01(\x05R\n" + - "peersCount\x12%\n" + - "\x0epeer_addresses\x18\x02 \x03(\tR\rpeerAddresses\x1a\xf3\x0e\n" + - "\n" + - "P2PMetrics\x12P\n" + - "\vdht_metrics\x18\x01 \x01(\v2/.supernode.StatusResponse.P2PMetrics.DhtMetricsR\n" + - "dhtMetrics\x12t\n" + - "\x16network_handle_metrics\x18\x02 \x03(\v2>.supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntryR\x14networkHandleMetrics\x12e\n" + - "\x11conn_pool_metrics\x18\x03 \x03(\v29.supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntryR\x0fconnPoolMetrics\x12H\n" + - "\bban_list\x18\x04 \x03(\v2-.supernode.StatusResponse.P2PMetrics.BanEntryR\abanList\x12N\n" + - "\bdatabase\x18\x05 \x01(\v22.supernode.StatusResponse.P2PMetrics.DatabaseStatsR\bdatabase\x12C\n" + - "\x04disk\x18\x06 \x01(\v2/.supernode.StatusResponse.P2PMetrics.DiskStatusR\x04disk\x1a\xc0\x05\n" + - "\n" + - "DhtMetrics\x12s\n" + - "\x14store_success_recent\x18\x01 \x03(\v2A.supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPointR\x12storeSuccessRecent\x12v\n" + - "\x15batch_retrieve_recent\x18\x02 \x03(\v2B.supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePointR\x13batchRetrieveRecent\x121\n" + - "\x15hot_path_banned_skips\x18\x03 \x01(\x03R\x12hotPathBannedSkips\x125\n" + - "\x17hot_path_ban_increments\x18\x04 \x01(\x03R\x14hotPathBanIncrements\x1a\x8f\x01\n" + - "\x11StoreSuccessPoint\x12\x1b\n" + - "\ttime_unix\x18\x01 \x01(\x03R\btimeUnix\x12\x1a\n" + - "\brequests\x18\x02 \x01(\x05R\brequests\x12\x1e\n" + - "\n" + - "successful\x18\x03 
\x01(\x05R\n" + - "successful\x12!\n" + - "\fsuccess_rate\x18\x04 \x01(\x01R\vsuccessRate\x1a\xc8\x01\n" + - "\x12BatchRetrievePoint\x12\x1b\n" + - "\ttime_unix\x18\x01 \x01(\x03R\btimeUnix\x12\x12\n" + - "\x04keys\x18\x02 \x01(\x05R\x04keys\x12\x1a\n" + - "\brequired\x18\x03 \x01(\x05R\brequired\x12\x1f\n" + - "\vfound_local\x18\x04 \x01(\x05R\n" + - "foundLocal\x12#\n" + - "\rfound_network\x18\x05 \x01(\x05R\ffoundNetwork\x12\x1f\n" + - "\vduration_ms\x18\x06 \x01(\x03R\n" + - "durationMs\x1at\n" + - "\x0eHandleCounters\x12\x14\n" + - "\x05total\x18\x01 \x01(\x03R\x05total\x12\x18\n" + - "\asuccess\x18\x02 \x01(\x03R\asuccess\x12\x18\n" + - "\afailure\x18\x03 \x01(\x03R\afailure\x12\x18\n" + - "\atimeout\x18\x04 \x01(\x03R\atimeout\x1a\x9d\x01\n" + - "\bBanEntry\x12\x0e\n" + - "\x02id\x18\x01 \x01(\tR\x02id\x12\x0e\n" + - "\x02ip\x18\x02 \x01(\tR\x02ip\x12\x12\n" + - "\x04port\x18\x03 \x01(\rR\x04port\x12\x14\n" + - "\x05count\x18\x04 \x01(\x05R\x05count\x12&\n" + - "\x0fcreated_at_unix\x18\x05 \x01(\x03R\rcreatedAtUnix\x12\x1f\n" + - "\vage_seconds\x18\x06 \x01(\x03R\n" + - "ageSeconds\x1ae\n" + - "\rDatabaseStats\x12#\n" + - "\x0ep2p_db_size_mb\x18\x01 \x01(\x01R\vp2pDbSizeMb\x12/\n" + - "\x14p2p_db_records_count\x18\x02 \x01(\x03R\x11p2pDbRecordsCount\x1aU\n" + - "\n" + - "DiskStatus\x12\x15\n" + - "\x06all_mb\x18\x01 \x01(\x01R\x05allMb\x12\x17\n" + - "\aused_mb\x18\x02 \x01(\x01R\x06usedMb\x12\x17\n" + - "\afree_mb\x18\x03 \x01(\x01R\x06freeMb\x1a|\n" + - "\x19NetworkHandleMetricsEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12I\n" + - "\x05value\x18\x02 \x01(\v23.supernode.StatusResponse.P2PMetrics.HandleCountersR\x05value:\x028\x01\x1aB\n" + - "\x14ConnPoolMetricsEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + - "\x05value\x18\x02 \x01(\x03R\x05value:\x028\x01B6Z4github.com/LumeraProtocol/supernode/v2/gen/supernodeb\x06proto3" +var file_supernode_status_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 
0x6f, 0x64, 0x65, 0x2f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, + 0x6f, 0x64, 0x65, 0x22, 0x3f, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, + 0x70, 0x32, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x11, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x32, 0x70, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x22, 0xac, 0x31, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x75, 0x70, 0x74, 0x69, 0x6d, + 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x41, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x75, + 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x0d, 0x72, + 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x0c, 0x72, 0x75, 0x6e, 0x6e, + 0x69, 0x6e, 0x67, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x2f, 0x0a, 0x13, 0x72, 0x65, 
0x67, 0x69, + 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, + 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x6e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x73, 0x75, 0x70, + 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x07, 0x6e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x70, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x45, 0x0a, 0x0b, 0x70, 0x32, 0x70, + 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x52, 0x0a, 0x70, 0x32, 0x70, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x12, 0x48, 0x0a, 0x0c, 0x6c, 0x65, 0x70, 0x36, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x2e, 0x4c, 0x45, 0x50, 0x36, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x0b, 0x6c, + 0x65, 0x70, 0x36, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x1a, 0x82, 0x05, 0x0a, 0x09, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, + 0x01, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x27, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x43, 0x50, 0x55, 0x52, 0x03, + 0x63, 0x70, 0x75, 0x12, 0x42, 0x0a, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x52, + 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x54, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x0e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x29, 0x0a, + 0x10, 0x68, 0x61, 0x72, 0x64, 0x77, 0x61, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x68, 0x61, 0x72, 0x64, 0x77, 0x61, 0x72, + 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x1a, 0x40, 0x0a, 0x03, 0x43, 0x50, 0x55, 0x12, + 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x75, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, + 0x63, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x1a, 0x84, 0x01, 0x0a, 0x06, 0x4d, + 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 
0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x67, + 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x47, 0x62, + 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x67, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x06, 0x75, 0x73, 0x65, 0x64, 0x47, 0x62, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x76, 0x61, + 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x67, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x0b, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x47, 0x62, 0x12, 0x23, 0x0a, 0x0d, + 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x0c, 0x75, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, + 0x74, 0x1a, 0xab, 0x01, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x75, 0x73, 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x61, 0x76, 0x61, 0x69, + 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, + 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x0c, 0x75, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, + 0x6b, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, + 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x1d, 0x0a, + 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x51, 0x0a, 0x07, + 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x65, 0x65, 0x72, 0x73, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x70, 0x65, + 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x65, 0x65, 0x72, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x1a, + 0xf3, 0x0e, 0x0a, 0x0a, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x50, + 0x0a, 0x0b, 0x64, 0x68, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, + 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x52, 0x0a, 0x64, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x12, 0x74, 0x0a, 0x16, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x68, 0x61, 0x6e, 0x64, + 0x6c, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x3e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 
0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, + 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x14, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x65, 0x0a, 0x11, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x70, + 0x6f, 0x6f, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x39, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x63, 0x6f, + 0x6e, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x48, 0x0a, + 0x08, 0x62, 0x61, 0x6e, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x2d, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x42, 0x61, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, + 0x62, 0x61, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, + 0x61, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x73, 0x75, 0x70, 0x65, + 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, + 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x08, 0x64, + 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x64, 0x69, 0x73, 0x6b, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x75, 0x70, 0x65, 
0x72, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x69, 0x73, 0x6b, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x04, 0x64, 0x69, 0x73, 0x6b, 0x1a, 0xc0, 0x05, 0x0a, + 0x0a, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x73, 0x0a, 0x14, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x65, 0x63, + 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x73, 0x75, 0x70, 0x65, + 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, + 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, + 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x12, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, + 0x12, 0x76, 0x0a, 0x15, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, + 0x76, 0x65, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x42, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x50, 0x6f, + 0x69, 0x6e, 0x74, 0x52, 0x13, 0x62, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, + 0x76, 0x65, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x15, 0x68, 0x6f, 0x74, 0x5f, + 0x70, 0x61, 0x74, 0x68, 0x5f, 0x62, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x6b, 0x69, 0x70, + 0x73, 0x18, 
0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x68, 0x6f, 0x74, 0x50, 0x61, 0x74, 0x68, + 0x42, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x6b, 0x69, 0x70, 0x73, 0x12, 0x35, 0x0a, 0x17, 0x68, + 0x6f, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x62, 0x61, 0x6e, 0x5f, 0x69, 0x6e, 0x63, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x68, 0x6f, + 0x74, 0x50, 0x61, 0x74, 0x68, 0x42, 0x61, 0x6e, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x73, 0x1a, 0x8f, 0x01, 0x0a, 0x11, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, + 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, + 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, + 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x52, 0x61, 0x74, 0x65, 0x1a, 0xc8, 0x01, 0x0a, 0x12, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, + 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, + 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x1a, 0x0a, 0x08, + 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, + 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 
0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x6f, 0x75, 0x6e, + 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x66, + 0x6f, 0x75, 0x6e, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x6f, 0x75, + 0x6e, 0x64, 0x5f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x0c, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x1f, + 0x0a, 0x0b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x1a, + 0x74, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, + 0x73, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x1a, 0x9d, 0x01, 0x0a, 0x08, 0x42, 0x61, 0x6e, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x75, 0x6e, 0x69, 
0x78, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, + 0x55, 0x6e, 0x69, 0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x61, 0x67, 0x65, 0x53, 0x65, + 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, 0x65, 0x0a, 0x0d, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0e, 0x70, 0x32, 0x70, 0x5f, 0x64, 0x62, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, + 0x70, 0x32, 0x70, 0x44, 0x62, 0x53, 0x69, 0x7a, 0x65, 0x4d, 0x62, 0x12, 0x2f, 0x0a, 0x14, 0x70, + 0x32, 0x70, 0x5f, 0x64, 0x62, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x70, 0x32, 0x70, 0x44, 0x62, + 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x55, 0x0a, 0x0a, + 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x15, 0x0a, 0x06, 0x61, 0x6c, + 0x6c, 0x5f, 0x6d, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x61, 0x6c, 0x6c, 0x4d, + 0x62, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x6d, 0x62, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x06, 0x75, 0x73, 0x65, 0x64, 0x4d, 0x62, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x72, + 0x65, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, 0x66, 0x72, 0x65, + 0x65, 0x4d, 0x62, 0x1a, 0x7c, 0x0a, 0x19, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, + 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x33, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xdb, 0x17, 0x0a, 0x0b, 0x4c, 0x45, 0x50, 0x36, 0x4d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x75, 0x0a, 0x16, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, + 0x68, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x4c, 0x45, 0x50, 0x36, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x69, 0x73, + 0x70, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x54, 0x6f, 0x74, 0x61, + 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x7b, 0x0a, 0x18, + 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, + 0x65, 0x64, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, + 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4c, 0x45, 0x50, 0x36, 0x4d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x69, 0x73, 0x70, 
0x61, 0x74, 0x63, 0x68, 0x54, 0x68, + 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x16, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x54, 0x68, 0x72, 0x6f, 0x74, + 0x74, 0x6c, 0x65, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x9b, 0x01, 0x0a, 0x24, 0x64, 0x69, + 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x5f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x5f, 0x74, 0x6f, 0x74, + 0x61, 0x6c, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x2e, 0x4c, 0x45, 0x50, 0x36, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, + 0x44, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x20, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x45, + 0x70, 0x6f, 0x63, 0x68, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6c, 0x6c, + 0x69, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x95, 0x01, 0x0a, 0x22, 0x64, 0x69, 0x73, 0x70, + 0x61, 0x74, 0x63, 0x68, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x5f, 0x6d, 0x61, 0x78, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x4c, 0x45, 0x50, 0x36, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x69, 0x73, 0x70, + 0x61, 0x74, 0x63, 0x68, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x4d, 0x61, 0x78, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 
0x1e, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x4d, 0x61, 0x78, 0x12, + 0x88, 0x01, 0x0a, 0x1d, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x65, 0x70, 0x6f, + 0x63, 0x68, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x4c, 0x45, 0x50, 0x36, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, + 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x1a, + 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x75, 0x0a, 0x16, 0x74, 0x69, + 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x73, 0x75, 0x70, + 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4c, 0x45, 0x50, 0x36, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, + 0x79, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x74, 0x69, 0x63, + 0x6b, 0x65, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x54, 0x6f, 0x74, 0x61, + 0x6c, 0x12, 0x39, 0x0a, 0x19, 0x6e, 0x6f, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x16, 
0x6e, 0x6f, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x82, 0x01, 0x0a, + 0x1b, 0x68, 0x65, 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x5f, 0x73, 0x75, 0x62, + 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4c, 0x45, + 0x50, 0x36, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x43, 0x6c, + 0x61, 0x69, 0x6d, 0x73, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x54, 0x6f, 0x74, + 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x18, 0x68, 0x65, 0x61, 0x6c, 0x43, 0x6c, 0x61, + 0x69, 0x6d, 0x73, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x54, 0x6f, 0x74, 0x61, + 0x6c, 0x12, 0x3f, 0x0a, 0x1c, 0x68, 0x65, 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x73, + 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x63, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x74, 0x6f, 0x74, 0x61, + 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x19, 0x68, 0x65, 0x61, 0x6c, 0x43, 0x6c, 0x61, + 0x69, 0x6d, 0x73, 0x52, 0x65, 0x63, 0x6f, 0x6e, 0x63, 0x69, 0x6c, 0x65, 0x64, 0x54, 0x6f, 0x74, + 0x61, 0x6c, 0x12, 0x97, 0x01, 0x0a, 0x22, 0x68, 0x65, 0x61, 0x6c, 0x5f, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, + 0x74, 0x65, 0x64, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x4a, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4c, 0x45, 0x50, 0x36, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x53, 0x75, 0x62, 0x6d, 
0x69, 0x74, 0x74, 0x65, + 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x1f, 0x68, 0x65, 0x61, + 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x53, 0x75, + 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x54, 0x0a, 0x27, + 0x68, 0x65, 0x61, 0x6c, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x5f, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, + 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x23, 0x68, + 0x65, 0x61, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x41, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x54, 0x6f, 0x74, + 0x61, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x68, 0x65, 0x61, 0x6c, 0x5f, 0x66, 0x69, 0x6e, 0x61, 0x6c, + 0x69, 0x7a, 0x65, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x73, 0x5f, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x1a, 0x68, 0x65, 0x61, 0x6c, 0x46, + 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x73, + 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x85, 0x01, 0x0a, 0x1c, 0x68, 0x65, 0x61, 0x6c, 0x5f, 0x66, + 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x5f, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x73, + 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x73, + 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4c, 0x45, 0x50, 0x36, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, + 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x19, 0x68, 0x65, 0x61, 0x6c, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, + 0x43, 0x6c, 
0x65, 0x61, 0x6e, 0x75, 0x70, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x3d, 0x0a, + 0x1b, 0x73, 0x65, 0x6c, 0x66, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x65, + 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x18, 0x0e, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x18, 0x73, 0x65, 0x6c, 0x66, 0x48, 0x65, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x50, + 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x12, 0x3b, 0x0a, 0x1a, + 0x73, 0x65, 0x6c, 0x66, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x74, 0x61, + 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x17, 0x73, 0x65, 0x6c, 0x66, 0x48, 0x65, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, + 0x67, 0x69, 0x6e, 0x67, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x43, 0x0a, 0x1e, 0x72, 0x65, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x73, 0x5f, + 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x1b, 0x72, 0x65, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x61, 0x6e, 0x64, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x73, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x91, + 0x01, 0x0a, 0x20, 0x72, 0x65, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x65, 0x76, 0x69, 0x64, 0x65, + 0x6e, 0x63, 0x65, 0x5f, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x73, 0x75, 0x70, 0x65, + 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4c, 0x45, 0x50, 0x36, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x2e, 0x52, 0x65, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x45, 0x76, 0x69, 0x64, 0x65, 0x6e, 0x63, 0x65, + 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x1d, 0x72, 0x65, 0x63, 
0x68, 0x65, 0x63, 0x6b, 0x45, 0x76, 0x69, 0x64, + 0x65, 0x6e, 0x63, 0x65, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x54, 0x6f, 0x74, + 0x61, 0x6c, 0x12, 0x56, 0x0a, 0x28, 0x72, 0x65, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x65, 0x76, + 0x69, 0x64, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x73, + 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x12, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x24, 0x72, 0x65, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x45, 0x76, 0x69, + 0x64, 0x65, 0x6e, 0x63, 0x65, 0x41, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x53, 0x75, 0x62, 0x6d, + 0x69, 0x74, 0x74, 0x65, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x91, 0x01, 0x0a, 0x20, 0x72, + 0x65, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, + 0x13, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x4c, 0x45, 0x50, 0x36, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x1d, 0x72, 0x65, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x3c, + 0x0a, 0x1a, 0x72, 0x65, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, + 0x67, 0x5f, 0x63, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x73, 0x18, 0x14, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x18, 0x72, 0x65, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x50, 0x65, 0x6e, 0x64, 0x69, + 0x6e, 0x67, 0x43, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x73, 0x1a, 0x47, 
0x0a, 0x19, + 0x44, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x54, + 0x6f, 0x74, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x49, 0x0a, 0x1b, 0x44, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, + 0x68, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x1a, 0x53, 0x0a, 0x25, 0x44, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x45, 0x70, 0x6f, 0x63, + 0x68, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x54, + 0x6f, 0x74, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x51, 0x0a, 0x23, 0x44, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, + 0x68, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, + 0x6c, 0x6c, 0x69, 0x73, 0x4d, 0x61, 0x78, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4d, 0x0a, 0x1f, 0x44, 0x69, 0x73, 0x70, + 0x61, 0x74, 0x63, 0x68, 
0x45, 0x70, 0x6f, 0x63, 0x68, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x47, 0x0a, 0x19, 0x54, 0x69, 0x63, 0x6b, 0x65, + 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x1a, 0x4b, 0x0a, 0x1d, 0x48, 0x65, 0x61, 0x6c, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x53, 0x75, + 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x52, 0x0a, + 0x24, 0x48, 0x65, 0x61, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x1a, 0x4c, 0x0a, 0x1e, 0x48, 0x65, 0x61, 0x6c, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, + 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x73, 0x54, 
0x6f, 0x74, 0x61, 0x6c, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, + 0x50, 0x0a, 0x22, 0x52, 0x65, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x45, 0x76, 0x69, 0x64, 0x65, 0x6e, + 0x63, 0x65, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x1a, 0x50, 0x0a, 0x22, 0x52, 0x65, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x45, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x54, 0x6f, 0x74, + 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x4c, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, + 0x6e, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} var ( file_supernode_status_proto_rawDescOnce sync.Once - file_supernode_status_proto_rawDescData []byte + file_supernode_status_proto_rawDescData = file_supernode_status_proto_rawDesc ) func file_supernode_status_proto_rawDescGZIP() []byte { 
file_supernode_status_proto_rawDescOnce.Do(func() { - file_supernode_status_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_supernode_status_proto_rawDesc), len(file_supernode_status_proto_rawDesc))) + file_supernode_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_supernode_status_proto_rawDescData) }) return file_supernode_status_proto_rawDescData } -var file_supernode_status_proto_msgTypes = make([]protoimpl.MessageInfo, 18) +var file_supernode_status_proto_msgTypes = make([]protoimpl.MessageInfo, 30) var file_supernode_status_proto_goTypes = []any{ (*StatusRequest)(nil), // 0: supernode.StatusRequest (*StatusResponse)(nil), // 1: supernode.StatusResponse @@ -1258,41 +1821,65 @@ var file_supernode_status_proto_goTypes = []any{ (*StatusResponse_ServiceTasks)(nil), // 3: supernode.StatusResponse.ServiceTasks (*StatusResponse_Network)(nil), // 4: supernode.StatusResponse.Network (*StatusResponse_P2PMetrics)(nil), // 5: supernode.StatusResponse.P2PMetrics - (*StatusResponse_Resources_CPU)(nil), // 6: supernode.StatusResponse.Resources.CPU - (*StatusResponse_Resources_Memory)(nil), // 7: supernode.StatusResponse.Resources.Memory - (*StatusResponse_Resources_Storage)(nil), // 8: supernode.StatusResponse.Resources.Storage - (*StatusResponse_P2PMetrics_DhtMetrics)(nil), // 9: supernode.StatusResponse.P2PMetrics.DhtMetrics - (*StatusResponse_P2PMetrics_HandleCounters)(nil), // 10: supernode.StatusResponse.P2PMetrics.HandleCounters - (*StatusResponse_P2PMetrics_BanEntry)(nil), // 11: supernode.StatusResponse.P2PMetrics.BanEntry - (*StatusResponse_P2PMetrics_DatabaseStats)(nil), // 12: supernode.StatusResponse.P2PMetrics.DatabaseStats - (*StatusResponse_P2PMetrics_DiskStatus)(nil), // 13: supernode.StatusResponse.P2PMetrics.DiskStatus - nil, // 14: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry - nil, // 15: supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry - 
(*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint)(nil), // 16: supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint - (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint)(nil), // 17: supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint + (*StatusResponse_LEP6Metrics)(nil), // 6: supernode.StatusResponse.LEP6Metrics + (*StatusResponse_Resources_CPU)(nil), // 7: supernode.StatusResponse.Resources.CPU + (*StatusResponse_Resources_Memory)(nil), // 8: supernode.StatusResponse.Resources.Memory + (*StatusResponse_Resources_Storage)(nil), // 9: supernode.StatusResponse.Resources.Storage + (*StatusResponse_P2PMetrics_DhtMetrics)(nil), // 10: supernode.StatusResponse.P2PMetrics.DhtMetrics + (*StatusResponse_P2PMetrics_HandleCounters)(nil), // 11: supernode.StatusResponse.P2PMetrics.HandleCounters + (*StatusResponse_P2PMetrics_BanEntry)(nil), // 12: supernode.StatusResponse.P2PMetrics.BanEntry + (*StatusResponse_P2PMetrics_DatabaseStats)(nil), // 13: supernode.StatusResponse.P2PMetrics.DatabaseStats + (*StatusResponse_P2PMetrics_DiskStatus)(nil), // 14: supernode.StatusResponse.P2PMetrics.DiskStatus + nil, // 15: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry + nil, // 16: supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry + (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint)(nil), // 17: supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint + (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint)(nil), // 18: supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint + nil, // 19: supernode.StatusResponse.LEP6Metrics.DispatchResultsTotalEntry + nil, // 20: supernode.StatusResponse.LEP6Metrics.DispatchThrottledTotalEntry + nil, // 21: supernode.StatusResponse.LEP6Metrics.DispatchEpochDurationMillisTotalEntry + nil, // 22: supernode.StatusResponse.LEP6Metrics.DispatchEpochDurationMillisMaxEntry + nil, // 23: supernode.StatusResponse.LEP6Metrics.DispatchEpochDurationCountEntry + nil, // 24: 
supernode.StatusResponse.LEP6Metrics.TicketDiscoveryTotalEntry + nil, // 25: supernode.StatusResponse.LEP6Metrics.HealClaimsSubmittedTotalEntry + nil, // 26: supernode.StatusResponse.LEP6Metrics.HealVerificationsSubmittedTotalEntry + nil, // 27: supernode.StatusResponse.LEP6Metrics.HealFinalizeCleanupsTotalEntry + nil, // 28: supernode.StatusResponse.LEP6Metrics.RecheckEvidenceSubmittedTotalEntry + nil, // 29: supernode.StatusResponse.LEP6Metrics.RecheckExecutionFailuresTotalEntry } var file_supernode_status_proto_depIdxs = []int32{ 2, // 0: supernode.StatusResponse.resources:type_name -> supernode.StatusResponse.Resources 3, // 1: supernode.StatusResponse.running_tasks:type_name -> supernode.StatusResponse.ServiceTasks 4, // 2: supernode.StatusResponse.network:type_name -> supernode.StatusResponse.Network 5, // 3: supernode.StatusResponse.p2p_metrics:type_name -> supernode.StatusResponse.P2PMetrics - 6, // 4: supernode.StatusResponse.Resources.cpu:type_name -> supernode.StatusResponse.Resources.CPU - 7, // 5: supernode.StatusResponse.Resources.memory:type_name -> supernode.StatusResponse.Resources.Memory - 8, // 6: supernode.StatusResponse.Resources.storage_volumes:type_name -> supernode.StatusResponse.Resources.Storage - 9, // 7: supernode.StatusResponse.P2PMetrics.dht_metrics:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics - 14, // 8: supernode.StatusResponse.P2PMetrics.network_handle_metrics:type_name -> supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry - 15, // 9: supernode.StatusResponse.P2PMetrics.conn_pool_metrics:type_name -> supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry - 11, // 10: supernode.StatusResponse.P2PMetrics.ban_list:type_name -> supernode.StatusResponse.P2PMetrics.BanEntry - 12, // 11: supernode.StatusResponse.P2PMetrics.database:type_name -> supernode.StatusResponse.P2PMetrics.DatabaseStats - 13, // 12: supernode.StatusResponse.P2PMetrics.disk:type_name -> supernode.StatusResponse.P2PMetrics.DiskStatus - 16, 
// 13: supernode.StatusResponse.P2PMetrics.DhtMetrics.store_success_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint - 17, // 14: supernode.StatusResponse.P2PMetrics.DhtMetrics.batch_retrieve_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint - 10, // 15: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry.value:type_name -> supernode.StatusResponse.P2PMetrics.HandleCounters - 16, // [16:16] is the sub-list for method output_type - 16, // [16:16] is the sub-list for method input_type - 16, // [16:16] is the sub-list for extension type_name - 16, // [16:16] is the sub-list for extension extendee - 0, // [0:16] is the sub-list for field type_name + 6, // 4: supernode.StatusResponse.lep6_metrics:type_name -> supernode.StatusResponse.LEP6Metrics + 7, // 5: supernode.StatusResponse.Resources.cpu:type_name -> supernode.StatusResponse.Resources.CPU + 8, // 6: supernode.StatusResponse.Resources.memory:type_name -> supernode.StatusResponse.Resources.Memory + 9, // 7: supernode.StatusResponse.Resources.storage_volumes:type_name -> supernode.StatusResponse.Resources.Storage + 10, // 8: supernode.StatusResponse.P2PMetrics.dht_metrics:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics + 15, // 9: supernode.StatusResponse.P2PMetrics.network_handle_metrics:type_name -> supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry + 16, // 10: supernode.StatusResponse.P2PMetrics.conn_pool_metrics:type_name -> supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry + 12, // 11: supernode.StatusResponse.P2PMetrics.ban_list:type_name -> supernode.StatusResponse.P2PMetrics.BanEntry + 13, // 12: supernode.StatusResponse.P2PMetrics.database:type_name -> supernode.StatusResponse.P2PMetrics.DatabaseStats + 14, // 13: supernode.StatusResponse.P2PMetrics.disk:type_name -> supernode.StatusResponse.P2PMetrics.DiskStatus + 19, // 14: supernode.StatusResponse.LEP6Metrics.dispatch_results_total:type_name 
-> supernode.StatusResponse.LEP6Metrics.DispatchResultsTotalEntry + 20, // 15: supernode.StatusResponse.LEP6Metrics.dispatch_throttled_total:type_name -> supernode.StatusResponse.LEP6Metrics.DispatchThrottledTotalEntry + 21, // 16: supernode.StatusResponse.LEP6Metrics.dispatch_epoch_duration_millis_total:type_name -> supernode.StatusResponse.LEP6Metrics.DispatchEpochDurationMillisTotalEntry + 22, // 17: supernode.StatusResponse.LEP6Metrics.dispatch_epoch_duration_millis_max:type_name -> supernode.StatusResponse.LEP6Metrics.DispatchEpochDurationMillisMaxEntry + 23, // 18: supernode.StatusResponse.LEP6Metrics.dispatch_epoch_duration_count:type_name -> supernode.StatusResponse.LEP6Metrics.DispatchEpochDurationCountEntry + 24, // 19: supernode.StatusResponse.LEP6Metrics.ticket_discovery_total:type_name -> supernode.StatusResponse.LEP6Metrics.TicketDiscoveryTotalEntry + 25, // 20: supernode.StatusResponse.LEP6Metrics.heal_claims_submitted_total:type_name -> supernode.StatusResponse.LEP6Metrics.HealClaimsSubmittedTotalEntry + 26, // 21: supernode.StatusResponse.LEP6Metrics.heal_verifications_submitted_total:type_name -> supernode.StatusResponse.LEP6Metrics.HealVerificationsSubmittedTotalEntry + 27, // 22: supernode.StatusResponse.LEP6Metrics.heal_finalize_cleanups_total:type_name -> supernode.StatusResponse.LEP6Metrics.HealFinalizeCleanupsTotalEntry + 28, // 23: supernode.StatusResponse.LEP6Metrics.recheck_evidence_submitted_total:type_name -> supernode.StatusResponse.LEP6Metrics.RecheckEvidenceSubmittedTotalEntry + 29, // 24: supernode.StatusResponse.LEP6Metrics.recheck_execution_failures_total:type_name -> supernode.StatusResponse.LEP6Metrics.RecheckExecutionFailuresTotalEntry + 17, // 25: supernode.StatusResponse.P2PMetrics.DhtMetrics.store_success_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint + 18, // 26: supernode.StatusResponse.P2PMetrics.DhtMetrics.batch_retrieve_recent:type_name -> 
supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint + 11, // 27: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry.value:type_name -> supernode.StatusResponse.P2PMetrics.HandleCounters + 28, // [28:28] is the sub-list for method output_type + 28, // [28:28] is the sub-list for method input_type + 28, // [28:28] is the sub-list for extension type_name + 28, // [28:28] is the sub-list for extension extendee + 0, // [0:28] is the sub-list for field type_name } func init() { file_supernode_status_proto_init() } @@ -1300,13 +1887,219 @@ func file_supernode_status_proto_init() { if File_supernode_status_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_supernode_status_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*StatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_supernode_status_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*StatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_supernode_status_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*StatusResponse_Resources); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_supernode_status_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*StatusResponse_ServiceTasks); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_supernode_status_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*StatusResponse_Network); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_supernode_status_proto_msgTypes[5].Exporter = 
func(v any, i int) any { + switch v := v.(*StatusResponse_P2PMetrics); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_supernode_status_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*StatusResponse_LEP6Metrics); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_supernode_status_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*StatusResponse_Resources_CPU); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_supernode_status_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*StatusResponse_Resources_Memory); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_supernode_status_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*StatusResponse_Resources_Storage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_supernode_status_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*StatusResponse_P2PMetrics_DhtMetrics); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_supernode_status_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*StatusResponse_P2PMetrics_HandleCounters); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_supernode_status_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*StatusResponse_P2PMetrics_BanEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + 
return &v.unknownFields + default: + return nil + } + } + file_supernode_status_proto_msgTypes[13].Exporter = func(v any, i int) any { + switch v := v.(*StatusResponse_P2PMetrics_DatabaseStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_supernode_status_proto_msgTypes[14].Exporter = func(v any, i int) any { + switch v := v.(*StatusResponse_P2PMetrics_DiskStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_supernode_status_proto_msgTypes[17].Exporter = func(v any, i int) any { + switch v := v.(*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_supernode_status_proto_msgTypes[18].Exporter = func(v any, i int) any { + switch v := v.(*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_supernode_status_proto_rawDesc), len(file_supernode_status_proto_rawDesc)), + RawDescriptor: file_supernode_status_proto_rawDesc, NumEnums: 0, - NumMessages: 18, + NumMessages: 30, NumExtensions: 0, NumServices: 0, }, @@ -1315,6 +2108,7 @@ func file_supernode_status_proto_init() { MessageInfos: file_supernode_status_proto_msgTypes, }.Build() File_supernode_status_proto = out.File + file_supernode_status_proto_rawDesc = nil file_supernode_status_proto_goTypes = nil file_supernode_status_proto_depIdxs = nil } diff --git a/gen/supernode/storage_challenge.swagger.json b/gen/supernode/storage_challenge.swagger.json index 9304b937..23d3083a 100644 --- 
a/gen/supernode/storage_challenge.swagger.json +++ b/gen/supernode/storage_challenge.swagger.json @@ -45,6 +45,73 @@ } } }, + "supernodeByteRange": { + "type": "object", + "properties": { + "start": { + "type": "string", + "format": "uint64" + }, + "end": { + "type": "string", + "format": "uint64", + "title": "exclusive" + } + }, + "description": "ByteRange represents a half-open byte range [start, end) into an artifact." + }, + "supernodeGetCompoundProofResponse": { + "type": "object", + "properties": { + "challengeId": { + "type": "string" + }, + "epochId": { + "type": "string", + "format": "uint64" + }, + "ticketId": { + "type": "string" + }, + "artifactClass": { + "type": "integer", + "format": "int64" + }, + "artifactOrdinal": { + "type": "integer", + "format": "int64" + }, + "bucketType": { + "type": "integer", + "format": "int64" + }, + "artifactKey": { + "type": "string" + }, + "rangeBytes": { + "type": "array", + "items": { + "type": "string", + "format": "byte" + }, + "title": "i-th matches i-th request range" + }, + "proofHashHex": { + "type": "string", + "title": "BLAKE3(concat(range_bytes...)) lowercase hex" + }, + "recipientSignature": { + "type": "string", + "title": "recipient's keyring signature" + }, + "ok": { + "type": "boolean" + }, + "error": { + "type": "string" + } + } + }, "supernodeGetSliceProofResponse": { "type": "object", "properties": { diff --git a/pkg/lumera/modules/audit/impl_test.go b/pkg/lumera/modules/audit/impl_test.go new file mode 100644 index 00000000..b57a817f --- /dev/null +++ b/pkg/lumera/modules/audit/impl_test.go @@ -0,0 +1,13 @@ +package audit + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewModuleRejectsNilConnection(t *testing.T) { + m, err := NewModule(nil) + require.Nil(t, m) + require.ErrorContains(t, err, "connection cannot be nil") +} diff --git a/pkg/lumera/modules/audit_msg/impl_test.go b/pkg/lumera/modules/audit_msg/impl_test.go new file mode 100644 index 00000000..07e9b020 
--- /dev/null +++ b/pkg/lumera/modules/audit_msg/impl_test.go @@ -0,0 +1,46 @@ +package audit_msg + +import ( + "context" + "strings" + "testing" + + audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" + "github.com/stretchr/testify/require" +) + +func TestClaimHealCompleteValidatesInputsBeforeTxExecution(t *testing.T) { + m := &module{} + _, err := m.ClaimHealComplete(context.Background(), 0, "ticket", "manifest", "") + require.ErrorContains(t, err, "heal op id cannot be zero") + + _, err = m.ClaimHealComplete(context.Background(), 1, " ", "manifest", "") + require.ErrorContains(t, err, "ticket id cannot be empty") + + _, err = m.ClaimHealComplete(context.Background(), 1, "ticket", " ", "") + require.ErrorContains(t, err, "heal manifest hash cannot be empty") +} + +func TestSubmitHealVerificationValidatesInputsBeforeTxExecution(t *testing.T) { + m := &module{} + _, err := m.SubmitHealVerification(context.Background(), 0, true, "hash", "") + require.ErrorContains(t, err, "heal op id cannot be zero") + + _, err = m.SubmitHealVerification(context.Background(), 1, true, " ", "") + require.ErrorContains(t, err, "verification hash cannot be empty") +} + +func TestSubmitStorageRecheckEvidenceValidatesInputsBeforeTxExecution(t *testing.T) { + m := &module{} + _, err := m.SubmitStorageRecheckEvidence(context.Background(), 7, " ", "ticket", "challenged", "recheck", audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_RECHECK_CONFIRMED_FAIL, "") + require.ErrorContains(t, err, "challenged supernode account cannot be empty") + + _, err = m.SubmitStorageRecheckEvidence(context.Background(), 7, "target", " ", "challenged", "recheck", audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_RECHECK_CONFIRMED_FAIL, "") + require.ErrorContains(t, err, "ticket id cannot be empty") + + _, err = m.SubmitStorageRecheckEvidence(context.Background(), 7, "target", "ticket", " ", "recheck", 
audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_RECHECK_CONFIRMED_FAIL, "") + require.ErrorContains(t, err, "challenged result transcript hash cannot be empty") + + _, err = m.SubmitStorageRecheckEvidence(context.Background(), 7, "target", "ticket", "challenged", strings.Repeat(" ", 3), audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_RECHECK_CONFIRMED_FAIL, "") + require.ErrorContains(t, err, "recheck transcript hash cannot be empty") +} diff --git a/pkg/metrics/lep6/metrics.go b/pkg/metrics/lep6/metrics.go new file mode 100644 index 00000000..2dec3fba --- /dev/null +++ b/pkg/metrics/lep6/metrics.go @@ -0,0 +1,263 @@ +// Package lep6 owns in-process observability counters for the off-chain LEP-6 stack. +// +// The supernode repo does not expose service-specific Prometheus collectors today; +// comparable subsystems use structured logtrace calls plus typed in-process snapshots +// (for example p2p/kademlia handler counters surfaced through status). Keep LEP-6 +// aligned with that pattern: hot paths increment cheap atomic counters/gauges and +// tests/status/debug callers can inspect Snapshot(). +package lep6 + +import ( + "sort" + "strings" + "sync" + "sync/atomic" + "time" +) + +// MetricsSnapshot is a point-in-time copy of LEP-6 off-chain observability signals. +// Counter maps use stable label keys in the form documented on each field. +type MetricsSnapshot struct { + // Storage challenge / dispatcher — LEP-6 §§9-12. + DispatchResultsTotal map[string]uint64 // result_class + DispatchThrottledTotal map[string]uint64 // policy + DispatchEpochDurationMillisTotal map[string]uint64 // role + DispatchEpochDurationMillisMax map[string]uint64 // role + DispatchEpochDurationCount map[string]uint64 // role + TicketDiscoveryTotal map[string]uint64 // result + NoTicketProviderActive int64 + + // Self-healing — LEP-6 §§18-22. 
+ HealClaimsSubmittedTotal map[string]uint64 // outcome + HealClaimsReconciledTotal uint64 + HealVerificationsSubmittedTotal map[string]uint64 // verified=,result= + HealVerificationsAlreadyExistsTotal uint64 + HealFinalizePublishesTotal uint64 + HealFinalizeCleanupsTotal map[string]uint64 // status + SelfHealingPendingClaims int64 + SelfHealingStagingBytes int64 + + // Recheck — LEP-6 §12.3 and §15.1. + RecheckCandidatesFoundTotal uint64 + RecheckEvidenceSubmittedTotal map[string]uint64 // class=,outcome= + RecheckEvidenceAlreadySubmittedTotal uint64 + RecheckExecutionFailuresTotal map[string]uint64 // reason + RecheckPendingCandidates int64 +} + +type counterMap struct { + mu sync.RWMutex + m map[string]*atomic.Uint64 +} + +func (c *counterMap) inc(key string, delta uint64) { + key = normalizeLabel(key) + c.mu.RLock() + v := c.m[key] + c.mu.RUnlock() + if v == nil { + c.mu.Lock() + if c.m == nil { + c.m = make(map[string]*atomic.Uint64) + } + v = c.m[key] + if v == nil { + v = &atomic.Uint64{} + c.m[key] = v + } + c.mu.Unlock() + } + v.Add(delta) +} + +func (c *counterMap) setMax(key string, value uint64) { + key = normalizeLabel(key) + c.mu.RLock() + v := c.m[key] + c.mu.RUnlock() + if v == nil { + c.mu.Lock() + if c.m == nil { + c.m = make(map[string]*atomic.Uint64) + } + v = c.m[key] + if v == nil { + v = &atomic.Uint64{} + c.m[key] = v + } + c.mu.Unlock() + } + for { + old := v.Load() + if value <= old || v.CompareAndSwap(old, value) { + return + } + } +} + +func (c *counterMap) snapshot() map[string]uint64 { + c.mu.RLock() + defer c.mu.RUnlock() + out := make(map[string]uint64, len(c.m)) + keys := make([]string, 0, len(c.m)) + for k := range c.m { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + out[k] = c.m[k].Load() + } + return out +} + +func (c *counterMap) reset() { + c.mu.Lock() + c.m = make(map[string]*atomic.Uint64) + c.mu.Unlock() +} + +var metrics = struct { + dispatchResults counterMap + dispatchThrottled counterMap + 
dispatchEpochMillisTotal counterMap + dispatchEpochMillisMax counterMap + dispatchEpochCount counterMap + ticketDiscovery counterMap + noTicketProviderActive atomic.Int64 + + healClaimsSubmitted counterMap + healClaimsReconciled atomic.Uint64 + healVerificationsSubmitted counterMap + healVerificationsAlreadyExist atomic.Uint64 + healFinalizePublishes atomic.Uint64 + healFinalizeCleanups counterMap + selfHealingPendingClaims atomic.Int64 + selfHealingStagingBytes atomic.Int64 + + recheckCandidatesFound atomic.Uint64 + recheckEvidenceSubmitted counterMap + recheckEvidenceAlreadySubmitted atomic.Uint64 + recheckExecutionFailures counterMap + recheckPendingCandidates atomic.Int64 +}{} + +// Reset clears all counters/gauges. It is intended for tests. +func Reset() { + metrics.dispatchResults.reset() + metrics.dispatchThrottled.reset() + metrics.dispatchEpochMillisTotal.reset() + metrics.dispatchEpochMillisMax.reset() + metrics.dispatchEpochCount.reset() + metrics.ticketDiscovery.reset() + metrics.noTicketProviderActive.Store(0) + metrics.healClaimsSubmitted.reset() + metrics.healClaimsReconciled.Store(0) + metrics.healVerificationsSubmitted.reset() + metrics.healVerificationsAlreadyExist.Store(0) + metrics.healFinalizePublishes.Store(0) + metrics.healFinalizeCleanups.reset() + metrics.selfHealingPendingClaims.Store(0) + metrics.selfHealingStagingBytes.Store(0) + metrics.recheckCandidatesFound.Store(0) + metrics.recheckEvidenceSubmitted.reset() + metrics.recheckEvidenceAlreadySubmitted.Store(0) + metrics.recheckExecutionFailures.reset() + metrics.recheckPendingCandidates.Store(0) +} + +// Snapshot returns a consistent copy of current LEP-6 metrics. 
+func Snapshot() MetricsSnapshot { + return MetricsSnapshot{ + DispatchResultsTotal: metrics.dispatchResults.snapshot(), + DispatchThrottledTotal: metrics.dispatchThrottled.snapshot(), + DispatchEpochDurationMillisTotal: metrics.dispatchEpochMillisTotal.snapshot(), + DispatchEpochDurationMillisMax: metrics.dispatchEpochMillisMax.snapshot(), + DispatchEpochDurationCount: metrics.dispatchEpochCount.snapshot(), + TicketDiscoveryTotal: metrics.ticketDiscovery.snapshot(), + NoTicketProviderActive: metrics.noTicketProviderActive.Load(), + HealClaimsSubmittedTotal: metrics.healClaimsSubmitted.snapshot(), + HealClaimsReconciledTotal: metrics.healClaimsReconciled.Load(), + HealVerificationsSubmittedTotal: metrics.healVerificationsSubmitted.snapshot(), + HealVerificationsAlreadyExistsTotal: metrics.healVerificationsAlreadyExist.Load(), + HealFinalizePublishesTotal: metrics.healFinalizePublishes.Load(), + HealFinalizeCleanupsTotal: metrics.healFinalizeCleanups.snapshot(), + SelfHealingPendingClaims: metrics.selfHealingPendingClaims.Load(), + SelfHealingStagingBytes: metrics.selfHealingStagingBytes.Load(), + RecheckCandidatesFoundTotal: metrics.recheckCandidatesFound.Load(), + RecheckEvidenceSubmittedTotal: metrics.recheckEvidenceSubmitted.snapshot(), + RecheckEvidenceAlreadySubmittedTotal: metrics.recheckEvidenceAlreadySubmitted.Load(), + RecheckExecutionFailuresTotal: metrics.recheckExecutionFailures.snapshot(), + RecheckPendingCandidates: metrics.recheckPendingCandidates.Load(), + } +} + +func IncDispatchResult(resultClass string) { metrics.dispatchResults.inc(resultClass, 1) } +func IncDispatchThrottled(policy string, dropped int) { + if dropped > 0 { + metrics.dispatchThrottled.inc(policy, uint64(dropped)) + } +} +func ObserveDispatchEpochDuration(role string, duration time.Duration) { + if duration < 0 { + duration = 0 + } + millis := uint64(duration.Milliseconds()) + metrics.dispatchEpochMillisTotal.inc(role, millis) + metrics.dispatchEpochMillisMax.setMax(role, millis) 
+ metrics.dispatchEpochCount.inc(role, 1) +} +func IncTicketDiscovery(result string) { metrics.ticketDiscovery.inc(result, 1) } +func SetNoTicketProviderActive(active bool) { + if active { + metrics.noTicketProviderActive.Store(1) + } else { + metrics.noTicketProviderActive.Store(0) + } +} + +func IncHealClaim(outcome string) { metrics.healClaimsSubmitted.inc(outcome, 1) } +func IncHealClaimReconciled() { metrics.healClaimsReconciled.Add(1) } +func IncHealVerification(outcome string, verified bool) { + vote := "negative" + if verified { + vote = "positive" + } + metrics.healVerificationsSubmitted.inc("verified="+vote+",result="+normalizeLabel(outcome), 1) +} +func IncHealVerificationAlreadyExists() { metrics.healVerificationsAlreadyExist.Add(1) } +func IncHealFinalizePublish() { metrics.healFinalizePublishes.Add(1) } +func IncHealFinalizeCleanup(status string) { metrics.healFinalizeCleanups.inc(status, 1) } +func SetSelfHealingPendingClaims(count int) { + metrics.selfHealingPendingClaims.Store(nonNegativeInt64(count)) +} +func SetSelfHealingStagingBytes(bytes int64) { + if bytes < 0 { + bytes = 0 + } + metrics.selfHealingStagingBytes.Store(bytes) +} + +func IncRecheckCandidateFound() { metrics.recheckCandidatesFound.Add(1) } +func IncRecheckSubmission(resultClass, outcome string) { + metrics.recheckEvidenceSubmitted.inc("class="+normalizeLabel(resultClass)+",outcome="+normalizeLabel(outcome), 1) +} +func IncRecheckAlreadySubmitted() { metrics.recheckEvidenceAlreadySubmitted.Add(1) } +func IncRecheckFailure(reason string) { metrics.recheckExecutionFailures.inc(reason, 1) } +func SetRecheckPendingCandidates(count int) { + metrics.recheckPendingCandidates.Store(nonNegativeInt64(count)) +} + +func normalizeLabel(label string) string { + label = strings.TrimSpace(strings.ToLower(label)) + if label == "" { + return "unknown" + } + return label +} + +func nonNegativeInt64(v int) int64 { + if v < 0 { + return 0 + } + return int64(v) +} diff --git 
a/pkg/metrics/lep6/metrics_test.go b/pkg/metrics/lep6/metrics_test.go new file mode 100644 index 00000000..d82a7485 --- /dev/null +++ b/pkg/metrics/lep6/metrics_test.go @@ -0,0 +1,92 @@ +package lep6 + +import ( + "testing" + "time" +) + +func TestSnapshotTracksFullLEP6SignalSet(t *testing.T) { + Reset() + + IncDispatchResult("PASS") + IncDispatchThrottled("drop-non-RECENT-first", 3) + ObserveDispatchEpochDuration("challenger", 1500*time.Millisecond) + ObserveDispatchEpochDuration("challenger", 500*time.Millisecond) + IncTicketDiscovery("eligible") + SetNoTicketProviderActive(true) + + IncHealClaim("submitted") + IncHealClaimReconciled() + IncHealVerification("submitted", true) + IncHealVerification("dedup", false) + IncHealVerificationAlreadyExists() + IncHealFinalizePublish() + IncHealFinalizeCleanup("FAILED") + SetSelfHealingPendingClaims(2) + SetSelfHealingStagingBytes(4096) + + IncRecheckCandidateFound() + IncRecheckSubmission("RECHECK_CONFIRMED_FAIL", "submitted") + IncRecheckAlreadySubmitted() + IncRecheckFailure("execute") + SetRecheckPendingCandidates(7) + + s := Snapshot() + assertCounter(t, s.DispatchResultsTotal, "pass", 1) + assertCounter(t, s.DispatchThrottledTotal, "drop-non-recent-first", 3) + assertCounter(t, s.DispatchEpochDurationMillisTotal, "challenger", 2000) + assertCounter(t, s.DispatchEpochDurationMillisMax, "challenger", 1500) + assertCounter(t, s.DispatchEpochDurationCount, "challenger", 2) + assertCounter(t, s.TicketDiscoveryTotal, "eligible", 1) + if s.NoTicketProviderActive != 1 { + t.Fatalf("NoTicketProviderActive = %d, want 1", s.NoTicketProviderActive) + } + assertCounter(t, s.HealClaimsSubmittedTotal, "submitted", 1) + if s.HealClaimsReconciledTotal != 1 { + t.Fatalf("HealClaimsReconciledTotal = %d, want 1", s.HealClaimsReconciledTotal) + } + assertCounter(t, s.HealVerificationsSubmittedTotal, "verified=positive,result=submitted", 1) + assertCounter(t, s.HealVerificationsSubmittedTotal, "verified=negative,result=dedup", 1) + if 
s.HealVerificationsAlreadyExistsTotal != 1 { + t.Fatalf("HealVerificationsAlreadyExistsTotal = %d, want 1", s.HealVerificationsAlreadyExistsTotal) + } + if s.HealFinalizePublishesTotal != 1 { + t.Fatalf("HealFinalizePublishesTotal = %d, want 1", s.HealFinalizePublishesTotal) + } + assertCounter(t, s.HealFinalizeCleanupsTotal, "failed", 1) + if s.SelfHealingPendingClaims != 2 || s.SelfHealingStagingBytes != 4096 { + t.Fatalf("self-healing gauges = (%d,%d), want (2,4096)", s.SelfHealingPendingClaims, s.SelfHealingStagingBytes) + } + if s.RecheckCandidatesFoundTotal != 1 { + t.Fatalf("RecheckCandidatesFoundTotal = %d, want 1", s.RecheckCandidatesFoundTotal) + } + assertCounter(t, s.RecheckEvidenceSubmittedTotal, "class=recheck_confirmed_fail,outcome=submitted", 1) + if s.RecheckEvidenceAlreadySubmittedTotal != 1 { + t.Fatalf("RecheckEvidenceAlreadySubmittedTotal = %d, want 1", s.RecheckEvidenceAlreadySubmittedTotal) + } + assertCounter(t, s.RecheckExecutionFailuresTotal, "execute", 1) + if s.RecheckPendingCandidates != 7 { + t.Fatalf("RecheckPendingCandidates = %d, want 7", s.RecheckPendingCandidates) + } +} + +func TestResetClearsMetrics(t *testing.T) { + Reset() + IncDispatchResult("PASS") + SetSelfHealingPendingClaims(9) + Reset() + s := Snapshot() + if len(s.DispatchResultsTotal) != 0 { + t.Fatalf("DispatchResultsTotal after Reset = %#v, want empty", s.DispatchResultsTotal) + } + if s.SelfHealingPendingClaims != 0 { + t.Fatalf("SelfHealingPendingClaims after Reset = %d, want 0", s.SelfHealingPendingClaims) + } +} + +func assertCounter(t *testing.T, got map[string]uint64, key string, want uint64) { + t.Helper() + if got[key] != want { + t.Fatalf("counter[%q] = %d, want %d (all=%#v)", key, got[key], want, got) + } +} diff --git a/pkg/storage/queries/recheck.go b/pkg/storage/queries/recheck.go index 98b03b35..578a01c7 100644 --- a/pkg/storage/queries/recheck.go +++ b/pkg/storage/queries/recheck.go @@ -17,6 +17,7 @@ type RecheckSubmissionRecord struct { 
RecheckTranscriptHash string ResultClass audittypes.StorageProofResultClass SubmittedAt int64 + Status string } const createStorageRecheckSubmissions = ` @@ -27,10 +28,27 @@ CREATE TABLE IF NOT EXISTS storage_recheck_submissions ( challenged_transcript_hash TEXT NOT NULL, recheck_transcript_hash TEXT NOT NULL, result_class INTEGER NOT NULL, + status TEXT NOT NULL DEFAULT 'submitted', submitted_at INTEGER NOT NULL, PRIMARY KEY (epoch_id, ticket_id) );` +const createStorageRecheckSubmissionStatusIndex = `CREATE INDEX IF NOT EXISTS idx_storage_recheck_submissions_status ON storage_recheck_submissions(status);` +const alterStorageRecheckSubmissionStatus = `ALTER TABLE storage_recheck_submissions ADD COLUMN status TEXT NOT NULL DEFAULT 'submitted';` + +const createRecheckAttemptFailures = ` +CREATE TABLE IF NOT EXISTS recheck_attempt_failures ( + epoch_id INTEGER NOT NULL, + ticket_id TEXT NOT NULL, + target_account TEXT NOT NULL, + attempts INTEGER NOT NULL DEFAULT 1, + last_error TEXT, + expires_at INTEGER NOT NULL, + PRIMARY KEY (epoch_id, ticket_id) +);` + +const createRecheckAttemptFailuresExpiresIndex = `CREATE INDEX IF NOT EXISTS idx_recheck_attempt_failures_expires ON recheck_attempt_failures(expires_at);` + func (s *SQLiteStore) HasRecheckSubmission(ctx context.Context, epochID uint64, ticketID string) (bool, error) { const stmt = `SELECT 1 FROM storage_recheck_submissions WHERE epoch_id = ? AND ticket_id = ? 
LIMIT 1` var one int @@ -44,11 +62,71 @@ func (s *SQLiteStore) HasRecheckSubmission(ctx context.Context, epochID uint64, return true, nil } +func (s *SQLiteStore) RecordPendingRecheckSubmission(ctx context.Context, epochID uint64, ticketID, targetAccount, challengedTranscriptHash, recheckTranscriptHash string, resultClass audittypes.StorageProofResultClass) error { + return s.recordRecheckSubmissionWithStatus(ctx, epochID, ticketID, targetAccount, challengedTranscriptHash, recheckTranscriptHash, resultClass, "pending") +} + func (s *SQLiteStore) RecordRecheckSubmission(ctx context.Context, epochID uint64, ticketID, targetAccount, challengedTranscriptHash, recheckTranscriptHash string, resultClass audittypes.StorageProofResultClass) error { - const stmt = `INSERT OR IGNORE INTO storage_recheck_submissions (epoch_id, ticket_id, target_account, challenged_transcript_hash, recheck_transcript_hash, result_class, submitted_at) VALUES (?, ?, ?, ?, ?, ?, ?)` + return s.recordRecheckSubmissionWithStatus(ctx, epochID, ticketID, targetAccount, challengedTranscriptHash, recheckTranscriptHash, resultClass, "submitted") +} + +func (s *SQLiteStore) recordRecheckSubmissionWithStatus(ctx context.Context, epochID uint64, ticketID, targetAccount, challengedTranscriptHash, recheckTranscriptHash string, resultClass audittypes.StorageProofResultClass, status string) error { + const stmt = `INSERT OR IGNORE INTO storage_recheck_submissions (epoch_id, ticket_id, target_account, challenged_transcript_hash, recheck_transcript_hash, result_class, status, submitted_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?)` + if epochID == 0 || ticketID == "" { + return fmt.Errorf("epoch_id and ticket_id are required") + } + _, err := s.db.ExecContext(ctx, stmt, epochID, ticketID, targetAccount, challengedTranscriptHash, recheckTranscriptHash, int32(resultClass), status, time.Now().Unix()) + return err +} + +func (s *SQLiteStore) MarkRecheckSubmissionSubmitted(ctx context.Context, epochID uint64, ticketID string) 
error { + _, err := s.db.ExecContext(ctx, `UPDATE storage_recheck_submissions SET status = 'submitted', submitted_at = ? WHERE epoch_id = ? AND ticket_id = ?`, time.Now().Unix(), epochID, ticketID) + return err +} + +func (s *SQLiteStore) DeletePendingRecheckSubmission(ctx context.Context, epochID uint64, ticketID string) error { + _, err := s.db.ExecContext(ctx, `DELETE FROM storage_recheck_submissions WHERE epoch_id = ? AND ticket_id = ? AND status = 'pending'`, epochID, ticketID) + return err +} + +func (s *SQLiteStore) RecordRecheckAttemptFailure(ctx context.Context, epochID uint64, ticketID, targetAccount string, err error, ttl time.Duration) error { if epochID == 0 || ticketID == "" { return fmt.Errorf("epoch_id and ticket_id are required") } - _, err := s.db.ExecContext(ctx, stmt, epochID, ticketID, targetAccount, challengedTranscriptHash, recheckTranscriptHash, int32(resultClass), time.Now().Unix()) + msg := "" + if err != nil { + msg = err.Error() + } + expiresAt := time.Now().Add(ttl).Unix() + const stmt = `INSERT INTO recheck_attempt_failures (epoch_id, ticket_id, target_account, attempts, last_error, expires_at) +VALUES (?, ?, ?, 1, ?, ?) +ON CONFLICT(epoch_id, ticket_id) DO UPDATE SET attempts = attempts + 1, last_error = excluded.last_error, expires_at = excluded.expires_at` + _, execErr := s.db.ExecContext(ctx, stmt, epochID, ticketID, targetAccount, msg, expiresAt) + return execErr +} + +func (s *SQLiteStore) HasRecheckAttemptFailureBudgetExceeded(ctx context.Context, epochID uint64, ticketID string, maxAttempts int) (bool, error) { + if maxAttempts <= 0 { + return false, nil + } + const stmt = `SELECT attempts, expires_at FROM recheck_attempt_failures WHERE epoch_id = ? AND ticket_id = ? 
LIMIT 1` + var attempts int + var expiresAt int64 + err := s.db.QueryRowContext(ctx, stmt, epochID, ticketID).Scan(&attempts, &expiresAt) + if err == sql.ErrNoRows { + return false, nil + } + if err != nil { + return false, err + } + if expiresAt <= time.Now().Unix() { + _, _ = s.db.ExecContext(ctx, `DELETE FROM recheck_attempt_failures WHERE epoch_id = ? AND ticket_id = ?`, epochID, ticketID) + return false, nil + } + return attempts >= maxAttempts, nil +} + +func (s *SQLiteStore) PurgeExpiredRecheckAttemptFailures(ctx context.Context) error { + _, err := s.db.ExecContext(ctx, `DELETE FROM recheck_attempt_failures WHERE expires_at <= ?`, time.Now().Unix()) return err } diff --git a/pkg/storage/queries/recheck_interface.go b/pkg/storage/queries/recheck_interface.go index 8cab83c8..71ce1907 100644 --- a/pkg/storage/queries/recheck_interface.go +++ b/pkg/storage/queries/recheck_interface.go @@ -2,11 +2,18 @@ package queries import ( "context" + "time" audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" ) type RecheckQueries interface { HasRecheckSubmission(ctx context.Context, epochID uint64, ticketID string) (bool, error) + RecordPendingRecheckSubmission(ctx context.Context, epochID uint64, ticketID, targetAccount, challengedTranscriptHash, recheckTranscriptHash string, resultClass audittypes.StorageProofResultClass) error + MarkRecheckSubmissionSubmitted(ctx context.Context, epochID uint64, ticketID string) error + DeletePendingRecheckSubmission(ctx context.Context, epochID uint64, ticketID string) error RecordRecheckSubmission(ctx context.Context, epochID uint64, ticketID, targetAccount, challengedTranscriptHash, recheckTranscriptHash string, resultClass audittypes.StorageProofResultClass) error + RecordRecheckAttemptFailure(ctx context.Context, epochID uint64, ticketID, targetAccount string, err error, ttl time.Duration) error + HasRecheckAttemptFailureBudgetExceeded(ctx context.Context, epochID uint64, ticketID string, maxAttempts int) (bool, error) + 
PurgeExpiredRecheckAttemptFailures(ctx context.Context) error } diff --git a/pkg/storage/queries/recheck_test.go b/pkg/storage/queries/recheck_test.go index 2319cff1..d5d1f766 100644 --- a/pkg/storage/queries/recheck_test.go +++ b/pkg/storage/queries/recheck_test.go @@ -3,10 +3,12 @@ package queries import ( "context" "testing" + "time" audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" "github.com/jmoiron/sqlx" _ "github.com/mattn/go-sqlite3" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -39,3 +41,26 @@ func TestRecheckSubmissionDedupKeyEpochTicket(t *testing.T) { require.NoError(t, db.QueryRowContext(ctx, `SELECT target_account FROM storage_recheck_submissions WHERE epoch_id=? AND ticket_id=?`, 7, "ticket-1").Scan(&target)) require.Equal(t, "target-a", target) } + +func TestRecheckPendingSubmittedAndFailureBudget(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + require.NoError(t, store.RecordPendingRecheckSubmission(ctx, 7, "ticket-7", "target", "challenged", "actual", audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_RECHECK_CONFIRMED_FAIL)) + has, err := store.HasRecheckSubmission(ctx, 7, "ticket-7") + require.NoError(t, err) + require.True(t, has) + require.NoError(t, store.MarkRecheckSubmissionSubmitted(ctx, 7, "ticket-7")) + + blocked, err := store.HasRecheckAttemptFailureBudgetExceeded(ctx, 7, "ticket-7", 2) + require.NoError(t, err) + require.False(t, blocked) + require.NoError(t, store.RecordRecheckAttemptFailure(ctx, 7, "ticket-7", "target", assert.AnError, time.Hour)) + blocked, err = store.HasRecheckAttemptFailureBudgetExceeded(ctx, 7, "ticket-7", 2) + require.NoError(t, err) + require.False(t, blocked) + require.NoError(t, store.RecordRecheckAttemptFailure(ctx, 7, "ticket-7", "target", assert.AnError, time.Hour)) + blocked, err = store.HasRecheckAttemptFailureBudgetExceeded(ctx, 7, "ticket-7", 2) + require.NoError(t, err) + require.True(t, blocked) +} diff --git 
a/pkg/storage/queries/self_healing_lep6.go b/pkg/storage/queries/self_healing_lep6.go index 87d8765e..24958e5c 100644 --- a/pkg/storage/queries/self_healing_lep6.go +++ b/pkg/storage/queries/self_healing_lep6.go @@ -16,9 +16,14 @@ import ( // keyed so every (heal_op_id) or (heal_op_id, verifier) is permitted exactly // once. type LEP6HealQueries interface { - // RecordHealClaim persists a successfully-submitted MsgClaimHealComplete - // for restart-time dedup. Returns ErrLEP6ClaimAlreadyRecorded if the - // heal_op_id row already exists (idempotent on retry). + // RecordPendingHealClaim pre-stages a heal claim before chain submit. + RecordPendingHealClaim(ctx context.Context, healOpID uint64, ticketID, manifestHash, stagingDir string) error + // MarkHealClaimSubmitted flips a pending claim to submitted after chain ack. + MarkHealClaimSubmitted(ctx context.Context, healOpID uint64) error + // DeletePendingHealClaim deletes only a pending claim after hard tx failure. + DeletePendingHealClaim(ctx context.Context, healOpID uint64) error + // RecordHealClaim persists a submitted MsgClaimHealComplete for restart-time + // dedup. Returns ErrLEP6ClaimAlreadyRecorded if the row already exists. RecordHealClaim(ctx context.Context, healOpID uint64, ticketID, manifestHash, stagingDir string) error // HasHealClaim reports whether RecordHealClaim has been called for this // heal_op_id. Used by the dispatcher to skip submission on restart. @@ -34,10 +39,13 @@ type LEP6HealQueries interface { // discarded the staging dir. DeleteHealClaim(ctx context.Context, healOpID uint64) error - // RecordHealVerification persists a successfully-submitted - // MsgSubmitHealVerification for restart-time dedup. Returns - // ErrLEP6VerificationAlreadyRecorded if the (heal_op_id, verifier_account) - // pair already exists. + // RecordPendingHealVerification pre-stages a verifier vote before chain submit. 
+ RecordPendingHealVerification(ctx context.Context, healOpID uint64, verifierAccount string, verified bool, verificationHash string) error + // MarkHealVerificationSubmitted flips a pending vote to submitted after chain ack. + MarkHealVerificationSubmitted(ctx context.Context, healOpID uint64, verifierAccount string) error + // DeletePendingHealVerification deletes only a pending verifier row after hard tx failure. + DeletePendingHealVerification(ctx context.Context, healOpID uint64, verifierAccount string) error + // RecordHealVerification persists a submitted MsgSubmitHealVerification. RecordHealVerification(ctx context.Context, healOpID uint64, verifierAccount string, verified bool, verificationHash string) error // HasHealVerification reports whether the (heal_op_id, verifier_account) // row exists. Verifier dispatch uses this to skip resubmission on @@ -52,6 +60,7 @@ type HealClaimRecord struct { ManifestHash string StagingDir string SubmittedAt int64 + Status string } // ErrLEP6ClaimAlreadyRecorded is returned by RecordHealClaim when the @@ -68,23 +77,39 @@ CREATE TABLE IF NOT EXISTS heal_claims_submitted ( ticket_id TEXT NOT NULL, manifest_hash TEXT NOT NULL, staging_dir TEXT NOT NULL, + status TEXT NOT NULL DEFAULT 'submitted', submitted_at INTEGER NOT NULL );` +const createHealClaimsStatusIndex = `CREATE INDEX IF NOT EXISTS idx_heal_claims_status ON heal_claims_submitted(status);` +const alterHealClaimsSubmittedStatus = `ALTER TABLE heal_claims_submitted ADD COLUMN status TEXT NOT NULL DEFAULT 'submitted';` + const createHealVerificationsSubmitted = ` CREATE TABLE IF NOT EXISTS heal_verifications_submitted ( heal_op_id INTEGER NOT NULL, verifier_account TEXT NOT NULL, verified INTEGER NOT NULL, verification_hash TEXT NOT NULL, + status TEXT NOT NULL DEFAULT 'submitted', submitted_at INTEGER NOT NULL, PRIMARY KEY (heal_op_id, verifier_account) );` +const createHealVerificationsStatusIndex = `CREATE INDEX IF NOT EXISTS idx_heal_verifications_status ON 
heal_verifications_submitted(status);` +const alterHealVerificationsSubmittedStatus = `ALTER TABLE heal_verifications_submitted ADD COLUMN status TEXT NOT NULL DEFAULT 'submitted';` + +func (s *SQLiteStore) RecordPendingHealClaim(ctx context.Context, healOpID uint64, ticketID, manifestHash, stagingDir string) error { + return s.recordHealClaimWithStatus(ctx, healOpID, ticketID, manifestHash, stagingDir, "pending") +} + // RecordHealClaim — see LEP6HealQueries.RecordHealClaim. func (s *SQLiteStore) RecordHealClaim(ctx context.Context, healOpID uint64, ticketID, manifestHash, stagingDir string) error { - const stmt = `INSERT INTO heal_claims_submitted (heal_op_id, ticket_id, manifest_hash, staging_dir, submitted_at) VALUES (?, ?, ?, ?, ?)` - _, err := s.db.ExecContext(ctx, stmt, healOpID, ticketID, manifestHash, stagingDir, time.Now().Unix()) + return s.recordHealClaimWithStatus(ctx, healOpID, ticketID, manifestHash, stagingDir, "submitted") +} + +func (s *SQLiteStore) recordHealClaimWithStatus(ctx context.Context, healOpID uint64, ticketID, manifestHash, stagingDir, status string) error { + const stmt = `INSERT INTO heal_claims_submitted (heal_op_id, ticket_id, manifest_hash, staging_dir, status, submitted_at) VALUES (?, ?, ?, ?, ?, ?)` + _, err := s.db.ExecContext(ctx, stmt, healOpID, ticketID, manifestHash, stagingDir, status, time.Now().Unix()) if err != nil { if isSQLiteUniqueViolation(err) { return ErrLEP6ClaimAlreadyRecorded @@ -94,6 +119,16 @@ func (s *SQLiteStore) RecordHealClaim(ctx context.Context, healOpID uint64, tick return nil } +func (s *SQLiteStore) MarkHealClaimSubmitted(ctx context.Context, healOpID uint64) error { + _, err := s.db.ExecContext(ctx, `UPDATE heal_claims_submitted SET status = 'submitted', submitted_at = ? 
WHERE heal_op_id = ?`, time.Now().Unix(), healOpID) + return err +} + +func (s *SQLiteStore) DeletePendingHealClaim(ctx context.Context, healOpID uint64) error { + _, err := s.db.ExecContext(ctx, `DELETE FROM heal_claims_submitted WHERE heal_op_id = ? AND status = 'pending'`, healOpID) + return err +} + // HasHealClaim — see LEP6HealQueries.HasHealClaim. func (s *SQLiteStore) HasHealClaim(ctx context.Context, healOpID uint64) (bool, error) { const stmt = `SELECT 1 FROM heal_claims_submitted WHERE heal_op_id = ? LIMIT 1` @@ -110,15 +145,15 @@ func (s *SQLiteStore) HasHealClaim(ctx context.Context, healOpID uint64) (bool, // GetHealClaim — see LEP6HealQueries.GetHealClaim. func (s *SQLiteStore) GetHealClaim(ctx context.Context, healOpID uint64) (HealClaimRecord, error) { - const stmt = `SELECT heal_op_id, ticket_id, manifest_hash, staging_dir, submitted_at FROM heal_claims_submitted WHERE heal_op_id = ?` + const stmt = `SELECT heal_op_id, ticket_id, manifest_hash, staging_dir, submitted_at, status FROM heal_claims_submitted WHERE heal_op_id = ?` var r HealClaimRecord - err := s.db.QueryRowContext(ctx, stmt, healOpID).Scan(&r.HealOpID, &r.TicketID, &r.ManifestHash, &r.StagingDir, &r.SubmittedAt) + err := s.db.QueryRowContext(ctx, stmt, healOpID).Scan(&r.HealOpID, &r.TicketID, &r.ManifestHash, &r.StagingDir, &r.SubmittedAt, &r.Status) return r, err } // ListHealClaims — see LEP6HealQueries.ListHealClaims. 
func (s *SQLiteStore) ListHealClaims(ctx context.Context) ([]HealClaimRecord, error) { - const stmt = `SELECT heal_op_id, ticket_id, manifest_hash, staging_dir, submitted_at FROM heal_claims_submitted ORDER BY heal_op_id ASC` + const stmt = `SELECT heal_op_id, ticket_id, manifest_hash, staging_dir, submitted_at, status FROM heal_claims_submitted ORDER BY heal_op_id ASC` rows, err := s.db.QueryContext(ctx, stmt) if err != nil { return nil, err @@ -127,7 +162,7 @@ func (s *SQLiteStore) ListHealClaims(ctx context.Context) ([]HealClaimRecord, er out := make([]HealClaimRecord, 0) for rows.Next() { var r HealClaimRecord - if err := rows.Scan(&r.HealOpID, &r.TicketID, &r.ManifestHash, &r.StagingDir, &r.SubmittedAt); err != nil { + if err := rows.Scan(&r.HealOpID, &r.TicketID, &r.ManifestHash, &r.StagingDir, &r.SubmittedAt, &r.Status); err != nil { return nil, err } out = append(out, r) @@ -142,14 +177,22 @@ func (s *SQLiteStore) DeleteHealClaim(ctx context.Context, healOpID uint64) erro return err } +func (s *SQLiteStore) RecordPendingHealVerification(ctx context.Context, healOpID uint64, verifierAccount string, verified bool, verificationHash string) error { + return s.recordHealVerificationWithStatus(ctx, healOpID, verifierAccount, verified, verificationHash, "pending") +} + // RecordHealVerification — see LEP6HealQueries.RecordHealVerification. 
func (s *SQLiteStore) RecordHealVerification(ctx context.Context, healOpID uint64, verifierAccount string, verified bool, verificationHash string) error { - const stmt = `INSERT INTO heal_verifications_submitted (heal_op_id, verifier_account, verified, verification_hash, submitted_at) VALUES (?, ?, ?, ?, ?)` + return s.recordHealVerificationWithStatus(ctx, healOpID, verifierAccount, verified, verificationHash, "submitted") +} + +func (s *SQLiteStore) recordHealVerificationWithStatus(ctx context.Context, healOpID uint64, verifierAccount string, verified bool, verificationHash, status string) error { + const stmt = `INSERT INTO heal_verifications_submitted (heal_op_id, verifier_account, verified, verification_hash, status, submitted_at) VALUES (?, ?, ?, ?, ?, ?)` verifiedInt := 0 if verified { verifiedInt = 1 } - _, err := s.db.ExecContext(ctx, stmt, healOpID, verifierAccount, verifiedInt, verificationHash, time.Now().Unix()) + _, err := s.db.ExecContext(ctx, stmt, healOpID, verifierAccount, verifiedInt, verificationHash, status, time.Now().Unix()) if err != nil { if isSQLiteUniqueViolation(err) { return ErrLEP6VerificationAlreadyRecorded @@ -159,6 +202,16 @@ func (s *SQLiteStore) RecordHealVerification(ctx context.Context, healOpID uint6 return nil } +func (s *SQLiteStore) MarkHealVerificationSubmitted(ctx context.Context, healOpID uint64, verifierAccount string) error { + _, err := s.db.ExecContext(ctx, `UPDATE heal_verifications_submitted SET status = 'submitted', submitted_at = ? WHERE heal_op_id = ? AND verifier_account = ?`, time.Now().Unix(), healOpID, verifierAccount) + return err +} + +func (s *SQLiteStore) DeletePendingHealVerification(ctx context.Context, healOpID uint64, verifierAccount string) error { + _, err := s.db.ExecContext(ctx, `DELETE FROM heal_verifications_submitted WHERE heal_op_id = ? AND verifier_account = ? AND status = 'pending'`, healOpID, verifierAccount) + return err +} + // HasHealVerification — see LEP6HealQueries.HasHealVerification. 
func (s *SQLiteStore) HasHealVerification(ctx context.Context, healOpID uint64, verifierAccount string) (bool, error) { const stmt = `SELECT 1 FROM heal_verifications_submitted WHERE heal_op_id = ? AND verifier_account = ? LIMIT 1` diff --git a/pkg/storage/queries/self_healing_lep6_test.go b/pkg/storage/queries/self_healing_lep6_test.go index 6dbe8a4d..64fff02b 100644 --- a/pkg/storage/queries/self_healing_lep6_test.go +++ b/pkg/storage/queries/self_healing_lep6_test.go @@ -6,8 +6,9 @@ import ( "path/filepath" "testing" - _ "github.com/mattn/go-sqlite3" "github.com/jmoiron/sqlx" + _ "github.com/mattn/go-sqlite3" + "github.com/stretchr/testify/require" ) func newTestStore(t *testing.T) *SQLiteStore { @@ -18,7 +19,7 @@ func newTestStore(t *testing.T) *SQLiteStore { t.Fatalf("connect: %v", err) } t.Cleanup(func() { _ = db.Close() }) - for _, stmt := range []string{createHealClaimsSubmitted, createHealVerificationsSubmitted} { + for _, stmt := range []string{createHealClaimsSubmitted, createHealVerificationsSubmitted, createStorageRecheckSubmissions, createRecheckAttemptFailures, createRecheckAttemptFailuresExpiresIndex} { if _, err := db.Exec(stmt); err != nil { t.Fatalf("exec migration: %v", err) } @@ -86,3 +87,40 @@ func TestLEP6_HealVerification_PerVerifierDedup(t *testing.T) { t.Fatalf("HasHealVerification(sn-c) should be false: has=%v err=%v", has, err) } } + +func TestLEP6HealClaimPendingLifecycle(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + require.NoError(t, store.RecordPendingHealClaim(ctx, 101, "ticket-101", "manifest", "/tmp/stage")) + has, err := store.HasHealClaim(ctx, 101) + require.NoError(t, err) + require.True(t, has) + + err = store.RecordPendingHealClaim(ctx, 101, "ticket-101", "manifest", "/tmp/stage") + require.ErrorIs(t, err, ErrLEP6ClaimAlreadyRecorded) + + require.NoError(t, store.MarkHealClaimSubmitted(ctx, 101)) + claims, err := store.ListHealClaims(ctx) + require.NoError(t, err) + require.Len(t, claims, 1) + 
require.Equal(t, uint64(101), claims[0].HealOpID) +} + +func TestLEP6HealVerificationPendingLifecycle(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + require.NoError(t, store.RecordPendingHealVerification(ctx, 202, "verifier-a", true, "hash")) + has, err := store.HasHealVerification(ctx, 202, "verifier-a") + require.NoError(t, err) + require.True(t, has) + + err = store.RecordPendingHealVerification(ctx, 202, "verifier-a", true, "hash") + require.ErrorIs(t, err, ErrLEP6VerificationAlreadyRecorded) + + require.NoError(t, store.MarkHealVerificationSubmitted(ctx, 202, "verifier-a")) + has, err = store.HasHealVerification(ctx, 202, "verifier-a") + require.NoError(t, err) + require.True(t, has) +} diff --git a/pkg/storage/queries/sqlite.go b/pkg/storage/queries/sqlite.go index dea02e90..d34a96c3 100644 --- a/pkg/storage/queries/sqlite.go +++ b/pkg/storage/queries/sqlite.go @@ -5,6 +5,7 @@ import ( "fmt" "os" "path/filepath" + "strings" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "github.com/jmoiron/sqlx" @@ -292,21 +293,27 @@ func (s *SQLiteStore) CloseHistoryDB(ctx context.Context) { } } -// OpenHistoryDB opens history DB +// OpenHistoryDB opens history DB in the default supernode home. func OpenHistoryDB() (LocalStoreInterface, error) { - // Always use ~/.supernode as the base directory homeDir, err := os.UserHomeDir() if err != nil { return nil, fmt.Errorf("cannot get user home directory: %w", err) } - historyBasePath := filepath.Join(homeDir, ".supernode") + return OpenHistoryDBAt(filepath.Join(homeDir, ".supernode")) +} + +// OpenHistoryDBAt opens history DB under baseDir. 
+func OpenHistoryDBAt(baseDir string) (LocalStoreInterface, error) { + if strings.TrimSpace(baseDir) == "" { + return nil, fmt.Errorf("history db base directory is required") + } - // Ensure the base directory exists before opening the DB - if err := os.MkdirAll(historyBasePath, 0o755); err != nil { - return nil, fmt.Errorf("cannot create history db directory %q: %w", historyBasePath, err) + // Ensure the base directory exists before opening the DB. + if err := os.MkdirAll(baseDir, 0o755); err != nil { + return nil, fmt.Errorf("cannot create history db directory %q: %w", baseDir, err) } - dbFile := filepath.Join(historyBasePath, historyDBName) + dbFile := filepath.Join(baseDir, historyDBName) db, err := sqlx.Connect("sqlite3", dbFile) if err != nil { return nil, fmt.Errorf("cannot open sqlite database: %w", err) @@ -391,14 +398,32 @@ func OpenHistoryDB() (LocalStoreInterface, error) { if _, err := db.Exec(createHealClaimsSubmitted); err != nil { return nil, fmt.Errorf("cannot create heal_claims_submitted: %w", err) } + _, _ = db.Exec(alterHealClaimsSubmittedStatus) + if _, err := db.Exec(createHealClaimsStatusIndex); err != nil { + return nil, fmt.Errorf("cannot create heal_claims_submitted status index: %w", err) + } if _, err := db.Exec(createHealVerificationsSubmitted); err != nil { return nil, fmt.Errorf("cannot create heal_verifications_submitted: %w", err) } + _, _ = db.Exec(alterHealVerificationsSubmittedStatus) + if _, err := db.Exec(createHealVerificationsStatusIndex); err != nil { + return nil, fmt.Errorf("cannot create heal_verifications_submitted status index: %w", err) + } if _, err := db.Exec(createStorageRecheckSubmissions); err != nil { return nil, fmt.Errorf("cannot create storage_recheck_submissions: %w", err) } + _, _ = db.Exec(alterStorageRecheckSubmissionStatus) + if _, err := db.Exec(createStorageRecheckSubmissionStatusIndex); err != nil { + return nil, fmt.Errorf("cannot create storage_recheck_submissions status index: %w", err) + } + if _, 
err := db.Exec(createRecheckAttemptFailures); err != nil { + return nil, fmt.Errorf("cannot create recheck_attempt_failures: %w", err) + } + if _, err := db.Exec(createRecheckAttemptFailuresExpiresIndex); err != nil { + return nil, fmt.Errorf("cannot create recheck_attempt_failures expires index: %w", err) + } _, _ = db.Exec(alterTaskHistory) diff --git a/pkg/storage/queries/sqlite_open_test.go b/pkg/storage/queries/sqlite_open_test.go new file mode 100644 index 00000000..77aaae14 --- /dev/null +++ b/pkg/storage/queries/sqlite_open_test.go @@ -0,0 +1,27 @@ +package queries + +import ( + "context" + "os" + "path/filepath" + "testing" +) + +func TestOpenHistoryDBAtUsesConfiguredBaseDir(t *testing.T) { + baseDir := t.TempDir() + store, err := OpenHistoryDBAt(baseDir) + if err != nil { + t.Fatalf("OpenHistoryDBAt: %v", err) + } + t.Cleanup(func() { store.CloseHistoryDB(context.Background()) }) + + if _, err := os.Stat(filepath.Join(baseDir, historyDBName)); err != nil { + t.Fatalf("expected history db under configured base dir: %v", err) + } +} + +func TestOpenHistoryDBAtRejectsEmptyBaseDir(t *testing.T) { + if _, err := OpenHistoryDBAt(" "); err == nil { + t.Fatal("expected error for empty base dir") + } +} diff --git a/proto/supernode/status.proto b/proto/supernode/status.proto index d944d614..f78fe087 100644 --- a/proto/supernode/status.proto +++ b/proto/supernode/status.proto @@ -129,4 +129,37 @@ message StatusResponse { } P2PMetrics p2p_metrics = 9; + + // LEP-6 storage-truth runtime metrics and diagnostics. These are in-memory + // counters/gauges reset on process restart, matching the existing typed + // status-snapshot pattern used for P2P metrics. + message LEP6Metrics { + // Storage challenge / dispatch signals. 
+    map<string, uint64> dispatch_results_total = 1;
+    map<string, uint64> dispatch_throttled_total = 2;
+    map<string, uint64> dispatch_epoch_duration_millis_total = 3;
+    map<string, uint64> dispatch_epoch_duration_millis_max = 4;
+    map<string, uint64> dispatch_epoch_duration_count = 5;
+    map<string, uint64> ticket_discovery_total = 6;
+    int64 no_ticket_provider_active = 7;
+
+    // Self-healing signals.
+    map<string, uint64> heal_claims_submitted_total = 8;
+    uint64 heal_claims_reconciled_total = 9;
+    map<string, uint64> heal_verifications_submitted_total = 10;
+    uint64 heal_verifications_already_exists_total = 11;
+    uint64 heal_finalize_publishes_total = 12;
+    map<string, uint64> heal_finalize_cleanups_total = 13;
+    int64 self_healing_pending_claims = 14;
+    int64 self_healing_staging_bytes = 15;
+
+    // Storage recheck signals.
+    uint64 recheck_candidates_found_total = 16;
+    map<string, uint64> recheck_evidence_submitted_total = 17;
+    uint64 recheck_evidence_already_submitted_total = 18;
+    map<string, uint64> recheck_execution_failures_total = 19;
+    int64 recheck_pending_candidates = 20;
+  }
+
+  LEP6Metrics lep6_metrics = 10;
 }
diff --git a/supernode/cmd/helpers.go b/supernode/cmd/helpers.go
index 0d51bc45..7cf612c3 100644
--- a/supernode/cmd/helpers.go
+++ b/supernode/cmd/helpers.go
@@ -63,9 +63,10 @@ func isValidBIP39WordCount(wordCount int) bool {
 // createP2PConfig creates a P2P config from the app config and address
 func createP2PConfig(config *config.Config, address string) *p2p.Config {
 	return &p2p.Config{
-		ListenAddress: config.SupernodeConfig.Host,
-		Port:          config.P2PConfig.Port,
-		DataDir:       config.GetP2PDataDir(),
-		ID:            address,
+		ListenAddress:  config.SupernodeConfig.Host,
+		Port:           config.P2PConfig.Port,
+		DataDir:        config.GetP2PDataDir(),
+		BootstrapNodes: config.P2PConfig.BootstrapNodes,
+		ID:             address,
 	}
 }
diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go
index 353e8b97..58cccaaf 100644
--- a/supernode/cmd/start.go
+++ b/supernode/cmd/start.go
@@ -204,7 +204,7 @@ The supernode will connect to the Lumera network and begin participating in the
 	// logtrace.Info(ctx, "Metrics collection enabled", logtrace.Fields{})
 	//
Storage challenge history DB (shared by the gRPC handler and runner). - historyStore, err := queries.OpenHistoryDB() + historyStore, err := queries.OpenHistoryDBAt(appConfig.BaseDir) if err != nil { logtrace.Fatal(ctx, "Failed to open history DB", logtrace.Fields{"error": err.Error()}) } @@ -260,7 +260,15 @@ The supernode will connect to the Lumera network and begin participating in the if appConfig.StorageChallengeConfig.LEP6.Recheck.Enabled { rc := appConfig.StorageChallengeConfig.LEP6.Recheck tickInterval := time.Duration(rc.TickIntervalMs) * time.Millisecond - recheckCfg := recheckService.Config{Enabled: true, LookbackEpochs: rc.LookbackEpochs, MaxPerTick: rc.MaxPerTick, TickInterval: tickInterval} + failureBackoffTTL := time.Duration(rc.FailureBackoffTTLms) * time.Millisecond + recheckCfg := recheckService.Config{ + Enabled: true, + LookbackEpochs: rc.LookbackEpochs, + MaxPerTick: rc.MaxPerTick, + TickInterval: tickInterval, + MaxFailureAttemptsPerTicket: rc.MaxFailureAttemptsPerTicket, + FailureBackoffTTL: failureBackoffTTL, + } attestor := recheckService.NewAttestor(appConfig.SupernodeConfig.Identity, lumeraClient.AuditMsg(), historyStore) reporterSource := recheckService.NewSupernodeReporterSource(lumeraClient.SuperNode(), appConfig.SupernodeConfig.Identity) recheckRunner, err = recheckService.NewServiceWithReporters(recheckCfg, lumeraClient.Audit(), historyStore, dispatcher, attestor, appConfig.SupernodeConfig.Identity, reporterSource) @@ -293,6 +301,8 @@ The supernode will connect to the Lumera network and begin participating in the StagingRoot: appConfig.SelfHealingConfig.StagingDir, VerifierFetchTimeout: fetchTimeout, VerifierFetchAttempts: appConfig.SelfHealingConfig.VerifierFetchAttempts, + VerifierBackoffBase: time.Duration(appConfig.SelfHealingConfig.VerifierBackoffBaseMs) * time.Millisecond, + AuditQueryTimeout: time.Duration(appConfig.SelfHealingConfig.AuditQueryTimeoutMs) * time.Millisecond, KeyName: appConfig.SupernodeConfig.KeyName, } fetcher 
:= selfHealingService.NewSecureVerifierFetcher(lumeraClient, kr, appConfig.SupernodeConfig.Identity, appConfig.SupernodeConfig.Port) diff --git a/supernode/config.yml b/supernode/config.yml index 350650e2..a4c2b89d 100644 --- a/supernode/config.yml +++ b/supernode/config.yml @@ -32,3 +32,31 @@ storage_challenge: enabled: true poll_interval_ms: 5000 submit_evidence: true + lep6: + # Local challenger toggle only. Chain audit params remain the global + # protocol gate; STORAGE_TRUTH_ENFORCEMENT_MODE_UNSPECIFIED disables + # LEP-6 behavior even when this is true. + enabled: true + max_concurrent_targets: 4 + recipient_read_timeout: 30s + recheck: + enabled: true + lookback_epochs: 7 + max_per_tick: 5 + tick_interval_ms: 60000 + max_failure_attempts_per_ticket: 3 + failure_backoff_ttl_ms: 900000 + +# LEP-6 Self-Healing Configuration +self_healing: + # Local healer/verifier/finalizer toggle only; chain mode remains the + # global protocol gate. + enabled: true + poll_interval_ms: 30000 + max_concurrent_reconstructs: 2 + max_concurrent_verifications: 4 + max_concurrent_publishes: 2 + staging_dir: "heal-staging" + verifier_fetch_timeout_ms: 60000 + verifier_fetch_attempts: 3 + verifier_backoff_base_ms: 2000 diff --git a/supernode/config/config.go b/supernode/config/config.go index 1d1327ea..ed3177b1 100644 --- a/supernode/config/config.go +++ b/supernode/config/config.go @@ -31,8 +31,9 @@ type KeyringConfig struct { } type P2PConfig struct { - Port uint16 `yaml:"port"` - DataDir string `yaml:"data_dir"` + Port uint16 `yaml:"port"` + DataDir string `yaml:"data_dir"` + BootstrapNodes string `yaml:"bootstrap_nodes,omitempty"` } type LumeraClientConfig struct { @@ -79,9 +80,14 @@ type StorageChallengeConfig struct { // flow via x/audit Params and are deliberately omitted here. See // docs/plans/LEP6_SUPERNODE_IMPLEMENTATION_PLAN_v2.md §2.3. type StorageChallengeLEP6Config struct { + // enabledSet tracks whether YAML explicitly provided enabled. 
Plain bools + // cannot distinguish omitted from explicit false, but LEP-6 needs both safe + // default-on local toggles and emergency-disable `enabled: false`. + enabledSet bool `yaml:"-"` + // Enabled gates construction of the LEP6Dispatcher. When false, the - // legacy single-range loop runs alone (default true; PR3 ships LEP-6 - // alongside the legacy loop with internal mode-gating). + // legacy single-range loop runs alone (default true; the chain audit + // StorageTruthEnforcementMode remains the protocol source of truth). Enabled bool `yaml:"enabled"` // MaxConcurrentTargets bounds parallelism inside DispatchEpoch. // Default 4. Reserved for follow-up parallelism work; PR3 dispatch @@ -95,10 +101,17 @@ type StorageChallengeLEP6Config struct { } type StorageRecheckConfig struct { + enabledSet bool `yaml:"-"` + Enabled bool `yaml:"enabled"` LookbackEpochs uint64 `yaml:"lookback_epochs,omitempty"` MaxPerTick int `yaml:"max_per_tick,omitempty"` TickIntervalMs int `yaml:"tick_interval_ms,omitempty"` + // MaxFailureAttemptsPerTicket bounds repeated failed recheck attempts for + // one epoch/ticket before the candidate is temporarily skipped. + MaxFailureAttemptsPerTicket int `yaml:"max_failure_attempts_per_ticket,omitempty"` + // FailureBackoffTTLms is the TTL for recorded recheck attempt failures. + FailureBackoffTTLms int `yaml:"failure_backoff_ttl_ms,omitempty"` } // SelfHealingConfig configures the LEP-6 chain-driven self-healing runtime @@ -106,8 +119,12 @@ type StorageRecheckConfig struct { // the chain's StorageTruthEnforcementMode param — UNSPECIFIED skips the // dispatcher regardless of Enabled. type SelfHealingConfig struct { + // enabledSet tracks explicit YAML emergency-disable vs omitted default. + enabledSet bool `yaml:"-"` + // Enabled toggles the dispatcher and the §19 transport server. Default - // false until activation rollout (PR-6). + // true; chain StorageTruthEnforcementMode=UNSPECIFIED remains the global + // protocol disable. 
Enabled bool `yaml:"enabled"` // PollIntervalMs is the dispatcher tick cadence (default 30000). PollIntervalMs int `yaml:"poll_interval_ms,omitempty"` @@ -127,6 +144,12 @@ type SelfHealingConfig struct { // VerifierFetchAttempts bounds retries when fetching from healer // (default 3). VerifierFetchAttempts int `yaml:"verifier_fetch_attempts,omitempty"` + // VerifierBackoffBaseMs is the exponential retry backoff base between + // healer fetch attempts (default 2000). + VerifierBackoffBaseMs int `yaml:"verifier_backoff_base_ms,omitempty"` + // AuditQueryTimeoutMs bounds each dispatcher chain query so one wedged + // status/params call cannot starve verifier/finalizer work (default 10000). + AuditQueryTimeoutMs int `yaml:"audit_query_timeout_ms,omitempty"` } type Config struct { @@ -230,6 +253,9 @@ func LoadConfig(filename string, baseDir string) (*Config, error) { if config.StorageChallengeConfig.PollIntervalMs == 0 { config.StorageChallengeConfig.PollIntervalMs = DefaultStorageChallengePollIntervalMs } + if err := config.applyLEP6DefaultsAndValidate(); err != nil { + return nil, err + } // Create directories if err := config.EnsureDirs(); err != nil { diff --git a/supernode/config/config_lep6_test.go b/supernode/config/config_lep6_test.go new file mode 100644 index 00000000..b1a4712d --- /dev/null +++ b/supernode/config/config_lep6_test.go @@ -0,0 +1,244 @@ +package config + +import ( + "os" + "path/filepath" + "strings" + "testing" + "time" +) + +func TestLoadConfig_LEP6SafeDefaults(t *testing.T) { + t.Parallel() + + cfg := loadConfigFromBody(t, ` +supernode: + key_name: test-key + identity: lumera1identity000000000000000000000000000000 + host: 0.0.0.0 + port: 4444 +keyring: + backend: test + dir: keys +p2p: + port: 4445 + data_dir: data/p2p +lumera: + grpc_addr: localhost:9090 + chain_id: testing +raptorq: + files_dir: raptorq_files +storage_challenge: + enabled: true +`) + + if !cfg.StorageChallengeConfig.LEP6.Enabled { + t.Fatalf("storage_challenge.lep6.enabled 
default = false, want true so chain mode remains protocol source of truth") + } + if cfg.StorageChallengeConfig.LEP6.MaxConcurrentTargets != DefaultLEP6MaxConcurrentTargets { + t.Fatalf("max_concurrent_targets = %d, want %d", cfg.StorageChallengeConfig.LEP6.MaxConcurrentTargets, DefaultLEP6MaxConcurrentTargets) + } + if cfg.StorageChallengeConfig.LEP6.RecipientReadTimeout != DefaultLEP6RecipientReadTimeout { + t.Fatalf("recipient_read_timeout = %s, want %s", cfg.StorageChallengeConfig.LEP6.RecipientReadTimeout, DefaultLEP6RecipientReadTimeout) + } + if !cfg.StorageChallengeConfig.LEP6.Recheck.Enabled { + t.Fatalf("storage_challenge.lep6.recheck.enabled default = false, want true") + } + if cfg.StorageChallengeConfig.LEP6.Recheck.LookbackEpochs != DefaultLEP6RecheckLookbackEpochs { + t.Fatalf("recheck.lookback_epochs = %d, want %d", cfg.StorageChallengeConfig.LEP6.Recheck.LookbackEpochs, DefaultLEP6RecheckLookbackEpochs) + } + if cfg.StorageChallengeConfig.LEP6.Recheck.MaxPerTick != DefaultLEP6RecheckMaxPerTick { + t.Fatalf("recheck.max_per_tick = %d, want %d", cfg.StorageChallengeConfig.LEP6.Recheck.MaxPerTick, DefaultLEP6RecheckMaxPerTick) + } + if cfg.StorageChallengeConfig.LEP6.Recheck.TickIntervalMs != int(DefaultLEP6RecheckTickInterval/time.Millisecond) { + t.Fatalf("recheck.tick_interval_ms = %d, want %d", cfg.StorageChallengeConfig.LEP6.Recheck.TickIntervalMs, int(DefaultLEP6RecheckTickInterval/time.Millisecond)) + } + if cfg.StorageChallengeConfig.LEP6.Recheck.MaxFailureAttemptsPerTicket != DefaultLEP6RecheckMaxFailureAttemptsPerTicket { + t.Fatalf("recheck.max_failure_attempts_per_ticket = %d, want %d", cfg.StorageChallengeConfig.LEP6.Recheck.MaxFailureAttemptsPerTicket, DefaultLEP6RecheckMaxFailureAttemptsPerTicket) + } + if cfg.StorageChallengeConfig.LEP6.Recheck.FailureBackoffTTLms != int(DefaultLEP6RecheckFailureBackoffTTL/time.Millisecond) { + t.Fatalf("recheck.failure_backoff_ttl_ms = %d, want %d", 
cfg.StorageChallengeConfig.LEP6.Recheck.FailureBackoffTTLms, int(DefaultLEP6RecheckFailureBackoffTTL/time.Millisecond)) + } + + if !cfg.SelfHealingConfig.Enabled { + t.Fatalf("self_healing.enabled default = false, want true so chain UNSPECIFIED is the global protocol gate") + } + if cfg.SelfHealingConfig.PollIntervalMs != int(DefaultSelfHealingPollInterval/time.Millisecond) { + t.Fatalf("self_healing.poll_interval_ms = %d, want %d", cfg.SelfHealingConfig.PollIntervalMs, int(DefaultSelfHealingPollInterval/time.Millisecond)) + } + if cfg.SelfHealingConfig.MaxConcurrentReconstructs != DefaultSelfHealingMaxConcurrentReconstructs { + t.Fatalf("self_healing.max_concurrent_reconstructs = %d, want %d", cfg.SelfHealingConfig.MaxConcurrentReconstructs, DefaultSelfHealingMaxConcurrentReconstructs) + } + if cfg.SelfHealingConfig.MaxConcurrentVerifications != DefaultSelfHealingMaxConcurrentVerifications { + t.Fatalf("self_healing.max_concurrent_verifications = %d, want %d", cfg.SelfHealingConfig.MaxConcurrentVerifications, DefaultSelfHealingMaxConcurrentVerifications) + } + if cfg.SelfHealingConfig.MaxConcurrentPublishes != DefaultSelfHealingMaxConcurrentPublishes { + t.Fatalf("self_healing.max_concurrent_publishes = %d, want %d", cfg.SelfHealingConfig.MaxConcurrentPublishes, DefaultSelfHealingMaxConcurrentPublishes) + } + if cfg.SelfHealingConfig.StagingDir != DefaultSelfHealingStagingDir { + t.Fatalf("self_healing.staging_dir = %q, want %q", cfg.SelfHealingConfig.StagingDir, DefaultSelfHealingStagingDir) + } + if cfg.SelfHealingConfig.VerifierFetchTimeoutMs != int(DefaultSelfHealingVerifierFetchTimeout/time.Millisecond) { + t.Fatalf("self_healing.verifier_fetch_timeout_ms = %d, want %d", cfg.SelfHealingConfig.VerifierFetchTimeoutMs, int(DefaultSelfHealingVerifierFetchTimeout/time.Millisecond)) + } + if cfg.SelfHealingConfig.VerifierFetchAttempts != DefaultSelfHealingVerifierFetchAttempts { + t.Fatalf("self_healing.verifier_fetch_attempts = %d, want %d", 
cfg.SelfHealingConfig.VerifierFetchAttempts, DefaultSelfHealingVerifierFetchAttempts) + } + if cfg.SelfHealingConfig.VerifierBackoffBaseMs != int(DefaultSelfHealingVerifierBackoffBase/time.Millisecond) { + t.Fatalf("self_healing.verifier_backoff_base_ms = %d, want %d", cfg.SelfHealingConfig.VerifierBackoffBaseMs, int(DefaultSelfHealingVerifierBackoffBase/time.Millisecond)) + } +} + +func TestLoadConfig_LEP6EmergencyDisablesRemainFalse(t *testing.T) { + t.Parallel() + + cfg := loadConfigFromBody(t, ` +supernode: + key_name: test-key + identity: lumera1identity000000000000000000000000000000 + host: 0.0.0.0 + port: 4444 +keyring: + backend: test + dir: keys +p2p: + port: 4445 + data_dir: data/p2p +lumera: + grpc_addr: localhost:9090 + chain_id: testing +raptorq: + files_dir: raptorq_files +storage_challenge: + enabled: true + lep6: + enabled: false + recheck: + enabled: false +self_healing: + enabled: false +`) + + if cfg.StorageChallengeConfig.LEP6.Enabled { + t.Fatalf("storage_challenge.lep6.enabled = true, want explicit false emergency disable preserved") + } + if cfg.StorageChallengeConfig.LEP6.Recheck.Enabled { + t.Fatalf("storage_challenge.lep6.recheck.enabled = true, want explicit false emergency disable preserved") + } + if cfg.SelfHealingConfig.Enabled { + t.Fatalf("self_healing.enabled = true, want explicit false emergency disable preserved") + } +} + +func TestLoadConfig_LEP6InvalidNegativeKnobsRejected(t *testing.T) { + t.Parallel() + + cases := map[string]string{ + "dispatcher-targets": "storage_challenge:\n enabled: true\n lep6:\n max_concurrent_targets: -1\n", + "dispatcher-timeout": "storage_challenge:\n enabled: true\n lep6:\n recipient_read_timeout: -1s\n", + "recheck-max": "storage_challenge:\n enabled: true\n lep6:\n recheck:\n max_per_tick: -1\n", + "recheck-ttl": "storage_challenge:\n enabled: true\n lep6:\n recheck:\n failure_backoff_ttl_ms: -1\n", + "healing-poll": "storage_challenge:\n enabled: true\nself_healing:\n poll_interval_ms: -1\n", + 
"healing-backoff": "storage_challenge:\n enabled: true\nself_healing:\n verifier_backoff_base_ms: -1\n", + } + + for name, override := range cases { + name, override := name, override + t.Run(name, func(t *testing.T) { + t.Parallel() + body := baseConfigYAML() + override + dir := t.TempDir() + path := filepath.Join(dir, "supernode.yml") + if err := os.WriteFile(path, []byte(body), 0o600); err != nil { + t.Fatalf("write yaml: %v", err) + } + _, err := LoadConfig(path, dir) + if err == nil { + t.Fatalf("LoadConfig succeeded, want validation error") + } + if !strings.Contains(err.Error(), "LEP-6") { + t.Fatalf("error = %v, want LEP-6 validation context", err) + } + }) + } +} + +func TestCreateDefaultConfig_IncludesExplicitLEP6Blocks(t *testing.T) { + t.Parallel() + + cfg := CreateDefaultConfig("test-key", "lumera1identity", "testing", "test", "keys", "", "", "") + if !cfg.StorageChallengeConfig.LEP6.Enabled || !cfg.StorageChallengeConfig.LEP6.Recheck.Enabled || !cfg.SelfHealingConfig.Enabled { + t.Fatalf("default config should explicitly include enabled LEP-6 local toggles behind chain mode gate: %+v", cfg) + } + if cfg.SelfHealingConfig.StagingDir == "" { + t.Fatalf("default config missing self_healing.staging_dir") + } +} + +func TestSystemConfigFixturesIncludeLEP6(t *testing.T) { + t.Parallel() + + fixtures := []string{ + "../../tests/system/config.lep6-1.yml", + "../../tests/system/config.lep6-2.yml", + "../../tests/system/config.lep6-3.yml", + } + for _, fixture := range fixtures { + fixture := fixture + t.Run(filepath.Base(fixture), func(t *testing.T) { + t.Parallel() + raw, err := os.ReadFile(fixture) + if err != nil { + t.Fatalf("read fixture: %v", err) + } + body := string(raw) + for _, want := range []string{"storage_challenge:", "lep6:", "recheck:", "self_healing:"} { + if !strings.Contains(body, want) { + t.Fatalf("fixture %s missing %q", fixture, want) + } + } + cfg, err := LoadConfig(fixture, t.TempDir()) + if err != nil { + t.Fatalf("LoadConfig(%s): 
%v", fixture, err) + } + if !cfg.StorageChallengeConfig.LEP6.Recheck.Enabled || !cfg.SelfHealingConfig.Enabled { + t.Fatalf("fixture should enable LEP-6 recheck/self-healing runtimes behind chain mode gate: %+v", cfg) + } + }) + } +} + +func loadConfigFromBody(t *testing.T, body string) *Config { + t.Helper() + dir := t.TempDir() + path := filepath.Join(dir, "supernode.yml") + if err := os.WriteFile(path, []byte(body), 0o600); err != nil { + t.Fatalf("write yaml: %v", err) + } + cfg, err := LoadConfig(path, dir) + if err != nil { + t.Fatalf("LoadConfig: %v", err) + } + return cfg +} + +func baseConfigYAML() string { + return ` +supernode: + key_name: test-key + identity: lumera1identity000000000000000000000000000000 + host: 0.0.0.0 + port: 4444 +keyring: + backend: test + dir: keys +p2p: + port: 4445 + data_dir: data/p2p +lumera: + grpc_addr: localhost:9090 + chain_id: testing +raptorq: + files_dir: raptorq_files +` +} diff --git a/supernode/config/defaults.go b/supernode/config/defaults.go index e2cbedc7..1f30fdd3 100644 --- a/supernode/config/defaults.go +++ b/supernode/config/defaults.go @@ -1,5 +1,7 @@ package config +import "time" + // Centralized default values for configuration const ( @@ -13,4 +15,23 @@ const ( DefaultChainID = "testing" DefaultRaptorQFilesDir = "raptorq_files" DefaultStorageChallengePollIntervalMs = 5000 + + DefaultLEP6MaxConcurrentTargets = 4 + DefaultLEP6RecipientReadTimeout = 30 * time.Second + + DefaultLEP6RecheckLookbackEpochs = uint64(7) + DefaultLEP6RecheckMaxPerTick = 5 + DefaultLEP6RecheckTickInterval = time.Minute + DefaultLEP6RecheckMaxFailureAttemptsPerTicket = 3 + DefaultLEP6RecheckFailureBackoffTTL = 15 * time.Minute + + DefaultSelfHealingPollInterval = 30 * time.Second + DefaultSelfHealingMaxConcurrentReconstructs = 2 + DefaultSelfHealingMaxConcurrentVerifications = 4 + DefaultSelfHealingMaxConcurrentPublishes = 2 + DefaultSelfHealingStagingDir = "heal-staging" + DefaultSelfHealingVerifierFetchTimeout = 60 * time.Second + 
DefaultSelfHealingVerifierFetchAttempts = 3 + DefaultSelfHealingVerifierBackoffBase = 2 * time.Second + DefaultSelfHealingAuditQueryTimeout = 10 * time.Second ) diff --git a/supernode/config/lep6.go b/supernode/config/lep6.go new file mode 100644 index 00000000..4ee302c0 --- /dev/null +++ b/supernode/config/lep6.go @@ -0,0 +1,191 @@ +package config + +import ( + "fmt" + "strings" + "time" + + "gopkg.in/yaml.v3" +) + +func (c *Config) UnmarshalYAML(value *yaml.Node) error { + type raw struct { + SupernodeConfig SupernodeConfig `yaml:"supernode"` + KeyringConfig KeyringConfig `yaml:"keyring"` + P2PConfig P2PConfig `yaml:"p2p"` + LumeraClientConfig LumeraClientConfig `yaml:"lumera"` + RaptorQConfig RaptorQConfig `yaml:"raptorq"` + StorageChallengeConfig StorageChallengeConfig `yaml:"storage_challenge"` + SelfHealingConfig SelfHealingConfig `yaml:"self_healing"` + } + var out raw + if err := value.Decode(&out); err != nil { + return err + } + c.SupernodeConfig = out.SupernodeConfig + c.KeyringConfig = out.KeyringConfig + c.P2PConfig = out.P2PConfig + c.LumeraClientConfig = out.LumeraClientConfig + c.RaptorQConfig = out.RaptorQConfig + c.StorageChallengeConfig = out.StorageChallengeConfig + c.SelfHealingConfig = out.SelfHealingConfig + return nil +} + +func (c *StorageChallengeLEP6Config) UnmarshalYAML(value *yaml.Node) error { + type raw StorageChallengeLEP6Config + var out raw + if err := value.Decode(&out); err != nil { + return err + } + *c = StorageChallengeLEP6Config(out) + c.enabledSet = hasYAMLKey(value, "enabled") + return nil +} + +func (c *StorageRecheckConfig) UnmarshalYAML(value *yaml.Node) error { + type raw StorageRecheckConfig + var out raw + if err := value.Decode(&out); err != nil { + return err + } + *c = StorageRecheckConfig(out) + c.enabledSet = hasYAMLKey(value, "enabled") + return nil +} + +func (c *SelfHealingConfig) UnmarshalYAML(value *yaml.Node) error { + type raw SelfHealingConfig + var out raw + if err := value.Decode(&out); err != nil { + 
return err + } + *c = SelfHealingConfig(out) + c.enabledSet = hasYAMLKey(value, "enabled") + return nil +} + +func hasYAMLKey(value *yaml.Node, key string) bool { + if value == nil || value.Kind != yaml.MappingNode { + return false + } + for i := 0; i+1 < len(value.Content); i += 2 { + if value.Content[i].Value == key { + return true + } + } + return false +} + +func (c *Config) applyLEP6DefaultsAndValidate() error { + if !c.StorageChallengeConfig.LEP6.enabledSet { + c.StorageChallengeConfig.LEP6.Enabled = true + } + if c.StorageChallengeConfig.LEP6.MaxConcurrentTargets == 0 { + c.StorageChallengeConfig.LEP6.MaxConcurrentTargets = DefaultLEP6MaxConcurrentTargets + } + if c.StorageChallengeConfig.LEP6.RecipientReadTimeout == 0 { + c.StorageChallengeConfig.LEP6.RecipientReadTimeout = DefaultLEP6RecipientReadTimeout + } + + recheck := &c.StorageChallengeConfig.LEP6.Recheck + if !recheck.enabledSet { + recheck.Enabled = true + } + if recheck.LookbackEpochs == 0 { + recheck.LookbackEpochs = DefaultLEP6RecheckLookbackEpochs + } + if recheck.MaxPerTick == 0 { + recheck.MaxPerTick = DefaultLEP6RecheckMaxPerTick + } + if recheck.TickIntervalMs == 0 { + recheck.TickIntervalMs = int(DefaultLEP6RecheckTickInterval / time.Millisecond) + } + if recheck.MaxFailureAttemptsPerTicket == 0 { + recheck.MaxFailureAttemptsPerTicket = DefaultLEP6RecheckMaxFailureAttemptsPerTicket + } + if recheck.FailureBackoffTTLms == 0 { + recheck.FailureBackoffTTLms = int(DefaultLEP6RecheckFailureBackoffTTL / time.Millisecond) + } + + if !c.SelfHealingConfig.enabledSet { + c.SelfHealingConfig.Enabled = true + } + if c.SelfHealingConfig.PollIntervalMs == 0 { + c.SelfHealingConfig.PollIntervalMs = int(DefaultSelfHealingPollInterval / time.Millisecond) + } + if c.SelfHealingConfig.MaxConcurrentReconstructs == 0 { + c.SelfHealingConfig.MaxConcurrentReconstructs = DefaultSelfHealingMaxConcurrentReconstructs + } + if c.SelfHealingConfig.MaxConcurrentVerifications == 0 { + 
c.SelfHealingConfig.MaxConcurrentVerifications = DefaultSelfHealingMaxConcurrentVerifications + } + if c.SelfHealingConfig.MaxConcurrentPublishes == 0 { + c.SelfHealingConfig.MaxConcurrentPublishes = DefaultSelfHealingMaxConcurrentPublishes + } + if strings.TrimSpace(c.SelfHealingConfig.StagingDir) == "" { + c.SelfHealingConfig.StagingDir = DefaultSelfHealingStagingDir + } + if c.SelfHealingConfig.VerifierFetchTimeoutMs == 0 { + c.SelfHealingConfig.VerifierFetchTimeoutMs = int(DefaultSelfHealingVerifierFetchTimeout / time.Millisecond) + } + if c.SelfHealingConfig.VerifierFetchAttempts == 0 { + c.SelfHealingConfig.VerifierFetchAttempts = DefaultSelfHealingVerifierFetchAttempts + } + if c.SelfHealingConfig.VerifierBackoffBaseMs == 0 { + c.SelfHealingConfig.VerifierBackoffBaseMs = int(DefaultSelfHealingVerifierBackoffBase / time.Millisecond) + } + if c.SelfHealingConfig.AuditQueryTimeoutMs == 0 { + c.SelfHealingConfig.AuditQueryTimeoutMs = int(DefaultSelfHealingAuditQueryTimeout / time.Millisecond) + } + + return c.validateLEP6Config() +} + +func (c *Config) validateLEP6Config() error { + lep6 := c.StorageChallengeConfig.LEP6 + if lep6.MaxConcurrentTargets < 0 { + return fmt.Errorf("LEP-6 config: storage_challenge.lep6.max_concurrent_targets must be >= 0") + } + if lep6.RecipientReadTimeout < 0 { + return fmt.Errorf("LEP-6 config: storage_challenge.lep6.recipient_read_timeout must be >= 0") + } + if lep6.Recheck.MaxPerTick < 0 { + return fmt.Errorf("LEP-6 config: storage_challenge.lep6.recheck.max_per_tick must be >= 0") + } + if lep6.Recheck.TickIntervalMs < 0 { + return fmt.Errorf("LEP-6 config: storage_challenge.lep6.recheck.tick_interval_ms must be >= 0") + } + if lep6.Recheck.MaxFailureAttemptsPerTicket < 0 { + return fmt.Errorf("LEP-6 config: storage_challenge.lep6.recheck.max_failure_attempts_per_ticket must be >= 0") + } + if lep6.Recheck.FailureBackoffTTLms < 0 { + return fmt.Errorf("LEP-6 config: storage_challenge.lep6.recheck.failure_backoff_ttl_ms must be 
>= 0") + } + sh := c.SelfHealingConfig + if sh.PollIntervalMs < 0 { + return fmt.Errorf("LEP-6 config: self_healing.poll_interval_ms must be >= 0") + } + if sh.MaxConcurrentReconstructs < 0 { + return fmt.Errorf("LEP-6 config: self_healing.max_concurrent_reconstructs must be >= 0") + } + if sh.MaxConcurrentVerifications < 0 { + return fmt.Errorf("LEP-6 config: self_healing.max_concurrent_verifications must be >= 0") + } + if sh.MaxConcurrentPublishes < 0 { + return fmt.Errorf("LEP-6 config: self_healing.max_concurrent_publishes must be >= 0") + } + if sh.VerifierFetchTimeoutMs < 0 { + return fmt.Errorf("LEP-6 config: self_healing.verifier_fetch_timeout_ms must be >= 0") + } + if sh.VerifierFetchAttempts < 0 { + return fmt.Errorf("LEP-6 config: self_healing.verifier_fetch_attempts must be >= 0") + } + if sh.VerifierBackoffBaseMs < 0 { + return fmt.Errorf("LEP-6 config: self_healing.verifier_backoff_base_ms must be >= 0") + } + if sh.AuditQueryTimeoutMs < 0 { + return fmt.Errorf("LEP-6 config: self_healing.audit_query_timeout_ms must be >= 0") + } + return nil +} diff --git a/supernode/config/save.go b/supernode/config/save.go index dfa88b7f..80b58ab8 100644 --- a/supernode/config/save.go +++ b/supernode/config/save.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "path/filepath" + "time" "gopkg.in/yaml.v3" ) @@ -61,6 +62,30 @@ func CreateDefaultConfig(keyName, identity, chainID string, keyringBackend, keyr Enabled: true, PollIntervalMs: DefaultStorageChallengePollIntervalMs, SubmitEvidence: true, + LEP6: StorageChallengeLEP6Config{ + Enabled: true, + MaxConcurrentTargets: DefaultLEP6MaxConcurrentTargets, + RecipientReadTimeout: DefaultLEP6RecipientReadTimeout, + Recheck: StorageRecheckConfig{ + Enabled: true, + LookbackEpochs: DefaultLEP6RecheckLookbackEpochs, + MaxPerTick: DefaultLEP6RecheckMaxPerTick, + TickIntervalMs: int(DefaultLEP6RecheckTickInterval / time.Millisecond), + MaxFailureAttemptsPerTicket: DefaultLEP6RecheckMaxFailureAttemptsPerTicket, + FailureBackoffTTLms: 
int(DefaultLEP6RecheckFailureBackoffTTL / time.Millisecond), + }, + }, + }, + SelfHealingConfig: SelfHealingConfig{ + Enabled: true, + PollIntervalMs: int(DefaultSelfHealingPollInterval / time.Millisecond), + MaxConcurrentReconstructs: DefaultSelfHealingMaxConcurrentReconstructs, + MaxConcurrentVerifications: DefaultSelfHealingMaxConcurrentVerifications, + MaxConcurrentPublishes: DefaultSelfHealingMaxConcurrentPublishes, + StagingDir: DefaultSelfHealingStagingDir, + VerifierFetchTimeoutMs: int(DefaultSelfHealingVerifierFetchTimeout / time.Millisecond), + VerifierFetchAttempts: DefaultSelfHealingVerifierFetchAttempts, + VerifierBackoffBaseMs: int(DefaultSelfHealingVerifierBackoffBase / time.Millisecond), }, } } diff --git a/supernode/recheck/attestor.go b/supernode/recheck/attestor.go index 069809c9..eb244383 100644 --- a/supernode/recheck/attestor.go +++ b/supernode/recheck/attestor.go @@ -6,6 +6,7 @@ import ( "strings" audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" + lep6metrics "github.com/LumeraProtocol/supernode/v2/pkg/metrics/lep6" sdktx "github.com/cosmos/cosmos-sdk/types/tx" ) @@ -33,14 +34,27 @@ func (a *Attestor) Submit(ctx context.Context, c Candidate, r RecheckResult) err if strings.TrimSpace(r.TranscriptHash) == "" || !validRecheckResultClass(r.ResultClass) { return fmt.Errorf("invalid recheck result") } + if err := a.store.RecordPendingRecheckSubmission(ctx, c.EpochID, c.TicketID, c.TargetAccount, c.ChallengedTranscriptHash, r.TranscriptHash, r.ResultClass); err != nil { + lep6metrics.IncRecheckSubmission(r.ResultClass.String(), "stage_error") + return fmt.Errorf("stage recheck evidence before submit: %w", err) + } _, err := a.msg.SubmitStorageRecheckEvidence(ctx, c.EpochID, c.TargetAccount, c.TicketID, c.ChallengedTranscriptHash, r.TranscriptHash, r.ResultClass, r.Details) if err != nil { if isAlreadySubmittedError(err) { - return a.store.RecordRecheckSubmission(ctx, c.EpochID, c.TicketID, c.TargetAccount, c.ChallengedTranscriptHash, 
r.TranscriptHash, r.ResultClass) + lep6metrics.IncRecheckAlreadySubmitted() + lep6metrics.IncRecheckSubmission(r.ResultClass.String(), "already_submitted") + return a.store.MarkRecheckSubmissionSubmitted(ctx, c.EpochID, c.TicketID) } + _ = a.store.DeletePendingRecheckSubmission(ctx, c.EpochID, c.TicketID) + lep6metrics.IncRecheckSubmission(r.ResultClass.String(), "submit_error") + return err + } + if err := a.store.MarkRecheckSubmissionSubmitted(ctx, c.EpochID, c.TicketID); err != nil { + lep6metrics.IncRecheckSubmission(r.ResultClass.String(), "mark_error") return err } - return a.store.RecordRecheckSubmission(ctx, c.EpochID, c.TicketID, c.TargetAccount, c.ChallengedTranscriptHash, r.TranscriptHash, r.ResultClass) + lep6metrics.IncRecheckSubmission(r.ResultClass.String(), "submitted") + return nil } func validRecheckResultClass(cls audittypes.StorageProofResultClass) bool { diff --git a/supernode/recheck/attestor_test.go b/supernode/recheck/attestor_test.go index ba7a9729..d3988568 100644 --- a/supernode/recheck/attestor_test.go +++ b/supernode/recheck/attestor_test.go @@ -21,8 +21,8 @@ func TestAttestor_SubmitsThenPersists(t *testing.T) { require.NoError(t, a.Submit(ctx, candidate, result)) require.Len(t, msg.calls, 1) - require.Equal(t, 1, msg.calls[0].callIndex) - require.Greater(t, store.recordCallIndex, msg.calls[0].callIndex) + require.Equal(t, 2, msg.calls[0].callIndex) + require.Less(t, store.recordCallIndex, msg.calls[0].callIndex) exists, err := store.HasRecheckSubmission(ctx, 7, "ticket-1") require.NoError(t, err) require.True(t, exists) diff --git a/supernode/recheck/finder_service_test.go b/supernode/recheck/finder_service_test.go index e1316308..17dda358 100644 --- a/supernode/recheck/finder_service_test.go +++ b/supernode/recheck/finder_service_test.go @@ -109,6 +109,21 @@ func TestService_TickModeGateAndSubmit(t *testing.T) { require.Equal(t, "target", msg.calls[0].target) } +func TestService_TickSkipsRecheckWhenFailureBudgetExhausted(t *testing.T) 
{ + ctx := context.Background() + store := newMemoryStore() + store.failures[key(10, "t")] = 2 + msg := &recordingAuditMsg{} + a := &stubAudit{current: 10, mode: audittypes.StorageTruthEnforcementMode_STORAGE_TRUTH_ENFORCEMENT_MODE_FULL, reports: map[uint64]audittypes.EpochReport{10: {StorageProofResults: []*audittypes.StorageProofResult{resFrom("peer", "t", "target", "h", audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_HASH_MISMATCH)}}}} + r := &stubRechecker{result: RecheckResult{TranscriptHash: "rh", ResultClass: audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_PASS}} + svc, err := NewService(Config{Enabled: true, TickInterval: time.Millisecond, MaxFailureAttemptsPerTicket: 2}, a, store, r, NewAttestor("self", msg, store), "self") + require.NoError(t, err) + + require.NoError(t, svc.Tick(ctx)) + require.Empty(t, r.calls, "recheck execution should be skipped after the per-ticket failure budget is exhausted") + require.Empty(t, msg.calls, "no chain submission should be attempted for a budget-blocked candidate") +} + func TestConfigDefaults(t *testing.T) { got := (Config{}).WithDefaults() require.Equal(t, DefaultLookbackEpochs, got.LookbackEpochs) diff --git a/supernode/recheck/service.go b/supernode/recheck/service.go index 6b0deefa..2aa86db1 100644 --- a/supernode/recheck/service.go +++ b/supernode/recheck/service.go @@ -7,14 +7,17 @@ import ( audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + lep6metrics "github.com/LumeraProtocol/supernode/v2/pkg/metrics/lep6" ) type Config struct { - Enabled bool - LookbackEpochs uint64 - MaxPerTick int - TickInterval time.Duration - Jitter time.Duration + Enabled bool + LookbackEpochs uint64 + MaxPerTick int + TickInterval time.Duration + Jitter time.Duration + MaxFailureAttemptsPerTicket int + FailureBackoffTTL time.Duration } func (c Config) WithDefaults() Config { @@ -30,12 +33,19 @@ func (c Config) WithDefaults() Config { if 
c.Jitter < 0 { c.Jitter = 0 } + if c.MaxFailureAttemptsPerTicket <= 0 { + c.MaxFailureAttemptsPerTicket = DefaultMaxFailureAttemptsPerTicket + } + if c.FailureBackoffTTL <= 0 { + c.FailureBackoffTTL = DefaultFailureBackoffTTL + } return c } type Service struct { cfg Config audit AuditReader + store Store finder *Finder rechecker Rechecker attestor *Attestor @@ -51,7 +61,7 @@ func NewServiceWithReporters(cfg Config, audit AuditReader, store Store, recheck return nil, fmt.Errorf("recheck service missing deps") } finder := NewFinderWithReporters(audit, store, self, FinderConfig{LookbackEpochs: cfg.LookbackEpochs, MaxPerTick: cfg.MaxPerTick}, reporters) - return &Service{cfg: cfg, audit: audit, finder: finder, rechecker: rechecker, attestor: attestor}, nil + return &Service{cfg: cfg, audit: audit, store: store, finder: finder, rechecker: rechecker, attestor: attestor}, nil } func (s *Service) Run(ctx context.Context) error { @@ -98,16 +108,32 @@ func (s *Service) Tick(ctx context.Context) error { if err != nil { return err } + lep6metrics.SetRecheckPendingCandidates(len(candidates)) + _ = s.store.PurgeExpiredRecheckAttemptFailures(ctx) for _, c := range candidates { + lep6metrics.IncRecheckCandidateFound() if err := ctx.Err(); err != nil { return nil } + blocked, err := s.store.HasRecheckAttemptFailureBudgetExceeded(ctx, c.EpochID, c.TicketID, s.cfg.MaxFailureAttemptsPerTicket) + if err != nil { + logtrace.Warn(ctx, "lep6 recheck: failure budget lookup failed", logtrace.Fields{"epoch_id": c.EpochID, "ticket_id": c.TicketID, "error": err.Error()}) + continue + } + if blocked { + logtrace.Warn(ctx, "lep6 recheck: skipping candidate after failure budget exhausted", logtrace.Fields{"epoch_id": c.EpochID, "ticket_id": c.TicketID}) + continue + } result, err := s.rechecker.Recheck(ctx, c) if err != nil { + _ = s.store.RecordRecheckAttemptFailure(ctx, c.EpochID, c.TicketID, c.TargetAccount, err, s.cfg.FailureBackoffTTL) + lep6metrics.IncRecheckFailure("execute") 
logtrace.Warn(ctx, "lep6 recheck: execution failed", logtrace.Fields{"epoch_id": c.EpochID, "ticket_id": c.TicketID, "error": err.Error()}) continue } if err := s.attestor.Submit(ctx, c, result); err != nil { + _ = s.store.RecordRecheckAttemptFailure(ctx, c.EpochID, c.TicketID, c.TargetAccount, err, s.cfg.FailureBackoffTTL) + lep6metrics.IncRecheckFailure("submit") logtrace.Warn(ctx, "lep6 recheck: submit failed", logtrace.Fields{"epoch_id": c.EpochID, "ticket_id": c.TicketID, "error": err.Error()}) } } diff --git a/supernode/recheck/test_helpers_test.go b/supernode/recheck/test_helpers_test.go index 2df95676..5b8deb85 100644 --- a/supernode/recheck/test_helpers_test.go +++ b/supernode/recheck/test_helpers_test.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "time" audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" sdktx "github.com/cosmos/cosmos-sdk/types/tx" @@ -18,20 +19,45 @@ var callSeq int type memoryStore struct { seen map[string]bool + failures map[string]int recordCallIndex int } -func newMemoryStore() *memoryStore { return &memoryStore{seen: map[string]bool{}} } +func newMemoryStore() *memoryStore { + return &memoryStore{seen: map[string]bool{}, failures: map[string]int{}} +} func (m *memoryStore) HasRecheckSubmission(_ context.Context, epochID uint64, ticketID string) (bool, error) { return m.seen[key(epochID, ticketID)], nil } +func (m *memoryStore) RecordPendingRecheckSubmission(_ context.Context, epochID uint64, ticketID, targetAccount, challengedTranscriptHash, recheckTranscriptHash string, resultClass audittypes.StorageProofResultClass) error { + callSeq++ + m.recordCallIndex = callSeq + m.seen[key(epochID, ticketID)] = true + return nil +} +func (m *memoryStore) MarkRecheckSubmissionSubmitted(_ context.Context, epochID uint64, ticketID string) error { + m.seen[key(epochID, ticketID)] = true + return nil +} +func (m *memoryStore) DeletePendingRecheckSubmission(_ context.Context, epochID uint64, ticketID string) error { + 
delete(m.seen, key(epochID, ticketID)) + return nil +} func (m *memoryStore) RecordRecheckSubmission(_ context.Context, epochID uint64, ticketID, targetAccount, challengedTranscriptHash, recheckTranscriptHash string, resultClass audittypes.StorageProofResultClass) error { callSeq++ m.recordCallIndex = callSeq m.seen[key(epochID, ticketID)] = true return nil } -func key(epochID uint64, ticketID string) string { return fmt.Sprintf("%d/%s", epochID, ticketID) } +func (m *memoryStore) RecordRecheckAttemptFailure(_ context.Context, epochID uint64, ticketID, targetAccount string, err error, ttl time.Duration) error { + m.failures[key(epochID, ticketID)]++ + return nil +} +func (m *memoryStore) HasRecheckAttemptFailureBudgetExceeded(_ context.Context, epochID uint64, ticketID string, maxAttempts int) (bool, error) { + return maxAttempts > 0 && m.failures[key(epochID, ticketID)] >= maxAttempts, nil +} +func (m *memoryStore) PurgeExpiredRecheckAttemptFailures(_ context.Context) error { return nil } +func key(epochID uint64, ticketID string) string { return fmt.Sprintf("%d/%s", epochID, ticketID) } type recordingAuditMsg struct { calls []submitCall diff --git a/supernode/recheck/types.go b/supernode/recheck/types.go index b0d6888e..405951e4 100644 --- a/supernode/recheck/types.go +++ b/supernode/recheck/types.go @@ -9,9 +9,11 @@ import ( ) const ( - DefaultLookbackEpochs = uint64(7) - DefaultMaxPerTick = 5 - DefaultTickInterval = time.Minute + DefaultLookbackEpochs = uint64(7) + DefaultMaxPerTick = 5 + DefaultTickInterval = time.Minute + DefaultMaxFailureAttemptsPerTicket = 3 + DefaultFailureBackoffTTL = 15 * time.Minute ) type Outcome int @@ -41,7 +43,13 @@ type RecheckResult struct { type Store interface { HasRecheckSubmission(ctx context.Context, epochID uint64, ticketID string) (bool, error) + RecordPendingRecheckSubmission(ctx context.Context, epochID uint64, ticketID, targetAccount, challengedTranscriptHash, recheckTranscriptHash string, resultClass 
audittypes.StorageProofResultClass) error + MarkRecheckSubmissionSubmitted(ctx context.Context, epochID uint64, ticketID string) error + DeletePendingRecheckSubmission(ctx context.Context, epochID uint64, ticketID string) error RecordRecheckSubmission(ctx context.Context, epochID uint64, ticketID, targetAccount, challengedTranscriptHash, recheckTranscriptHash string, resultClass audittypes.StorageProofResultClass) error + RecordRecheckAttemptFailure(ctx context.Context, epochID uint64, ticketID, targetAccount string, err error, ttl time.Duration) error + HasRecheckAttemptFailureBudgetExceeded(ctx context.Context, epochID uint64, ticketID string, maxAttempts int) (bool, error) + PurgeExpiredRecheckAttemptFailures(ctx context.Context) error } type AuditReader interface { diff --git a/supernode/self_healing/finalizer.go b/supernode/self_healing/finalizer.go index d86d8171..829c81f0 100644 --- a/supernode/self_healing/finalizer.go +++ b/supernode/self_healing/finalizer.go @@ -8,6 +8,7 @@ import ( audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + lep6metrics "github.com/LumeraProtocol/supernode/v2/pkg/metrics/lep6" "github.com/LumeraProtocol/supernode/v2/pkg/storage/queries" ) @@ -81,6 +82,7 @@ func (s *Service) publishStagingDir(ctx context.Context, claim queries.HealClaim if err := s.store.DeleteHealClaim(ctx, claim.HealOpID); err != nil { return fmt.Errorf("delete heal claim row: %w", err) } + lep6metrics.IncHealFinalizePublish() logtrace.Info(ctx, "self_healing(LEP-6): published staged artefacts to KAD", logtrace.Fields{ "heal_op_id": claim.HealOpID, "ticket_id": claim.TicketID, @@ -100,6 +102,7 @@ func (s *Service) cleanupClaim(ctx context.Context, claim queries.HealClaimRecor if err := s.store.DeleteHealClaim(ctx, claim.HealOpID); err != nil { return fmt.Errorf("delete heal claim row: %w", err) } + lep6metrics.IncHealFinalizeCleanup(status.String()) logtrace.Info(ctx, "self_healing(LEP-6): claim 
cleaned up (no publish)", logtrace.Fields{ "heal_op_id": claim.HealOpID, "status": status.String(), diff --git a/supernode/self_healing/healer.go b/supernode/self_healing/healer.go index 7fb6e7f1..ac97bc2f 100644 --- a/supernode/self_healing/healer.go +++ b/supernode/self_healing/healer.go @@ -10,6 +10,7 @@ import ( audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + lep6metrics "github.com/LumeraProtocol/supernode/v2/pkg/metrics/lep6" "github.com/LumeraProtocol/supernode/v2/pkg/storage/queries" cascadeService "github.com/LumeraProtocol/supernode/v2/supernode/cascade" ) @@ -69,11 +70,21 @@ func (s *Service) reconstructAndClaim(ctx context.Context, op audittypes.HealOp) return fmt.Errorf("empty manifest hash") } - // Submit FIRST — let chain be the source of truth. Only persist on - // chain acceptance. + // Pre-stage before chain submit. This closes the restart window where the + // tx is accepted but the process dies before recording local dedup state; + // on restart, the pending row prevents a duplicate submit loop and lets + // finalizer/reconciliation continue from local durable state. + if err := s.store.RecordPendingHealClaim(ctx, op.HealOpId, op.TicketId, manifestHash, stagingDir); err != nil { + if errors.Is(err, queries.ErrLEP6ClaimAlreadyRecorded) { + lep6metrics.IncHealClaim("dedup") + return nil + } + _ = os.RemoveAll(stagingDir) + lep6metrics.IncHealClaim("stage_error") + return fmt.Errorf("stage heal claim before submit: %w", err) + } + if _, err := s.lumera.AuditMsg().ClaimHealComplete(ctx, op.HealOpId, op.TicketId, manifestHash, ""); err != nil { - // If the chain rejected because the op already moved past SCHEDULED - // (a prior submit that we lost the response for), reconcile. 
if isChainHealOpInvalidState(err) { if recErr := s.reconcileExistingClaim(ctx, op, manifestHash, stagingDir); recErr != nil { _ = os.RemoveAll(stagingDir) @@ -81,19 +92,17 @@ func (s *Service) reconstructAndClaim(ctx context.Context, op audittypes.HealOp) } return nil } + _ = s.store.DeletePendingHealClaim(ctx, op.HealOpId) _ = os.RemoveAll(stagingDir) + lep6metrics.IncHealClaim("submit_error") return fmt.Errorf("submit claim: %w", err) } - if err := s.store.RecordHealClaim(ctx, op.HealOpId, op.TicketId, manifestHash, stagingDir); err != nil { - if errors.Is(err, queries.ErrLEP6ClaimAlreadyRecorded) { - // Concurrent tick beat us; staging on disk matches. - return nil - } - // Persist failed but chain accepted — we'll see the row missing - // next tick; reconcileExistingClaim will fix it on retry. - return fmt.Errorf("record heal claim (chain accepted): %w", err) + if err := s.store.MarkHealClaimSubmitted(ctx, op.HealOpId); err != nil { + lep6metrics.IncHealClaim("mark_error") + return fmt.Errorf("mark heal claim submitted (chain accepted): %w", err) } + lep6metrics.IncHealClaim("submitted") logtrace.Info(ctx, "self_healing(LEP-6): claim submitted", logtrace.Fields{ "heal_op_id": op.HealOpId, "ticket_id": op.TicketId, @@ -136,16 +145,25 @@ func (s *Service) reconcileExistingClaim(ctx context.Context, op audittypes.Heal _ = os.RemoveAll(stagingDir) return nil } - // Manifest matches — persist dedup row (no-op if already present) so - // finalizer can publish on VERIFIED. - if err := s.store.RecordHealClaim(ctx, op.HealOpId, op.TicketId, manifestHash, stagingDir); err != nil && !errors.Is(err, queries.ErrLEP6ClaimAlreadyRecorded) { - return fmt.Errorf("record reconciled claim: %w", err) + // Manifest matches — persist/mark dedup row so finalizer can publish on + // VERIFIED. If this tick pre-staged the row before seeing the already-on- + // chain error, mark it submitted; otherwise insert a submitted row. 
+ if err := s.store.RecordHealClaim(ctx, op.HealOpId, op.TicketId, manifestHash, stagingDir); err != nil { + if errors.Is(err, queries.ErrLEP6ClaimAlreadyRecorded) { + if markErr := s.store.MarkHealClaimSubmitted(ctx, op.HealOpId); markErr != nil { + return fmt.Errorf("mark reconciled claim submitted: %w", markErr) + } + } else { + return fmt.Errorf("record reconciled claim: %w", err) + } } logtrace.Info(ctx, "self_healing(LEP-6): reconciled existing chain claim", logtrace.Fields{ "heal_op_id": op.HealOpId, "chain_status": chainOp.Status.String(), "manifest_h": manifestHash, }) + lep6metrics.IncHealClaimReconciled() + lep6metrics.IncHealClaim("reconciled") return nil } diff --git a/supernode/self_healing/mocks_test.go b/supernode/self_healing/mocks_test.go index ec0f5473..90bcdcc1 100644 --- a/supernode/self_healing/mocks_test.go +++ b/supernode/self_healing/mocks_test.go @@ -20,6 +20,7 @@ type programmableAudit struct { opsByStatus map[audittypes.HealOpStatus][]audittypes.HealOp opsByID map[uint64]audittypes.HealOp getOpErr error + blockStatus map[audittypes.HealOpStatus]bool } func newProgrammableAudit(mode audittypes.StorageTruthEnforcementMode) *programmableAudit { @@ -29,6 +30,7 @@ func newProgrammableAudit(mode audittypes.StorageTruthEnforcementMode) *programm }, opsByStatus: map[audittypes.HealOpStatus][]audittypes.HealOp{}, opsByID: map[uint64]audittypes.HealOp{}, + blockStatus: map[audittypes.HealOpStatus]bool{}, } } @@ -65,6 +67,13 @@ func (p *programmableAudit) GetHealOp(ctx context.Context, healOpID uint64) (*au return &audittypes.QueryHealOpResponse{HealOp: op}, nil } func (p *programmableAudit) GetHealOpsByStatus(ctx context.Context, status audittypes.HealOpStatus, pagination *query.PageRequest) (*audittypes.QueryHealOpsByStatusResponse, error) { + p.mu.Lock() + block := p.blockStatus[status] + p.mu.Unlock() + if block { + <-ctx.Done() + return nil, ctx.Err() + } p.mu.Lock() defer p.mu.Unlock() out := make([]audittypes.HealOp, 0, 
len(p.opsByStatus[status])) diff --git a/supernode/self_healing/service.go b/supernode/self_healing/service.go index 73106770..988a4720 100644 --- a/supernode/self_healing/service.go +++ b/supernode/self_healing/service.go @@ -58,6 +58,7 @@ import ( audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + lep6metrics "github.com/LumeraProtocol/supernode/v2/pkg/metrics/lep6" "github.com/LumeraProtocol/supernode/v2/pkg/storage/queries" cascadeService "github.com/LumeraProtocol/supernode/v2/supernode/cascade" "golang.org/x/sync/semaphore" @@ -73,6 +74,7 @@ const ( defaultVerifierFetchTimeout = 60 * time.Second defaultVerifierFetchAttempts = 3 defaultVerifierBackoffBase = 2 * time.Second + defaultAuditQueryTimeout = 10 * time.Second ) // Config captures supernode-binary-owned tunables for the LEP-6 heal runtime. @@ -98,6 +100,12 @@ type Config struct { VerifierFetchAttempts int VerifierBackoffBase time.Duration + // AuditQueryTimeout bounds each chain query made by the dispatcher. A + // wedged status/params query must not pin the whole tick forever and starve + // other roles (especially verifier dispatch while a healer-reported op is + // waiting on quorum before deadline). + AuditQueryTimeout time.Duration + // KeyName is the supernode's keyring key used to sign claim/verification // txs. Must match the on-chain HealerSupernodeAccount / // VerifierSupernodeAccount. @@ -134,6 +142,9 @@ func (c Config) withDefaults() Config { if c.VerifierBackoffBase <= 0 { c.VerifierBackoffBase = defaultVerifierBackoffBase } + if c.AuditQueryTimeout <= 0 { + c.AuditQueryTimeout = defaultAuditQueryTimeout + } return c } @@ -285,7 +296,9 @@ func (s *Service) tick(ctx context.Context) error { // modeGate returns (skip=true) when the chain enforcement mode is // UNSPECIFIED. Heal-ops only exist in SHADOW/SOFT/FULL. 
func (s *Service) modeGate(ctx context.Context) (bool, error) { - resp, err := s.lumera.Audit().GetParams(ctx) + queryCtx, cancel := s.auditQueryContext(ctx) + defer cancel() + resp, err := s.lumera.Audit().GetParams(queryCtx) if err != nil { return false, err } @@ -349,9 +362,19 @@ func (s *Service) dispatchVerifierOps(ctx context.Context) error { if err != nil { return err } + if len(ops) > 0 { + logtrace.Info(ctx, "self_healing(LEP-6): verifier status scan", logtrace.Fields{ + "identity": s.identity, + "ops": len(ops), + }) + } for i := range ops { op := ops[i] if !accountInList(s.identity, op.VerifierSupernodeAccounts) { + logtrace.Debug(ctx, "self_healing(LEP-6): verifier op not assigned locally", logtrace.Fields{ + "identity": s.identity, + "heal_op_id": op.HealOpId, + }) continue } if isFinalStatus(op.Status) { @@ -373,12 +396,21 @@ func (s *Service) dispatchVerifierOps(ctx context.Context) error { } go func(op audittypes.HealOp, key string) { defer s.inFlight.Delete(key) + logtrace.Info(ctx, "self_healing(LEP-6): verifier dispatch start", logtrace.Fields{ + "identity": s.identity, + "heal_op_id": op.HealOpId, + "ticket_id": op.TicketId, + }) if err := s.verifyAndSubmit(ctx, op); err != nil { logtrace.Warn(ctx, "self_healing(LEP-6): verifyAndSubmit", logtrace.Fields{ logtrace.FieldError: err.Error(), "heal_op_id": op.HealOpId, }) } + logtrace.Info(ctx, "self_healing(LEP-6): verifier dispatch end", logtrace.Fields{ + "identity": s.identity, + "heal_op_id": op.HealOpId, + }) }(op, key) } return nil @@ -392,6 +424,8 @@ func (s *Service) dispatchFinalizer(ctx context.Context) error { if err != nil { return err } + lep6metrics.SetSelfHealingPendingClaims(len(claims)) + lep6metrics.SetSelfHealingStagingBytes(totalStagingBytes(claims)) for _, claim := range claims { key := opRoleKey(claim.HealOpID, rolePublisher) if _, loaded := s.inFlight.LoadOrStore(key, struct{}{}); loaded { @@ -412,7 +446,9 @@ func (s *Service) dispatchFinalizer(ctx context.Context) error { // 
listOps wraps the paginated audit query. Returns a flattened slice. func (s *Service) listOps(ctx context.Context, status audittypes.HealOpStatus) ([]audittypes.HealOp, error) { - resp, err := s.lumera.Audit().GetHealOpsByStatus(ctx, status, nil) + queryCtx, cancel := s.auditQueryContext(ctx) + defer cancel() + resp, err := s.lumera.Audit().GetHealOpsByStatus(queryCtx, status, nil) if err != nil { return nil, err } @@ -422,6 +458,33 @@ func (s *Service) listOps(ctx context.Context, status audittypes.HealOpStatus) ( return resp.HealOps, nil } +func (s *Service) auditQueryContext(ctx context.Context) (context.Context, context.CancelFunc) { + timeout := s.cfg.AuditQueryTimeout + if timeout <= 0 { + timeout = defaultAuditQueryTimeout + } + return context.WithTimeout(ctx, timeout) +} + +func totalStagingBytes(claims []queries.HealClaimRecord) int64 { + var total int64 + for _, claim := range claims { + if strings.TrimSpace(claim.StagingDir) == "" { + continue + } + _ = filepath.WalkDir(claim.StagingDir, func(_ string, d os.DirEntry, err error) error { + if err != nil || d == nil || d.IsDir() { + return nil + } + if info, statErr := d.Info(); statErr == nil { + total += info.Size() + } + return nil + }) + } + return total +} + func accountInList(account string, list []string) bool { for _, a := range list { if a == account { diff --git a/supernode/self_healing/service_test.go b/supernode/self_healing/service_test.go index 6559bcd6..3924c669 100644 --- a/supernode/self_healing/service_test.go +++ b/supernode/self_healing/service_test.go @@ -2,6 +2,7 @@ package self_healing import ( "context" + "encoding/base64" "errors" "os" "path/filepath" @@ -13,6 +14,7 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" "github.com/LumeraProtocol/supernode/v2/pkg/storage/queries" cascadeService "github.com/LumeraProtocol/supernode/v2/supernode/cascade" + "lukechampine.com/blake3" ) // helper builds a Service + its hooks for testing. 
Returns Service plus the @@ -209,6 +211,41 @@ func TestVerifier_FetchFailureSubmitsNonEmptyHash(t *testing.T) { } } +func TestNegativeAttestationHashUsesBlake3Convention(t *testing.T) { + reason := "fetch_failed:connection refused" + sum := blake3.Sum256([]byte("lep6:negative-attestation:" + reason)) + want := base64.StdEncoding.EncodeToString(sum[:]) + if got := negativeAttestationHash(reason); got != want { + t.Fatalf("negative attestation hash must use BLAKE3/base64 per LEP-6 storage hash convention; got %q want %q", got, want) + } +} + +func TestDispatcher_StuckScheduledQueryDoesNotStarveVerifier(t *testing.T) { + h := newHarness(t, "sn-verifier", audittypes.StorageTruthEnforcementMode_STORAGE_TRUTH_ENFORCEMENT_MODE_FULL) + h.svc.cfg.AuditQueryTimeout = 20 * time.Millisecond + h.audit.blockStatus[audittypes.HealOpStatus_HEAL_OP_STATUS_SCHEDULED] = true + + body := []byte("verified-even-when-scheduled-query-hangs") + h.audit.put(audittypes.HealOp{ + HealOpId: 14, + TicketId: "ticket-verifier-not-starved", + Status: audittypes.HealOpStatus_HEAL_OP_STATUS_HEALER_REPORTED, + HealerSupernodeAccount: "sn-healer", + VerifierSupernodeAccounts: []string{"sn-verifier"}, + ResultHash: hashOf(t, body), + }) + h.svc.fetcher = &fakeFetcher{body: body} + + if err := h.svc.tick(context.Background()); err != nil { + t.Fatalf("tick should continue past a timed-out scheduled query: %v", err) + } + waitForVerifications(t, h.auditMsg, 1) + _, vc := h.auditMsg.snapshot() + if len(vc) != 1 || !vc[0].Verified { + t.Fatalf("expected verifier dispatch despite stuck scheduled query, got %+v", vc) + } +} + // --------------------------------------------------------------------------- // Test 3 — TestVerifier_FetchesFromAssignedHealerOnly (§19 gate). 
// --------------------------------------------------------------------------- diff --git a/supernode/self_healing/verifier.go b/supernode/self_healing/verifier.go index b8b407a3..bb9e12f6 100644 --- a/supernode/self_healing/verifier.go +++ b/supernode/self_healing/verifier.go @@ -2,7 +2,6 @@ package self_healing import ( "context" - "crypto/sha256" "encoding/base64" "errors" "fmt" @@ -12,7 +11,9 @@ import ( audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + lep6metrics "github.com/LumeraProtocol/supernode/v2/pkg/metrics/lep6" "github.com/LumeraProtocol/supernode/v2/pkg/storage/queries" + "lukechampine.com/blake3" ) // verifyAndSubmit runs LEP-6 §19 Phase 2 for one heal-op. @@ -107,46 +108,50 @@ func (s *Service) submitNegativeWithReason(ctx context.Context, healOpID uint64, return s.submitVerification(ctx, healOpID, false, placeholder, reason) } -// negativeAttestationHash returns a stable non-empty base64 hash derived -// from `reason` so audit trails can correlate identical failure modes. -// Format matches the action.DataHash recipe (32-byte digest, base64) so -// downstream consumers don't have to special-case width. +// negativeAttestationHash returns a stable non-empty BLAKE3/base64 hash +// derived from `reason` so audit trails can correlate identical failure +// modes while staying aligned with LEP-6/Cascade storage hash conventions. +// Format remains a 32-byte digest encoded as base64, so downstream consumers +// don't have to special-case width. func negativeAttestationHash(reason string) string { - sum := sha256.Sum256([]byte("lep6:negative-attestation:" + reason)) + sum := blake3.Sum256([]byte("lep6:negative-attestation:" + reason)) return base64.StdEncoding.EncodeToString(sum[:]) } -// submitVerification submits MsgSubmitHealVerification THEN persists the -// SQLite dedup row only on successful chain acceptance. 
-// -// Idempotency on retry: if the chain has already recorded a verification -// from this verifier (for instance, a previous tick's submit succeeded but -// the supernode crashed before persisting), it returns ErrHealVerification -// Exists. We treat that as success and persist the row so the next tick -// stops retrying. +// submitVerification pre-stages the SQLite dedup row before submitting +// MsgSubmitHealVerification, then marks it submitted after chain acceptance. +// This closes the submit-success/persist-crash window without weakening +// chain authority: on hard tx failure we remove only the pending row so the +// verifier can retry later. func (s *Service) submitVerification(ctx context.Context, healOpID uint64, verified bool, hash, details string) error { + if err := s.store.RecordPendingHealVerification(ctx, healOpID, s.identity, verified, hash); err != nil { + if errors.Is(err, queries.ErrLEP6VerificationAlreadyRecorded) { + lep6metrics.IncHealVerification("dedup", verified) + lep6metrics.IncHealVerificationAlreadyExists() + return nil + } + lep6metrics.IncHealVerification("stage_error", verified) + return fmt.Errorf("stage heal verification before submit: %w", err) + } + resp, err := s.lumera.AuditMsg().SubmitHealVerification(ctx, healOpID, verified, hash, details) if err != nil { - // If the chain already has a verification from us (prior submit - // succeeded but persist crashed), reconcile by persisting the - // dedup row now. 
if isChainVerificationAlreadyExists(err) { - if persistErr := s.store.RecordHealVerification(ctx, healOpID, s.identity, verified, hash); persistErr != nil && !errors.Is(persistErr, queries.ErrLEP6VerificationAlreadyRecorded) { - return fmt.Errorf("reconcile dedup row: %w", persistErr) + if markErr := s.store.MarkHealVerificationSubmitted(ctx, healOpID, s.identity); markErr != nil { + return fmt.Errorf("mark reconciled verification submitted: %w", markErr) } return nil } + _ = s.store.DeletePendingHealVerification(ctx, healOpID, s.identity) + lep6metrics.IncHealVerification("submit_error", verified) return err } _ = resp - // Chain accepted — persist for restart dedup. If row already exists - // (in-flight retry beat us), it's a no-op. - if err := s.store.RecordHealVerification(ctx, healOpID, s.identity, verified, hash); err != nil { - if errors.Is(err, queries.ErrLEP6VerificationAlreadyRecorded) { - return nil - } - return fmt.Errorf("record heal verification: %w", err) + if err := s.store.MarkHealVerificationSubmitted(ctx, healOpID, s.identity); err != nil { + lep6metrics.IncHealVerification("mark_error", verified) + return fmt.Errorf("mark heal verification submitted: %w", err) } + lep6metrics.IncHealVerification("submitted", verified) return nil } diff --git a/supernode/status/service.go b/supernode/status/service.go index c0fb5cab..33d15541 100644 --- a/supernode/status/service.go +++ b/supernode/status/service.go @@ -9,6 +9,7 @@ import ( "github.com/LumeraProtocol/supernode/v2/p2p" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + lep6metrics "github.com/LumeraProtocol/supernode/v2/pkg/metrics/lep6" "github.com/LumeraProtocol/supernode/v2/pkg/task" "github.com/LumeraProtocol/supernode/v2/supernode/config" ) @@ -58,6 +59,42 @@ func (s *SupernodeStatusService) GetChainID() string { return "" } +func lep6StatusMetrics(s lep6metrics.MetricsSnapshot) *pb.StatusResponse_LEP6Metrics { + return 
&pb.StatusResponse_LEP6Metrics{ + DispatchResultsTotal: cloneUint64Map(s.DispatchResultsTotal), + DispatchThrottledTotal: cloneUint64Map(s.DispatchThrottledTotal), + DispatchEpochDurationMillisTotal: cloneUint64Map(s.DispatchEpochDurationMillisTotal), + DispatchEpochDurationMillisMax: cloneUint64Map(s.DispatchEpochDurationMillisMax), + DispatchEpochDurationCount: cloneUint64Map(s.DispatchEpochDurationCount), + TicketDiscoveryTotal: cloneUint64Map(s.TicketDiscoveryTotal), + NoTicketProviderActive: s.NoTicketProviderActive, + HealClaimsSubmittedTotal: cloneUint64Map(s.HealClaimsSubmittedTotal), + HealClaimsReconciledTotal: s.HealClaimsReconciledTotal, + HealVerificationsSubmittedTotal: cloneUint64Map(s.HealVerificationsSubmittedTotal), + HealVerificationsAlreadyExistsTotal: s.HealVerificationsAlreadyExistsTotal, + HealFinalizePublishesTotal: s.HealFinalizePublishesTotal, + HealFinalizeCleanupsTotal: cloneUint64Map(s.HealFinalizeCleanupsTotal), + SelfHealingPendingClaims: s.SelfHealingPendingClaims, + SelfHealingStagingBytes: s.SelfHealingStagingBytes, + RecheckCandidatesFoundTotal: s.RecheckCandidatesFoundTotal, + RecheckEvidenceSubmittedTotal: cloneUint64Map(s.RecheckEvidenceSubmittedTotal), + RecheckEvidenceAlreadySubmittedTotal: s.RecheckEvidenceAlreadySubmittedTotal, + RecheckExecutionFailuresTotal: cloneUint64Map(s.RecheckExecutionFailuresTotal), + RecheckPendingCandidates: s.RecheckPendingCandidates, + } +} + +func cloneUint64Map(in map[string]uint64) map[string]uint64 { + if len(in) == 0 { + return nil + } + out := make(map[string]uint64, len(in)) + for k, v := range in { + out[k] = v + } + return out +} + // GetStatus returns the current system status including optional P2P info func (s *SupernodeStatusService) GetStatus(ctx context.Context, includeP2PMetrics bool) (*pb.StatusResponse, error) { fields := logtrace.Fields{logtrace.FieldMethod: "GetStatus", logtrace.FieldModule: "SupernodeStatusService"} @@ -130,6 +167,11 @@ func (s *SupernodeStatusService) 
GetStatus(ctx context.Context, includeP2PMetric } } + // LEP-6 metrics are cheap in-memory counters/gauges. Include them on every + // status response so operators can inspect storage-truth runtime state through + // the existing status endpoint instead of a LEP-6-only metrics endpoint. + resp.Lep6Metrics = lep6StatusMetrics(lep6metrics.Snapshot()) + if includeP2PMetrics && s.p2pService != nil { // Prepare optional P2P metrics container (only when requested). pm := &pb.StatusResponse_P2PMetrics{ diff --git a/supernode/status/service_test.go b/supernode/status/service_test.go index c1950df8..217e38b2 100644 --- a/supernode/status/service_test.go +++ b/supernode/status/service_test.go @@ -3,6 +3,7 @@ package status import ( "testing" + lep6metrics "github.com/LumeraProtocol/supernode/v2/pkg/metrics/lep6" "github.com/LumeraProtocol/supernode/v2/supernode/config" ) @@ -20,3 +21,38 @@ func TestNewSupernodeStatusService_StoragePathsUsesBaseDir(t *testing.T) { t.Fatalf("unexpected storagePaths: %#v", svc.storagePaths) } } + +func TestStatusResponse_ExposesLEP6MetricsSnapshot(t *testing.T) { + lep6metrics.Reset() + lep6metrics.IncDispatchResult("PASS") + lep6metrics.IncHealClaim("submitted") + lep6metrics.IncHealVerification("submitted", true) + lep6metrics.IncRecheckSubmission("RECHECK_CONFIRMED_FAIL", "submitted") + lep6metrics.SetSelfHealingPendingClaims(2) + t.Cleanup(lep6metrics.Reset) + + svc := NewSupernodeStatusService(nil, nil, nil, nil) + resp, err := svc.GetStatus(t.Context(), false) + if err != nil { + t.Fatalf("GetStatus() error = %v", err) + } + if resp.GetLep6Metrics() == nil { + t.Fatal("GetStatus() did not include LEP-6 metrics snapshot") + } + lep6 := resp.GetLep6Metrics() + if got := lep6.GetDispatchResultsTotal()["pass"]; got != 1 { + t.Fatalf("dispatch pass counter = %d, want 1 (all=%#v)", got, lep6.GetDispatchResultsTotal()) + } + if got := lep6.GetHealClaimsSubmittedTotal()["submitted"]; got != 1 { + t.Fatalf("heal claim submitted counter = %d, want 
1", got) + } + if got := lep6.GetHealVerificationsSubmittedTotal()["verified=positive,result=submitted"]; got != 1 { + t.Fatalf("heal verification submitted counter = %d, want 1", got) + } + if got := lep6.GetRecheckEvidenceSubmittedTotal()["class=recheck_confirmed_fail,outcome=submitted"]; got != 1 { + t.Fatalf("recheck evidence submitted counter = %d, want 1", got) + } + if got := lep6.GetSelfHealingPendingClaims(); got != 2 { + t.Fatalf("self-healing pending claims = %d, want 2", got) + } +} diff --git a/supernode/storage_challenge/lep6_dispatch.go b/supernode/storage_challenge/lep6_dispatch.go index 69f7a800..beb5fa69 100644 --- a/supernode/storage_challenge/lep6_dispatch.go +++ b/supernode/storage_challenge/lep6_dispatch.go @@ -7,6 +7,7 @@ import ( "fmt" "strings" "sync" + "time" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -17,6 +18,7 @@ import ( snkeyring "github.com/LumeraProtocol/supernode/v2/pkg/keyring" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + lep6metrics "github.com/LumeraProtocol/supernode/v2/pkg/metrics/lep6" "github.com/LumeraProtocol/supernode/v2/pkg/storagechallenge" "github.com/LumeraProtocol/supernode/v2/pkg/storagechallenge/deterministic" "github.com/cosmos/cosmos-sdk/crypto/keyring" @@ -168,6 +170,9 @@ func NewLEP6Dispatcher( // Per-target failures are surfaced as StorageProofResult{ResultClass=FAIL} // rather than returning an error. func (d *LEP6Dispatcher) DispatchEpoch(ctx context.Context, epochID uint64) error { + started := time.Now() + defer func() { lep6metrics.ObserveDispatchEpochDuration("challenger", time.Since(started)) }() + paramsResp, err := d.client.Audit().GetParams(ctx) if err != nil { return fmt.Errorf("lep6 dispatch: get params: %w", err) @@ -254,6 +259,7 @@ func (d *LEP6Dispatcher) dispatchTarget( if err != nil { // Treat as transient; emit no-eligible for both buckets so the // chain still sees this epoch covered. 
+ lep6metrics.SetNoTicketProviderActive(true) logtrace.Warn(ctx, "lep6 dispatch: ticket provider error", logtrace.Fields{ "epoch_id": epochID, "target": target, "error": err.Error(), }) @@ -274,12 +280,14 @@ func (d *LEP6Dispatcher) dispatchTarget( } if len(eligibleIDs) == 0 { + lep6metrics.SetNoTicketProviderActive(true) d.appendNoEligible(ctx, epochID, anchor, target, bucket) continue } ticketID := deterministic.SelectTicketForBucket(eligibleIDs, nil, anchor.Seed, target, bucket) if ticketID == "" { + lep6metrics.SetNoTicketProviderActive(true) d.appendNoEligible(ctx, epochID, anchor, target, bucket) continue } @@ -316,6 +324,7 @@ func (d *LEP6Dispatcher) appendNoEligible( } sig, _ := snkeyring.SignBytes(d.keyring, d.keyName, []byte(transcriptHashHex)) + lep6metrics.IncDispatchResult(audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_NO_ELIGIBLE_TICKET.String()) d.buffer.Append(epochID, &audittypes.StorageProofResult{ TargetSupernodeAccount: target, ChallengerSupernodeAccount: d.self, @@ -470,6 +479,7 @@ func (d *LEP6Dispatcher) dispatchTicket( return fmt.Errorf("sign transcript: %w", signErr) } + lep6metrics.IncDispatchResult(audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_PASS.String()) d.buffer.Append(epochID, &audittypes.StorageProofResult{ TargetSupernodeAccount: target, ChallengerSupernodeAccount: d.self, @@ -521,6 +531,7 @@ func (d *LEP6Dispatcher) appendFail( } sig, _ := snkeyring.SignBytes(d.keyring, d.keyName, []byte(transcriptHashHex)) + lep6metrics.IncDispatchResult(resultClass.String()) d.buffer.Append(epochID, &audittypes.StorageProofResult{ TargetSupernodeAccount: target, ChallengerSupernodeAccount: d.self, diff --git a/supernode/storage_challenge/result_buffer.go b/supernode/storage_challenge/result_buffer.go index d1a920f5..25b5c3a9 100644 --- a/supernode/storage_challenge/result_buffer.go +++ b/supernode/storage_challenge/result_buffer.go @@ -7,6 +7,7 @@ import ( audittypes 
"github.com/LumeraProtocol/lumera/x/audit/v1/types" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + lep6metrics "github.com/LumeraProtocol/supernode/v2/pkg/metrics/lep6" "github.com/LumeraProtocol/supernode/v2/pkg/storagechallenge" ) @@ -125,11 +126,13 @@ func throttleResults(epochID uint64, results []*audittypes.StorageProofResult, m kept = append(kept, recent...) kept = append(kept, nonRecent...) + dropped := originalCount - len(kept) + lep6metrics.IncDispatchThrottled("drop-non-RECENT-first", dropped) logtrace.Warn(context.Background(), "storage_challenge: result buffer throttled to chain cap", logtrace.Fields{ "epoch_id": epochID, "original": originalCount, "kept": len(kept), - "dropped": originalCount - len(kept), + "dropped": dropped, "cap": maxKeep, "policy": "drop-non-RECENT-first", }) diff --git a/supernode/storage_challenge/ticket_provider.go b/supernode/storage_challenge/ticket_provider.go index 4b647596..98f7bdc1 100644 --- a/supernode/storage_challenge/ticket_provider.go +++ b/supernode/storage_challenge/ticket_provider.go @@ -7,6 +7,8 @@ import ( actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + lep6metrics "github.com/LumeraProtocol/supernode/v2/pkg/metrics/lep6" + "github.com/cosmos/gogoproto/proto" ) // ChainTicketProvider discovers finalized cascade actions assigned to a target @@ -43,6 +45,7 @@ func (p *ChainTicketProvider) TicketsForTarget(ctx context.Context, targetSupern seen := make(map[string]struct{}, len(resp.Actions)) for _, act := range resp.Actions { if !isEligibleCascadeAction(act, target) { + lep6metrics.IncTicketDiscovery("ineligible") continue } id := strings.TrimSpace(act.ActionID) @@ -53,6 +56,7 @@ func (p *ChainTicketProvider) TicketsForTarget(ctx context.Context, targetSupern continue } seen[id] = struct{}{} + lep6metrics.IncTicketDiscovery("eligible") out = append(out, TicketDescriptor{TicketID: id, AnchorBlock: act.BlockHeight}) } @@ -76,6 +80,9 @@ 
func isEligibleCascadeAction(act *actiontypes.Action, target string) bool { if act.BlockHeight <= 0 { return false } + if !hasValidCascadeMetadata(act.Metadata) { + return false + } for _, sn := range act.SuperNodes { if strings.TrimSpace(sn) == target { return true @@ -83,3 +90,23 @@ func isEligibleCascadeAction(act *actiontypes.Action, target string) bool { } return false } + +func hasValidCascadeMetadata(raw []byte) bool { + if len(raw) == 0 { + return false + } + var meta actiontypes.CascadeMetadata + if err := proto.Unmarshal(raw, &meta); err != nil { + return false + } + if strings.TrimSpace(meta.DataHash) == "" { + return false + } + if meta.RqIdsMax == 0 || len(meta.RqIdsIds) == 0 { + return false + } + if meta.IndexArtifactCount == 0 || meta.SymbolArtifactCount == 0 { + return false + } + return true +} diff --git a/supernode/storage_challenge/ticket_provider_test.go b/supernode/storage_challenge/ticket_provider_test.go index 90e4d311..95981517 100644 --- a/supernode/storage_challenge/ticket_provider_test.go +++ b/supernode/storage_challenge/ticket_provider_test.go @@ -7,6 +7,7 @@ import ( actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" lumeraMock "github.com/LumeraProtocol/supernode/v2/pkg/lumera" actionmod "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action" + "github.com/cosmos/gogoproto/proto" "go.uber.org/mock/gomock" ) @@ -15,15 +16,17 @@ func TestChainTicketProviderFiltersFinalizedCascadeActions(t *testing.T) { client := lumeraMock.NewMockClient(ctrl) actions := actionmod.NewMockModule(ctrl) + metadata := validCascadeMetadata(t) client.EXPECT().Action().Return(actions).Times(2) actions.EXPECT().ListActionsBySuperNode(gomock.Any(), "sn-target").Return(&actiontypes.QueryListActionsBySuperNodeResponse{Actions: []*actiontypes.Action{ - {ActionID: "sym-old", ActionType: actiontypes.ActionTypeCascade, State: actiontypes.ActionStateDone, BlockHeight: 99, SuperNodes: []string{"sn-target"}}, - {ActionID: "sym-approved", 
ActionType: actiontypes.ActionTypeCascade, State: actiontypes.ActionStateApproved, BlockHeight: 100, SuperNodes: []string{"sn-target"}}, - {ActionID: "sym-old", ActionType: actiontypes.ActionTypeCascade, State: actiontypes.ActionStateDone, BlockHeight: 99, SuperNodes: []string{"sn-target"}}, // duplicate - {ActionID: "pending", ActionType: actiontypes.ActionTypeCascade, State: actiontypes.ActionStatePending, BlockHeight: 101, SuperNodes: []string{"sn-target"}}, - {ActionID: "wrong-type", ActionType: actiontypes.ActionTypeSense, State: actiontypes.ActionStateDone, BlockHeight: 102, SuperNodes: []string{"sn-target"}}, - {ActionID: "wrong-target", ActionType: actiontypes.ActionTypeCascade, State: actiontypes.ActionStateDone, BlockHeight: 103, SuperNodes: []string{"other"}}, - {ActionID: "zero-height", ActionType: actiontypes.ActionTypeCascade, State: actiontypes.ActionStateDone, BlockHeight: 0, SuperNodes: []string{"sn-target"}}, + {ActionID: "sym-old", ActionType: actiontypes.ActionTypeCascade, State: actiontypes.ActionStateDone, BlockHeight: 99, SuperNodes: []string{"sn-target"}, Metadata: metadata}, + {ActionID: "sym-approved", ActionType: actiontypes.ActionTypeCascade, State: actiontypes.ActionStateApproved, BlockHeight: 100, SuperNodes: []string{"sn-target"}, Metadata: metadata}, + {ActionID: "sym-old", ActionType: actiontypes.ActionTypeCascade, State: actiontypes.ActionStateDone, BlockHeight: 99, SuperNodes: []string{"sn-target"}, Metadata: metadata}, // duplicate + {ActionID: "pending", ActionType: actiontypes.ActionTypeCascade, State: actiontypes.ActionStatePending, BlockHeight: 101, SuperNodes: []string{"sn-target"}, Metadata: metadata}, + {ActionID: "wrong-type", ActionType: actiontypes.ActionTypeSense, State: actiontypes.ActionStateDone, BlockHeight: 102, SuperNodes: []string{"sn-target"}, Metadata: metadata}, + {ActionID: "wrong-target", ActionType: actiontypes.ActionTypeCascade, State: actiontypes.ActionStateDone, BlockHeight: 103, SuperNodes: 
[]string{"other"}, Metadata: metadata}, + {ActionID: "zero-height", ActionType: actiontypes.ActionTypeCascade, State: actiontypes.ActionStateDone, BlockHeight: 0, SuperNodes: []string{"sn-target"}, Metadata: metadata}, + {ActionID: "bad-metadata", ActionType: actiontypes.ActionTypeCascade, State: actiontypes.ActionStateDone, BlockHeight: 104, SuperNodes: []string{"sn-target"}, Metadata: []byte("not-proto")}, }}, nil) got, err := NewChainTicketProvider(client).TicketsForTarget(context.Background(), "sn-target") @@ -40,3 +43,18 @@ func TestChainTicketProviderFiltersFinalizedCascadeActions(t *testing.T) { t.Fatalf("second sorted ticket mismatch: %#v", got[1]) } } + +func validCascadeMetadata(t *testing.T) []byte { + t.Helper() + bz, err := proto.Marshal(&actiontypes.CascadeMetadata{ + DataHash: "hash", + RqIdsMax: 3, + RqIdsIds: []string{"rq-1"}, + IndexArtifactCount: 1, + SymbolArtifactCount: 1, + }) + if err != nil { + t.Fatalf("marshal metadata: %v", err) + } + return bz +} diff --git a/tests/scripts/setup-supernodes.sh b/tests/scripts/setup-supernodes.sh index da643f5a..39cd9ee0 100755 --- a/tests/scripts/setup-supernodes.sh +++ b/tests/scripts/setup-supernodes.sh @@ -50,7 +50,7 @@ setup_primary() { CGO_ENABLED=1 \ GOOS=linux \ GOARCH=amd64 \ - go build \ + "${GO:-go}" build \ -trimpath \ -ldflags="-s -w" \ -o "$DATA_DIR/supernode" "$SUPERNODE_SRC" || error "Failed to build supernode binary" diff --git a/tests/system/config.lep6-1.yml b/tests/system/config.lep6-1.yml new file mode 100644 index 00000000..82de19ed --- /dev/null +++ b/tests/system/config.lep6-1.yml @@ -0,0 +1,59 @@ +# Note: During tests, local loopback/localhost is allowed by the P2P layer +# when INTEGRATION_TEST=true (set by tests). No change needed here. 
+# Supernode Configuration +supernode: + key_name: "testkey1" + identity: "lumera1em87kgrvgttrkvuamtetyaagjrhnu3vjy44at4" + host: "0.0.0.0" + port: 4444 + gateway_port: 8002 + +# Keyring Configuration +keyring: + backend: "test" # Options: test, file, os + dir: "keys" # Relative to base_dir + password: "keyring-password" + +# P2P Network Configuration +p2p: + port: 4445 + data_dir: "data/p2p" # Relative to base_dir + +# Lumera Chain Configuration +lumera: + grpc_addr: "localhost:9090" + chain_id: "testing" + +# RaptorQ Configuration +raptorq: + files_dir: "raptorq_files" # Relative to base_dir + +storage_challenge: + # Runtime e2e submits deterministic chain reports manually. Keep the automatic + # dispatcher off so pre-upload P2P allowlist refreshes do not perturb the + # Cascade upload baseline before the heal-op is scheduled. + enabled: false + poll_interval_ms: 5000 + submit_evidence: false + lep6: + enabled: false + max_concurrent_targets: 4 + recipient_read_timeout: 30s + recheck: + enabled: true + lookback_epochs: 7 + max_per_tick: 5 + tick_interval_ms: 60000 + max_failure_attempts_per_ticket: 3 + failure_backoff_ttl_ms: 900000 +self_healing: + enabled: true + poll_interval_ms: 5000 + max_concurrent_reconstructs: 2 + max_concurrent_verifications: 4 + max_concurrent_publishes: 2 + staging_dir: supernode-lep6-data1/heal-staging + verifier_fetch_timeout_ms: 60000 + verifier_fetch_attempts: 3 + verifier_backoff_base_ms: 2000 + audit_query_timeout_ms: 2000 diff --git a/tests/system/config.lep6-2.yml b/tests/system/config.lep6-2.yml new file mode 100644 index 00000000..db6eb9ac --- /dev/null +++ b/tests/system/config.lep6-2.yml @@ -0,0 +1,60 @@ +# Note: During tests, local loopback/localhost is allowed by the P2P layer +# when INTEGRATION_TEST=true (set by tests). No change needed here. 
+#hope bulk clever tip road female fly quiz once dose journey sting hedgehog pull area envelope supreme maze project spike brave shed fish live +# Supernode Configuration +supernode: + key_name: "testkey2" + identity: "lumera1cf0ms9ttgdvz6zwlqfty4tjcawhuaq69p40w0c" + host: "0.0.0.0" + port: 4446 + gateway_port: 8003 + +# Keyring Configuration +keyring: + backend: "test" + dir: "keys" + password: "keyring-password" + +# P2P Network Configuration +p2p: + port: 4447 + data_dir: "data/p2p" + +# Lumera Chain Configuration +lumera: + grpc_addr: "localhost:9090" + chain_id: "testing" + +# RaptorQ Configuration +raptorq: + files_dir: "raptorq_files" + +storage_challenge: + # Runtime e2e submits deterministic chain reports manually. Keep the automatic + # dispatcher off so pre-upload P2P allowlist refreshes do not perturb the + # Cascade upload baseline before the heal-op is scheduled. + enabled: false + poll_interval_ms: 5000 + submit_evidence: false + lep6: + enabled: false + max_concurrent_targets: 4 + recipient_read_timeout: 30s + recheck: + enabled: true + lookback_epochs: 7 + max_per_tick: 5 + tick_interval_ms: 60000 + max_failure_attempts_per_ticket: 3 + failure_backoff_ttl_ms: 900000 +self_healing: + enabled: true + poll_interval_ms: 5000 + max_concurrent_reconstructs: 2 + max_concurrent_verifications: 4 + max_concurrent_publishes: 2 + staging_dir: supernode-lep6-data2/heal-staging + verifier_fetch_timeout_ms: 60000 + verifier_fetch_attempts: 3 + verifier_backoff_base_ms: 2000 + audit_query_timeout_ms: 2000 diff --git a/tests/system/config.lep6-3.yml b/tests/system/config.lep6-3.yml new file mode 100644 index 00000000..ccccebc3 --- /dev/null +++ b/tests/system/config.lep6-3.yml @@ -0,0 +1,60 @@ +# Note: During tests, local loopback/localhost is allowed by the P2P layer +# when INTEGRATION_TEST=true (set by tests). No change needed here. 
+#young envelope urban crucial denial zone toward mansion protect bonus exotic puppy resource pistol expand tell cupboard radio hurry world radio trust explain million +# Supernode Configuration +supernode: + key_name: "testkey3" + identity: "lumera1cjyc4ruq739e2lakuhargejjkr0q5vg6x3d7kp" + host: "0.0.0.0" + port: 4448 + gateway_port: 8004 + +# Keyring Configuration +keyring: + backend: "test" + dir: "keys" + password: "keyring-password" + +# P2P Network Configuration +p2p: + port: 4449 + data_dir: "data/p2p" + +# Lumera Chain Configuration +lumera: + grpc_addr: "localhost:9090" + chain_id: "testing" + +# RaptorQ Configuration +raptorq: + files_dir: "raptorq_files" + +storage_challenge: + # Runtime e2e submits deterministic chain reports manually. Keep the automatic + # dispatcher off so pre-upload P2P allowlist refreshes do not perturb the + # Cascade upload baseline before the heal-op is scheduled. + enabled: false + poll_interval_ms: 5000 + submit_evidence: false + lep6: + enabled: false + max_concurrent_targets: 4 + recipient_read_timeout: 30s + recheck: + enabled: true + lookback_epochs: 7 + max_per_tick: 5 + tick_interval_ms: 60000 + max_failure_attempts_per_ticket: 3 + failure_backoff_ttl_ms: 900000 +self_healing: + enabled: true + poll_interval_ms: 5000 + max_concurrent_reconstructs: 2 + max_concurrent_verifications: 4 + max_concurrent_publishes: 2 + staging_dir: supernode-lep6-data3/heal-staging + verifier_fetch_timeout_ms: 60000 + verifier_fetch_attempts: 3 + verifier_backoff_base_ms: 2000 + audit_query_timeout_ms: 2000 diff --git a/tests/system/e2e_lep6_helpers_test.go b/tests/system/e2e_lep6_helpers_test.go new file mode 100644 index 00000000..bd8ccbc4 --- /dev/null +++ b/tests/system/e2e_lep6_helpers_test.go @@ -0,0 +1,964 @@ +//go:build system_test + +package system + +// This file contains helper functions used by the Supernode LEP-6 system tests. 
+// +// Why helpers exist here: +// - The audit module behavior depends heavily on block height (epoch boundaries). +// - The systemtest harness runs a real multi-node testnet; we need stable ways to: +// - pick a safe epoch to test against (avoid racing enforcement), +// - derive deterministic peer targets (same logic as the keeper), +// - submit reports via CLI, +// - query results reliably (gRPC where CLI JSON marshalling is known to break). + +import ( + "bytes" + "context" + "encoding/binary" + "encoding/json" + "fmt" + "sort" + "strconv" + "strings" + "testing" + "time" + + client "github.com/cometbft/cometbft/rpc/client/http" + "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" + "github.com/tidwall/sjson" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "lukechampine.com/blake3" + + audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" +) + +// setAuditParamsForFastEpochs overrides audit module params in genesis so tests complete quickly. +func setAuditParamsForFastEpochs(t *testing.T, epochLengthBlocks uint64, peerQuorumReports, minTargets, maxTargets uint32, requiredOpenPorts []uint32) GenesisMutator { + return func(genesis []byte) []byte { + t.Helper() + + state := genesis + var err error + + state, err = sjson.SetRawBytes(state, "app_state.audit.params.epoch_length_blocks", []byte(fmt.Sprintf("%q", strconv.FormatUint(epochLengthBlocks, 10)))) + require.NoError(t, err) + // In system tests, start epoch 0 at height 1 (the first block height on a fresh chain). 
+ state, err = sjson.SetRawBytes(state, "app_state.audit.params.epoch_zero_height", []byte(fmt.Sprintf("%q", "1"))) + require.NoError(t, err) + + state, err = sjson.SetRawBytes(state, "app_state.audit.params.peer_quorum_reports", []byte(strconv.FormatUint(uint64(peerQuorumReports), 10))) + require.NoError(t, err) + state, err = sjson.SetRawBytes(state, "app_state.audit.params.min_probe_targets_per_epoch", []byte(strconv.FormatUint(uint64(minTargets), 10))) + require.NoError(t, err) + state, err = sjson.SetRawBytes(state, "app_state.audit.params.max_probe_targets_per_epoch", []byte(strconv.FormatUint(uint64(maxTargets), 10))) + require.NoError(t, err) + + portsJSON, err := json.Marshal(requiredOpenPorts) + require.NoError(t, err) + state, err = sjson.SetRawBytes(state, "app_state.audit.params.required_open_ports", portsJSON) + require.NoError(t, err) + + return state + } +} + +// setSupernodeParamsForAuditTests keeps supernode registration permissive for test environments. +// +// These tests register supernodes and then submit audit reports "on their behalf" using node keys. +// We keep minimum stake and min version permissive so registration is not the bottleneck. +func setSupernodeParamsForAuditTests(t *testing.T) GenesisMutator { + return func(genesis []byte) []byte { + t.Helper() + + state, err := sjson.SetRawBytes(genesis, "app_state.supernode.params.min_supernode_version", []byte(`"0.0.0"`)) + require.NoError(t, err) + + coinJSON := `{"denom":"ulume","amount":"1"}` + state, err = sjson.SetRawBytes(state, "app_state.supernode.params.minimum_stake_for_sn", []byte(coinJSON)) + require.NoError(t, err) + + return state + } +} + +// ── genesis mutators ───────────────────────────────────────────────────────── + +// setStorageTruthTestParams returns a genesis mutator that overrides storage-truth params +// to enable enforcement at low thresholds so single-recheck submissions are observable. +// +// - mode: proto enum name (e.g. 
"STORAGE_TRUTH_ENFORCEMENT_MODE_SOFT") +// - postponeThreshold: suspicion score at which the node is postponed (SOFT/FULL only) +// - watchThreshold: suspicion score at which Watch band begins +// - healThreshold: ticket deterioration score at which heal ops are scheduled +// - decayPerEpoch: score decay factor per epoch; 0 maps to 1000/no decay for tests +// - maxHealOps: maximum self-heal ops scheduled per epoch +func setStorageTruthTestParams( + t *testing.T, + mode string, + postponeThreshold, watchThreshold, healThreshold, decayPerEpoch int64, + maxHealOps uint32, +) GenesisMutator { + return func(genesis []byte) []byte { + t.Helper() + state := genesis + var err error + + // Enum: proto3 JSON string. + state, err = sjson.SetRawBytes(state, + "app_state.audit.params.storage_truth_enforcement_mode", + []byte(fmt.Sprintf("%q", mode))) + require.NoError(t, err) + + // int64 thresholds: proto3 JSON represents int64 as strings. + state, err = sjson.SetRawBytes(state, + "app_state.audit.params.storage_truth_node_suspicion_threshold_postpone", + []byte(fmt.Sprintf("%q", strconv.FormatInt(postponeThreshold, 10)))) + require.NoError(t, err) + + // Set probation midway between watch and postpone. 
+ probation := (watchThreshold + postponeThreshold) / 2 + state, err = sjson.SetRawBytes(state, + "app_state.audit.params.storage_truth_node_suspicion_threshold_probation", + []byte(fmt.Sprintf("%q", strconv.FormatInt(probation, 10)))) + require.NoError(t, err) + + state, err = sjson.SetRawBytes(state, + "app_state.audit.params.storage_truth_node_suspicion_threshold_watch", + []byte(fmt.Sprintf("%q", strconv.FormatInt(watchThreshold, 10)))) + require.NoError(t, err) + + state, err = sjson.SetRawBytes(state, + "app_state.audit.params.storage_truth_ticket_deterioration_heal_threshold", + []byte(fmt.Sprintf("%q", strconv.FormatInt(healThreshold, 10)))) + require.NoError(t, err) + + effectiveDecay := decayPerEpoch + if effectiveDecay == 0 { + effectiveDecay = 1000 + } + state, err = sjson.SetRawBytes(state, + "app_state.audit.params.storage_truth_node_suspicion_decay_per_epoch", + []byte(fmt.Sprintf("%q", strconv.FormatInt(effectiveDecay, 10)))) + require.NoError(t, err) + + // uint32: proto3 JSON number. + state, err = sjson.SetRawBytes(state, + "app_state.audit.params.storage_truth_max_self_heal_ops_per_epoch", + []byte(strconv.FormatUint(uint64(maxHealOps), 10))) + require.NoError(t, err) + + // Extend the local-system-test heal deadline so real reconstruction, + // verifier polling, and tx commit latency fit inside the compressed epoch + // cadence. This preserves production defaults outside the isolated e2e + // genesis. + state, err = sjson.SetRawBytes(state, + "app_state.audit.params.storage_truth_heal_deadline_epochs", + []byte("10")) + require.NoError(t, err) + + // divisor=1 ensures every active node gets an assignment so tests can always + // find a prober for any target (needed to seed transcript records for recheck). + state, err = sjson.SetRawBytes(state, + "app_state.audit.params.storage_truth_challenge_target_divisor", + []byte("1")) + require.NoError(t, err) + + // strong_postpone must be >= postpone to satisfy params.Validate() in InitGenesis. 
+ strongPostpone := postponeThreshold + 200 + state, err = sjson.SetRawBytes(state, + "app_state.audit.params.storage_truth_node_suspicion_threshold_strong_postpone", + []byte(fmt.Sprintf("%q", strconv.FormatInt(strongPostpone, 10)))) + require.NoError(t, err) + + state = seedStorageTruthSyntheticTicketCounts(t, state) + + return state + } +} + +func awaitAtLeastHeight(t *testing.T, height int64, timeout ...time.Duration) { + t.Helper() + if sut.currentHeight >= height { + return + } + sut.AwaitBlockHeight(t, height, timeout...) +} + +// pickEpochForStartAtOrAfter returns the first epoch whose start height is >= minStartHeight. +// This is a "ceiling" epoch picker. +func pickEpochForStartAtOrAfter(originHeight int64, epochBlocks uint64, minStartHeight int64) (epochID uint64, startHeight int64) { + if epochBlocks == 0 { + return 0, originHeight + } + if minStartHeight < originHeight { + minStartHeight = originHeight + } + + blocks := int64(epochBlocks) + delta := minStartHeight - originHeight + epochID = uint64((delta + blocks - 1) / blocks) // ceil(delta/blocks) + startHeight = originHeight + int64(epochID)*blocks + return epochID, startHeight +} + +// nextEpochAfterHeight returns the next epoch after the provided height. +// +// We use this in tests to: +// - register supernodes first, +// - then wait for the *next* epoch boundary to ensure snapshot inclusion and acceptance. 
+func nextEpochAfterHeight(originHeight int64, epochBlocks uint64, height int64) (epochID uint64, startHeight int64) { + if epochBlocks == 0 { + return 0, originHeight + } + if height < originHeight { + return 0, originHeight + } + blocks := int64(epochBlocks) + currentID := uint64((height - originHeight) / blocks) + epochID = currentID + 1 + startHeight = originHeight + int64(epochID)*blocks + return epochID, startHeight +} + +type testNodeIdentity struct { + nodeName string + accAddr string + valAddr string +} + +// getNodeIdentity reads the node's account and validator operator address from the systemtest keyring. +func getNodeIdentity(t *testing.T, cli *LumeradCli, nodeName string) testNodeIdentity { + t.Helper() + accAddr := cli.GetKeyAddr(nodeName) + valAddr := strings.TrimSpace(cli.Keys("keys", "show", nodeName, "--bech", "val", "-a")) + require.NotEmpty(t, accAddr) + require.NotEmpty(t, valAddr) + return testNodeIdentity{nodeName: nodeName, accAddr: accAddr, valAddr: valAddr} +} + +// registerSupernode registers a supernode using the node's own key as both: +// - the tx signer (via --from), +// - the supernode_account (so that later MsgSubmitEpochReport signatures match). +func registerSupernode(t *testing.T, cli *LumeradCli, id testNodeIdentity, ip string) { + t.Helper() + resp := cli.CustomCommand( + "tx", "supernode", "register-supernode", + id.valAddr, + ip, + id.accAddr, + "--from", id.nodeName, + ) + RequireTxSuccess(t, resp) + sut.AwaitNextBlock(t) +} + +// headerHashAtHeight fetches the block header hash at an exact height. +// The audit module uses ctx.HeaderHash() as the snapshot seed; the assignment logic needs this seed. 
+func headerHashAtHeight(t *testing.T, rpcAddr string, height int64) []byte { + t.Helper() + httpClient, err := client.New(rpcAddr, "/websocket") + require.NoError(t, err) + require.NoError(t, httpClient.Start()) + t.Cleanup(func() { _ = httpClient.Stop() }) + + res, err := httpClient.Block(context.Background(), &height) + require.NoError(t, err) + hash := res.Block.Header.Hash() + require.True(t, len(hash) >= 8, "expected header hash >= 8 bytes") + return []byte(hash) +} + +func epochSeedAtHeight(t *testing.T, rpcAddr string, height int64, epochID uint64) []byte { + t.Helper() + + raw := headerHashAtHeight(t, rpcAddr, height) + epochBz := make([]byte, 8) + binary.BigEndian.PutUint64(epochBz, epochID) + + var msg bytes.Buffer + msg.WriteString("lumera:epoch-seed") + msg.Write(raw) + msg.Write(epochBz) + + sum := blake3.Sum256(msg.Bytes()) + out := make([]byte, len(sum)) + copy(out, sum[:]) + return out +} + +// computeKEpoch replicates x/audit/v1/keeper.computeKWindow to keep tests deterministic and black-box. +// It computes how many peer targets each sender must probe this epoch. +func computeKEpoch(peerQuorumReports, minTargets, maxTargets uint32, sendersCount, receiversCount int) uint32 { + if sendersCount <= 0 || receiversCount <= 1 { + return 0 + } + + a := uint64(sendersCount) + n := uint64(receiversCount) + q := uint64(peerQuorumReports) + kNeeded := (q*n + a - 1) / a + + kMin := uint64(minTargets) + kMax := uint64(maxTargets) + if kNeeded < kMin { + kNeeded = kMin + } + if kNeeded > kMax { + kNeeded = kMax + } + if kNeeded > n-1 { + kNeeded = n - 1 + } + + return uint32(kNeeded) +} + +// assignedTargets replicates x/audit/v1/keeper.assignedTargets. +// +// Notes: +// - The assignment is order-sensitive; the module enforces that peer observations match targets by index. +// - We use this to build exactly-valid test reports. 
+func assignedTargets(seed []byte, senders, receivers []string, kWindow uint32, senderSupernodeAccount string) ([]string, bool) { + k := int(kWindow) + if k == 0 || len(receivers) == 0 { + return []string{}, true + } + + senderIndex := -1 + for i, s := range senders { + if s == senderSupernodeAccount { + senderIndex = i + break + } + } + if senderIndex < 0 { + return nil, false + } + if len(seed) < 8 { + return nil, false + } + + n := len(receivers) + offsetU64 := binary.BigEndian.Uint64(seed[:8]) + offset := int(offsetU64 % uint64(n)) + + seen := make(map[int]struct{}, k) + out := make([]string, 0, k) + + for j := 0; j < k; j++ { + slot := senderIndex*k + j + candidate := (offset + slot) % n + + tries := 0 + for tries < n { + if receivers[candidate] != senderSupernodeAccount { + if _, ok := seen[candidate]; !ok { + break + } + } + candidate = (candidate + 1) % n + tries++ + } + if tries >= n { + break + } + + seen[candidate] = struct{}{} + out = append(out, receivers[candidate]) + } + + return out, true +} + +// openPortStates builds PORT_STATE_OPEN entries sized to the keeper-assigned +// required_open_ports list. The audit module rejects reports whose port-state +// count does not match the assigned requirement. +func openPortStates(requiredOpenPorts []uint32) []string { + portStates := make([]string, len(requiredOpenPorts)) + for i := range portStates { + portStates[i] = "PORT_STATE_OPEN" + } + return portStates +} + +// auditHostReportJSON builds the JSON payload for the positional host-report argument. +// HostReport contains float fields (cpu/mem/disk), so we keep values simple. 
+func auditHostReportJSON(inboundPortStates []string) string { + bz, _ := json.Marshal(map[string]any{ + "cpu_usage_percent": 1.0, + "mem_usage_percent": 1.0, + "disk_usage_percent": 1.0, + "inbound_port_states": inboundPortStates, + "failed_actions_count": 0, + }) + return string(bz) +} + +// storageChallengeObservationJSON builds the JSON payload for --storage-challenge-observations flag. +func storageChallengeObservationJSON(targetSupernodeAccount string, portStates []string) string { + bz, _ := json.Marshal(map[string]any{ + "target_supernode_account": targetSupernodeAccount, + "port_states": portStates, + }) + return string(bz) +} + +// submitEpochReport submits a report using the AutoCLI command: +// +// tx audit submit-epoch-report [epoch-id] [host-report-json] --storage-challenge-observations ... +// +// We keep it as a CLI call to validate the end-to-end integration path (signer handling, encoding). +func submitEpochReport(t *testing.T, cli *LumeradCli, fromNode string, epochID uint64, hostReportJSON string, storageChallengeObservationJSONs []string) string { + t.Helper() + + args := []string{"tx", "audit", "submit-epoch-report", strconv.FormatUint(epochID, 10), hostReportJSON, "--from", fromNode} + for _, obs := range storageChallengeObservationJSONs { + args = append(args, "--storage-challenge-observations", obs) + } + + return cli.CustomCommand(args...) +} + +// querySupernodeLatestState reads the latest supernode state string (e.g. "SUPERNODE_STATE_POSTPONED") via CLI JSON. 
+func querySupernodeLatestState(t *testing.T, cli *LumeradCli, validatorAddress string) string { + t.Helper() + resp := cli.CustomQuery("q", "supernode", "get-supernode", validatorAddress) + states := gjson.Get(resp, "supernode.states") + require.True(t, states.Exists(), "missing states: %s", resp) + arr := states.Array() + require.NotEmpty(t, arr, "missing states: %s", resp) + return arr[len(arr)-1].Get("state").String() +} + +// gjsonUint64 is a small helper because some CLI outputs represent uint64 as strings. +func gjsonUint64(v gjson.Result) uint64 { + if !v.Exists() { + return 0 + } + if v.Type == gjson.Number { + return uint64(v.Uint()) + } + if v.Type == gjson.String { + out, err := strconv.ParseUint(v.String(), 10, 64) + if err != nil { + return 0 + } + return out + } + return 0 +} + +func sortedStrings(in ...string) []string { + out := append([]string(nil), in...) + sort.Strings(out) + return out +} + +// newAuditQueryClient creates a gRPC query client against node0's gRPC endpoint. +// +// - `EpochReport` contains float fields; CLI JSON marshalling for those fields is currently broken +// in this environment and fails with "unknown type float64". +func newAuditQueryClient(t *testing.T) (audittypes.QueryClient, func()) { + t.Helper() + conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials())) + require.NoError(t, err) + closeFn := func() { _ = conn.Close() } + t.Cleanup(closeFn) + return audittypes.NewQueryClient(conn), closeFn +} + +// auditQueryReport queries a stored report via gRPC. 
+func auditQueryReport(t *testing.T, epochID uint64, reporterSupernodeAccount string) audittypes.EpochReport { + t.Helper() + qc, _ := newAuditQueryClient(t) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + resp, err := qc.EpochReport(ctx, &audittypes.QueryEpochReportRequest{ + EpochId: epochID, + SupernodeAccount: reporterSupernodeAccount, + }) + require.NoError(t, err) + return resp.Report +} + +func auditQueryReporterReliabilityState(t *testing.T, reporterSupernodeAccount string) audittypes.ReporterReliabilityState { + t.Helper() + qc, _ := newAuditQueryClient(t) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + resp, err := qc.ReporterReliabilityState(ctx, &audittypes.QueryReporterReliabilityStateRequest{ + ReporterSupernodeAccount: reporterSupernodeAccount, + }) + require.NoError(t, err) + return resp.State +} + +func auditQueryAssignedTargets(t *testing.T, epochID uint64, filterByEpochID bool, proberSupernodeAccount string) audittypes.QueryAssignedTargetsResponse { + t.Helper() + qc, _ := newAuditQueryClient(t) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + resp, err := qc.AssignedTargets(ctx, &audittypes.QueryAssignedTargetsRequest{ + EpochId: epochID, + FilterByEpochId: filterByEpochID, + SupernodeAccount: proberSupernodeAccount, + }) + require.NoError(t, err) + return *resp +} + +func awaitCurrentEpochAnchorWithActiveSupernodes(t *testing.T, minEpochID uint64, expectedAccounts ...string) audittypes.EpochAnchor { + t.Helper() + qc, _ := newAuditQueryClient(t) + deadline := time.Now().Add(2 * time.Minute) + var last audittypes.EpochAnchor + var lastErr error + + for time.Now().Before(deadline) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + resp, err := qc.CurrentEpochAnchor(ctx, &audittypes.QueryCurrentEpochAnchorRequest{}) + cancel() + if err == nil { + last = resp.Anchor + if last.EpochId >= 
minEpochID && containsAllStrings(last.ActiveSupernodeAccounts, expectedAccounts...) && containsAllStrings(last.TargetSupernodeAccounts, expectedAccounts...) { + return last + } + } else { + lastErr = err + } + sut.AwaitNextBlock(t) + } + + require.FailNowf(t, + "epoch anchor did not include expected supernodes", + "min_epoch_id=%d expected=%v last_epoch_id=%d last_active=%v last_targets=%v last_err=%v", + minEpochID, + expectedAccounts, + last.EpochId, + last.ActiveSupernodeAccounts, + last.TargetSupernodeAccounts, + lastErr, + ) + return audittypes.EpochAnchor{} +} + +func containsAllStrings(values []string, needles ...string) bool { + for _, needle := range needles { + if !containsString(values, needle) { + return false + } + } + return true +} + +// setStorageTruthEnforcementModeUnspecified sets enforcement_mode=UNSPECIFIED in genesis. +// Use this for tests that rely on the k-based peer-assignment formula rather than the +// storage-truth one-third coverage formula that activates under any non-UNSPECIFIED mode. 
+func setStorageTruthEnforcementModeUnspecified(t *testing.T) GenesisMutator { + return func(genesis []byte) []byte { + t.Helper() + state, err := sjson.SetRawBytes(genesis, + "app_state.audit.params.storage_truth_enforcement_mode", + []byte(`"STORAGE_TRUTH_ENFORCEMENT_MODE_UNSPECIFIED"`)) + require.NoError(t, err) + return state + } +} + +func seedStorageTruthSyntheticTicketCounts(t *testing.T, genesis []byte) []byte { + t.Helper() + + ticketIDs := []string{ + "sys-test-ticket-recheck-1", + "sys-test-ticket-soft-postpone", + "sys-test-ticket-shadow-nopostpone", + "sys-test-ticket-heal-lifecycle-1", + "edge-ticket-full-mode-recent", + "edge-ticket-full-mode-old", + "edge-ticket-unspecified", + "edge-ticket-failed-heal", + "edge-ticket-replay", + } + for i := 0; i < 3; i++ { + ticketIDs = append(ticketIDs, fmt.Sprintf("edge-ticket-decay-%d", i)) + } + for i := 0; i < 4; i++ { + ticketIDs = append(ticketIDs, fmt.Sprintf("multi-ticket-%d", i)) + } + + states := make([]map[string]any, 0, len(ticketIDs)) + for _, ticketID := range ticketIDs { + states = append(states, map[string]any{ + "ticket_id": ticketID, + "index_artifact_count": 8, + "symbol_artifact_count": 8, + }) + } + bz, err := json.Marshal(states) + require.NoError(t, err) + + state, err := sjson.SetRawBytes(genesis, "app_state.audit.ticket_artifact_count_states", bz) + require.NoError(t, err) + return state +} + +// buildStorageProofResultJSON builds a single StorageProofResult JSON object for the +// --storage-proof-results CLI flag. +// +// Uses INVALID_TRANSCRIPT result class: score-neutral (nodeSuspicion=0, ticketDeterioration=0) +// but recheck-eligible, so it seeds the on-chain transcript KV store without corrupting +// any node-suspicion or ticket-deterioration score assertions in the test. 
+func buildStorageProofResultJSONWithClass(challengerAcct, targetAcct, ticketID, transcriptHash, bucketType, resultClass string) string { + return buildStorageProofResultJSONWithClassAndCount(challengerAcct, targetAcct, ticketID, transcriptHash, bucketType, resultClass, 8) +} + +func buildStorageProofResultJSONWithClassAndCount(challengerAcct, targetAcct, ticketID, transcriptHash, bucketType, resultClass string, artifactCount uint32) string { + bz, _ := json.Marshal(map[string]any{ + "target_supernode_account": targetAcct, + "challenger_supernode_account": challengerAcct, + "ticket_id": ticketID, + "transcript_hash": transcriptHash, + "bucket_type": bucketType, + "result_class": resultClass, + "artifact_class": "STORAGE_PROOF_ARTIFACT_CLASS_INDEX", + "artifact_key": "seed-artifact-key", + "artifact_ordinal": 0, + "artifact_count": artifactCount, + "derivation_input_hash": "seed-derivation-hash", + "challenger_signature": "seed-challenger-signature", + }) + return string(bz) +} + +func buildStorageProofResultJSON(challengerAcct, targetAcct, ticketID, transcriptHash, bucketType string) string { + return buildStorageProofResultJSONWithClass( + challengerAcct, + targetAcct, + ticketID, + transcriptHash, + bucketType, + "STORAGE_PROOF_RESULT_CLASS_INVALID_TRANSCRIPT", + ) +} + +// submitEpochReportWithProofResults submits an epoch report that includes storage proof results +// via the AutoCLI --storage-proof-results flag. Uses an empty host report (no port measurements). +func submitEpochReportWithProofResults(t *testing.T, cli *LumeradCli, fromNode string, epochID uint64, proofResultJSONs []string) string { + t.Helper() + args := []string{ + "tx", "audit", "submit-epoch-report", + strconv.FormatUint(epochID, 10), + auditHostReportJSON([]string{}), + "--from", fromNode, + } + for _, pr := range proofResultJSONs { + args = append(args, "--storage-proof-results", pr) + } + return cli.CustomCommand(args...) 
+} + +type transcriptSeed struct { + ticketID string + transcriptHash string +} + +func containsString(values []string, needle string) bool { + for _, value := range values { + if value == needle { + return true + } + } + return false +} + +func findAssignedProberForTarget( + t *testing.T, + epochID uint64, + candidates []testNodeIdentity, + targetAcct string, +) (audittypes.QueryAssignedTargetsResponse, testNodeIdentity) { + t.Helper() + + var fallbackResp audittypes.QueryAssignedTargetsResponse + var fallbackProber testNodeIdentity + for _, candidate := range candidates { + resp := auditQueryAssignedTargets(t, epochID, true, candidate.accAddr) + if !containsString(resp.TargetSupernodeAccounts, targetAcct) { + continue + } + if candidate.accAddr != targetAcct { + return resp, candidate + } + fallbackResp = resp + fallbackProber = candidate + } + if fallbackProber.accAddr != "" { + return fallbackResp, fallbackProber + } + + require.FailNowf(t, "no assigned prober", "no candidate assigned to target %q in epoch %d", targetAcct, epochID) + return audittypes.QueryAssignedTargetsResponse{}, testNodeIdentity{} +} + +func findAssignedProberAndTarget( + t *testing.T, + epochID uint64, + candidates []testNodeIdentity, +) (audittypes.QueryAssignedTargetsResponse, testNodeIdentity, testNodeIdentity) { + t.Helper() + + byAccount := make(map[string]testNodeIdentity, len(candidates)) + for _, candidate := range candidates { + byAccount[candidate.accAddr] = candidate + } + + for _, candidate := range candidates { + resp := auditQueryAssignedTargets(t, epochID, true, candidate.accAddr) + for _, targetAcct := range resp.TargetSupernodeAccounts { + target, ok := byAccount[targetAcct] + if ok && target.accAddr != candidate.accAddr { + return resp, candidate, target + } + } + } + + require.FailNowf(t, "no assigned prober/target pair", "no candidate had an assigned registered target in epoch %d", epochID) + return audittypes.QueryAssignedTargetsResponse{}, testNodeIdentity{}, 
testNodeIdentity{}
+}
+
+// seedProofTranscripts (defined below, after the CLI transaction helpers) seeds on-chain transcript records so that subsequent
+// SubmitStorageRecheckEvidence calls can reference a valid challenged_result_transcript_hash.
+//
+// It queries assignments to find which node in candidates is assigned targetAcct,
+// submits an epoch report with INVALID_TRANSCRIPT results from that prober, then
+// returns the rechecker node (any candidate ≠ prober).
+//
+// For fullMode=true (FULL enforcement), exactly one seed is expected and both RECENT and OLD
+// results are included to satisfy compound-coverage validation. For fullMode=false, one
+// RECENT result is generated per seed.
+// ── gRPC query helpers ────────────────────────────────────────────────────────
+
+func auditQueryNodeSuspicionStateST(t *testing.T, supernodeAccount string) (audittypes.NodeSuspicionState, bool) {
+ t.Helper()
+ conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
+ require.NoError(t, err)
+ defer conn.Close()
+
+ qc := audittypes.NewQueryClient(conn)
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ resp, err := qc.NodeSuspicionState(ctx, &audittypes.QueryNodeSuspicionStateRequest{
+ SupernodeAccount: supernodeAccount,
+ })
+ if err != nil {
+ return audittypes.NodeSuspicionState{}, false
+ }
+ return resp.State, true
+}
+
+func auditQueryTicketDeteriorationStateST(t *testing.T, ticketID string) (audittypes.TicketDeteriorationState, bool) {
+ t.Helper()
+ conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
+ require.NoError(t, err)
+ defer conn.Close()
+
+ qc := audittypes.NewQueryClient(conn)
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ resp, err := qc.TicketDeteriorationState(ctx, &audittypes.QueryTicketDeteriorationStateRequest{
+ TicketId: ticketID,
+ })
+ if err != nil {
+ return audittypes.TicketDeteriorationState{}, false
+ }
+ return resp.State, true +} + +func auditQueryHealOpsByTicketST(t *testing.T, ticketID string) []audittypes.HealOp { + t.Helper() + conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials())) + require.NoError(t, err) + defer conn.Close() + + qc := audittypes.NewQueryClient(conn) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + resp, err := qc.HealOpsByTicket(ctx, &audittypes.QueryHealOpsByTicketRequest{ + TicketId: ticketID, + }) + if err != nil { + return nil + } + return resp.HealOps +} + +// ── CLI transaction helpers ─────────────────────────────────────────────────── + +func submitStorageRecheckEvidence( + t *testing.T, + cli *LumeradCli, + fromNode string, + epochID uint64, + challengedAccount string, + ticketID string, + challengedHash string, + recheckHash string, + resultClass string, +) string { + t.Helper() + return cli.CustomCommand( + "tx", "audit", "submit-storage-recheck-evidence", + strconv.FormatUint(epochID, 10), + challengedAccount, + ticketID, + "--challenged-result-transcript-hash", challengedHash, + "--recheck-transcript-hash", recheckHash, + "--recheck-result-class", resultClass, + "--gas", "500000", // Per CP3.5 F-B — secondary indexes for recheck reporter result push gas above 200k default. 
+ "--from", fromNode, + ) +} + +func submitClaimHealCompleteST( + t *testing.T, + cli *LumeradCli, + fromNode string, + healOpID uint64, + ticketID string, + manifestHash string, +) string { + t.Helper() + return cli.CustomCommand( + "tx", "audit", "claim-heal-complete", + strconv.FormatUint(healOpID, 10), + ticketID, + manifestHash, + "--from", fromNode, + ) +} + +func submitHealVerificationST( + t *testing.T, + cli *LumeradCli, + fromNode string, + healOpID uint64, + verified bool, + verificationHash string, +) string { + t.Helper() + return cli.CustomCommand( + "tx", "audit", "submit-heal-verification", + strconv.FormatUint(healOpID, 10), + strconv.FormatBool(verified), + verificationHash, + "--from", fromNode, + ) +} + +func seedProofTranscripts( + t *testing.T, + cli *LumeradCli, + epochID uint64, + candidates []testNodeIdentity, + targetAcct string, + seeds []transcriptSeed, + fullMode bool, +) testNodeIdentity { + t.Helper() + return seedProofTranscriptsWithClass(t, cli, epochID, candidates, targetAcct, seeds, fullMode, "STORAGE_PROOF_RESULT_CLASS_INVALID_TRANSCRIPT") +} + +func seedProofTranscriptsWithClass( + t *testing.T, + cli *LumeradCli, + epochID uint64, + candidates []testNodeIdentity, + targetAcct string, + seeds []transcriptSeed, + fullMode bool, + resultClass string, +) testNodeIdentity { + t.Helper() + + var prober, rechecker testNodeIdentity + proberIdx := -1 + var proberResp audittypes.QueryAssignedTargetsResponse + for i, c := range candidates { + resp := auditQueryAssignedTargets(t, epochID, true, c.accAddr) + for _, a := range resp.TargetSupernodeAccounts { + if a == targetAcct { + prober = c + proberIdx = i + proberResp = resp + break + } + } + if proberIdx >= 0 { + break + } + } + require.GreaterOrEqual(t, proberIdx, 0, + "no candidate assigned to %q in epoch %d — check challenge_target_divisor=1 in genesis", targetAcct, epochID) + for i, c := range candidates { + if i != proberIdx && c.accAddr != targetAcct { + rechecker = c + break + } + 
} + require.NotEmpty(t, rechecker.accAddr, "no rechecker available — candidates must include a node distinct from prober and target") + + // Build port states sized to required_open_ports (chain rejects mismatched lengths). + portStates := make([]string, len(proberResp.RequiredOpenPorts)) + for j := range portStates { + portStates[j] = "PORT_STATE_OPEN" + } + + // Probers must include peer observations for ALL assigned targets. + var observations []string + for _, tgt := range proberResp.TargetSupernodeAccounts { + observations = append(observations, storageChallengeObservationJSON(tgt, portStates)) + } + + var proofResults []string + for _, s := range seeds { + proofResults = append(proofResults, buildStorageProofResultJSONWithClass( + prober.accAddr, targetAcct, s.ticketID, s.transcriptHash, + "STORAGE_PROOF_BUCKET_TYPE_RECENT", + resultClass, + )) + if fullMode { + // FULL mode requires both RECENT and OLD results for every assigned target. + proofResults = append(proofResults, buildStorageProofResultJSONWithClass( + prober.accAddr, targetAcct, s.ticketID, s.transcriptHash+"-old-seed", + "STORAGE_PROOF_BUCKET_TYPE_OLD", + resultClass, + )) + } + } + + // Submit full epoch report: host report + peer observations + proof results. + args := []string{ + "tx", "audit", "submit-epoch-report", + strconv.FormatUint(epochID, 10), + auditHostReportJSON(portStates), + "--from", prober.nodeName, + "--gas", "500000", + } + for _, obs := range observations { + args = append(args, "--storage-challenge-observations", obs) + } + for _, pr := range proofResults { + args = append(args, "--storage-proof-results", pr) + } + seedResp := cli.CustomCommand(args...) 
+ RequireTxSuccess(t, seedResp) + sut.AwaitNextBlock(t) + + return rechecker +} diff --git a/tests/system/e2e_lep6_runtime_test.go b/tests/system/e2e_lep6_runtime_test.go new file mode 100644 index 00000000..09043370 --- /dev/null +++ b/tests/system/e2e_lep6_runtime_test.go @@ -0,0 +1,515 @@ +//go:build system_test + +package system + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "testing" + "time" + + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" + "github.com/LumeraProtocol/supernode/v2/pkg/keyring" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + "github.com/LumeraProtocol/supernode/v2/sdk/action" + sdkconfig "github.com/LumeraProtocol/supernode/v2/sdk/config" + "github.com/LumeraProtocol/supernode/v2/sdk/event" + "github.com/LumeraProtocol/supernode/v2/supernode/config" + "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" + "github.com/tidwall/sjson" +) + +// TestLEP6RuntimeE2E_CascadeChallengeHealVerifyAndStore mirrors the shape of +// TestCascadeE2E, but extends it through the LEP-6 runtime path: +// +// 1. start real lumerad + three real supernode processes; +// 2. upload a real CASCADE action and prove normal download works; +// 3. submit a real storage-challenge epoch report for that action/ticket; +// 4. wait for chain to schedule a heal-op; +// 5. let the assigned healer supernode reconstruct+stage and claim; +// 6. let assigned verifier supernodes fetch healer-served bytes and verify; +// 7. wait for chain VERIFIED and finalizer publish; +// 8. download the action again and assert bytes still match the original. 
+// +// The storage-challenge report is driven by the test so the failure is +// deterministic; the healer/verifier/finalizer data-plane is driven by the real +// supernode self_healing services. +func TestLEP6RuntimeE2E_CascadeChallengeHealVerifyAndStore(t *testing.T) { + os.Setenv("INTEGRATION_TEST", "true") + os.Setenv("LUMERA_SUPERNODE_DISABLE_HOST_REPORTER", "1") + defer os.Unsetenv("INTEGRATION_TEST") + defer os.Unsetenv("LUMERA_SUPERNODE_DISABLE_HOST_REPORTER") + + const ( + epochLengthBlocks = uint64(12) + originHeight = int64(1) + lumeraGRPCAddr = "localhost:9090" + lumeraChainID = "testing" + testKeyName = "testkey1" + testMnemonic = "odor kiss switch swarm spell make planet bundle skate ozone path planet exclude butter atom ahead angle royal shuffle door prevent merry alter robust" + testKey2Mnemonic = "club party current length duck agent love into slide extend spawn sentence kangaroo chunk festival order plate rare public good include situate liar miss" + testKey3Mnemonic = "young envelope urban crucial denial zone toward mansion protect bonus exotic puppy resource pistol expand tell cupboard radio hurry world radio trust explain million" + expectedAddress = "lumera1em87kgrvgttrkvuamtetyaagjrhnu3vjy44at4" + userKeyName = "user" + userMnemonic = "little tone alley oval festival gloom sting asthma crime select swap auto when trip luxury pact risk sister pencil about crisp upon opera timber" + fundAmount = "1000000ulume" + actionType = "CASCADE" + ) + + t.Log("Step 1: configure genesis and start chain") + sut.ModifyGenesisJSON(t, + SetStakingBondDenomUlume(t), + SetActionParams(t), + SetSupernodeMetricsParams(t), + setSupernodeParamsForAuditTests(t), + setAuditParamsForFastEpochs(t, epochLengthBlocks, 1, 1, 1, []uint32{4444}), + setAuditMissingReportGraceForRuntimeE2E(t), + setStorageTruthTestParams(t, "STORAGE_TRUTH_ENFORCEMENT_MODE_FULL", 1000, 500, 10, 0, 10), + ) + sut.StartChain(t) + cli := NewLumeradCLI(t, sut, true) + + t.Log("Step 2: register and 
fund three supernodes") + binaryPath := locateExecutable(sut.ExecBinary) + homePath := filepath.Join(WorkDir, sut.outputDir) + recoverChainKey(t, binaryPath, homePath, testKeyName, testMnemonic) + recoverChainKey(t, binaryPath, homePath, "testkey2", testKey2Mnemonic) + recoverChainKey(t, binaryPath, homePath, "testkey3", testKey3Mnemonic) + recoverChainKey(t, binaryPath, homePath, userKeyName, userMnemonic) + + n0 := getRuntimeSupernodeIdentity(t, cli, "node0", "testkey1") + n1 := getRuntimeSupernodeIdentity(t, cli, "node1", "testkey2") + n2 := getRuntimeSupernodeIdentity(t, cli, "node2", "testkey3") + registerRuntimeSupernode(t, cli, "node0", n0, "localhost:4444", "4445") + registerRuntimeSupernode(t, cli, "node1", n1, "localhost:4446", "4447") + registerRuntimeSupernode(t, cli, "node2", n2, "localhost:4448", "4449") + cli.FundAddress(n0.accAddr, "100000ulume") + cli.FundAddress(n1.accAddr, "100000ulume") + cli.FundAddress(n2.accAddr, "100000ulume") + bootstrapRuntimeSupernodeEligibility(t, cli) + + t.Log("Step 3: recover user/test keys and start real supernodes") + recoveredAddress := cli.GetKeyAddr(testKeyName) + require.Equal(t, expectedAddress, recoveredAddress) + userAddress := cli.GetKeyAddr(userKeyName) + cli.FundAddress(recoveredAddress, fundAmount) + cli.FundAddress(userAddress, fundAmount) + sut.AwaitNextBlock(t) + + cmds := StartLEP6Supernodes(t) + defer StopAllSupernodes(cmds) + time.Sleep(40 * time.Second) // Match Cascade e2e: allow supernode P2P/DHT routing to settle before upload. 
+ + t.Log("Step 4: upload a real Cascade action through the SDK/supernodes") + ctx := context.Background() + kr, err := keyring.InitKeyring(config.KeyringConfig{Backend: "memory", Dir: ""}) + require.NoError(t, err) + _, err = keyring.RecoverAccountFromMnemonic(kr, testKeyName, testMnemonic) + require.NoError(t, err) + userRecord, err := keyring.RecoverAccountFromMnemonic(kr, userKeyName, userMnemonic) + require.NoError(t, err) + userLocalAddr, err := userRecord.GetAddress() + require.NoError(t, err) + require.Equal(t, userAddress, userLocalAddr.String()) + + lumeraCfg, err := lumera.NewConfig(lumeraGRPCAddr, lumeraChainID, userKeyName, kr) + require.NoError(t, err) + lumeraClient, err := lumera.NewClient(ctx, lumeraCfg) + require.NoError(t, err) + defer lumeraClient.Close() + + actionClient, err := action.NewClient(ctx, sdkconfig.Config{ + Account: sdkconfig.AccountConfig{KeyName: userKeyName, Keyring: kr}, + Lumera: sdkconfig.LumeraConfig{GRPCAddr: lumeraGRPCAddr, ChainID: lumeraChainID}, + }, nil) + require.NoError(t, err) + + testFileFullpath := filepath.Join("test.txt") + originalData := readFileBytes(t, testFileFullpath) + originalHash := sha256.Sum256(originalData) + + actionID := requestAndStartCascadeAction(t, ctx, cli, lumeraClient, actionClient, testFileFullpath, actionType) + require.NoError(t, waitForActionStateWithClient(ctx, lumeraClient, actionID, actiontypes.ActionStateDone)) + artifactCounts := requireFinalizedCascadeArtifactCounts(t, ctx, lumeraClient, actionID) + + t.Log("Step 5: prove pre-heal Cascade download works") + preHealDir := t.TempDir() + downloadAndAssertCascadeBytes(t, ctx, actionClient, actionID, userAddress, preHealDir, originalData, originalHash) + + t.Log("Step 6: submit deterministic storage-challenge report for the Cascade action ticket") + currentHeight := sut.AwaitNextBlock(t) + epochID, epochStart := nextEpochAfterHeight(originHeight, epochLengthBlocks, currentHeight) + epochEnd := epochStart + int64(epochLengthBlocks) + 
awaitAtLeastHeight(t, epochStart) + anchor := awaitCurrentEpochAnchorWithActiveSupernodes(t, epochID, n0.accAddr, n1.accAddr, n2.accAddr) + require.ElementsMatch(t, []string{n0.accAddr, n1.accAddr, n2.accAddr}, anchor.ActiveSupernodeAccounts) + + nodes := []testNodeIdentity{n0, n1, n2} + proberResp, prober, target := findAssignedProberAndTarget(t, epochID, nodes) + portStates := openPortStates(proberResp.RequiredOpenPorts) + reportArgs := []string{ + "tx", "audit", "submit-epoch-report", + strconv.FormatUint(epochID, 10), + auditHostReportJSON(portStates), + "--from", prober.nodeName, + "--gas", "500000", + } + for _, assignedTarget := range proberResp.TargetSupernodeAccounts { + reportArgs = append(reportArgs, "--storage-challenge-observations", storageChallengeObservationJSON(assignedTarget, portStates)) + } + reportArgs = append(reportArgs, + "--storage-proof-results", buildStorageProofResultJSONWithClassAndCount( + prober.accAddr, + target.accAddr, + actionID, + "runtime-e2e-recent-hash-mismatch", + "STORAGE_PROOF_BUCKET_TYPE_RECENT", + "STORAGE_PROOF_RESULT_CLASS_HASH_MISMATCH", + artifactCounts.index, + ), + "--storage-proof-results", buildStorageProofResultJSONWithClassAndCount( + prober.accAddr, + target.accAddr, + actionID, + "runtime-e2e-old-hash-mismatch", + "STORAGE_PROOF_BUCKET_TYPE_OLD", + "STORAGE_PROOF_RESULT_CLASS_HASH_MISMATCH", + artifactCounts.index, + ), + ) + reportResp := cli.CustomCommand(reportArgs...) 
+ RequireTxSuccess(t, reportResp) + sut.AwaitNextBlock(t) + + ticketBefore, found := auditQueryTicketDeteriorationStateST(t, actionID) + require.True(t, found, "storage challenge failure for the action/ticket must create deterioration state") + require.GreaterOrEqual(t, ticketBefore.DeteriorationScore, int64(10), "ticket score must cross heal threshold before scheduling") + + t.Log("Step 7: wait for chain heal-op schedule and real supernode self-healing runtime") + awaitAtLeastHeight(t, epochEnd) + sut.AwaitNextBlock(t) + healOps := auditQueryHealOpsByTicketST(t, actionID) + require.Len(t, healOps, 1, "chain must schedule one heal op for the deteriorated Cascade action ticket") + healOp := healOps[0] + require.False(t, isFinalStatusForRuntimeE2E(healOp.Status), "newly observed heal op must not already be final: %s", healOp.Status.String()) + require.NotEmpty(t, healOp.HealerSupernodeAccount) + require.NotEmpty(t, healOp.VerifierSupernodeAccounts) + + verified := awaitAnyHealOpStatusByTicket(t, actionID, audittypes.HealOpStatus_HEAL_OP_STATUS_VERIFIED, 6*time.Minute) + require.NotEmpty(t, verified.ResultHash, "real healer must submit the reconstructed file BLAKE3 manifest hash before verifier quorum") + + healerDataDir := dataDirForSupernodeAccount(t, verified.HealerSupernodeAccount, n0, n1, n2) + stagingDir := filepath.Join(healerDataDir, "heal-staging", fmt.Sprintf("%d", verified.HealOpId)) + awaitStagingDirRemoved(t, stagingDir, 90*time.Second) + + ticketAfter, found := auditQueryTicketDeteriorationStateST(t, actionID) + require.True(t, found) + require.Less(t, ticketAfter.DeteriorationScore, ticketBefore.DeteriorationScore, "VERIFIED heal must reduce ticket deterioration") + + t.Log("Step 8: prove post-heal Cascade data remains retrievable and byte-identical") + postHealDir := t.TempDir() + downloadAndAssertCascadeBytes(t, ctx, actionClient, actionID, userAddress, postHealDir, originalData, originalHash) +} + +type finalizedCascadeArtifactCounts struct { + index 
uint32 + symbol uint32 +} + +func requireFinalizedCascadeArtifactCounts(t *testing.T, ctx context.Context, client lumera.Client, actionID string) finalizedCascadeArtifactCounts { + t.Helper() + resp, err := client.Action().GetAction(ctx, actionID) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Action) + meta, err := cascadekit.UnmarshalCascadeMetadata(resp.Action.Metadata) + require.NoError(t, err) + require.NotZero(t, meta.IndexArtifactCount, "finalized Cascade action metadata must include LEP-6 index artifact count") + require.NotZero(t, meta.SymbolArtifactCount, "finalized Cascade action metadata must include LEP-6 symbol artifact count") + t.Logf("Finalized Cascade artifact counts for action %s: index=%d symbol=%d", actionID, meta.IndexArtifactCount, meta.SymbolArtifactCount) + return finalizedCascadeArtifactCounts{index: meta.IndexArtifactCount, symbol: meta.SymbolArtifactCount} +} + +func recoverChainKey(t *testing.T, binaryPath, homePath, keyName, mnemonic string) { + t.Helper() + cmd := exec.Command(binaryPath, "keys", "add", keyName, "--recover", "--keyring-backend=test", "--home", homePath) + cmd.Stdin = strings.NewReader(mnemonic + "\n") + out, err := cmd.CombinedOutput() + require.NoError(t, err, "recover key %s failed: %s", keyName, string(out)) +} + +func setAuditMissingReportGraceForRuntimeE2E(t *testing.T) GenesisMutator { + return func(genesis []byte) []byte { + t.Helper() + state, err := sjson.SetRawBytes(genesis, "app_state.audit.params.consecutive_epochs_to_postpone", []byte("100")) + require.NoError(t, err) + return state + } +} + +func readFileBytes(t *testing.T, path string) []byte { + t.Helper() + f, err := os.Open(path) + require.NoError(t, err) + defer f.Close() + b, err := io.ReadAll(f) + require.NoError(t, err) + return b +} + +func getRuntimeSupernodeIdentity(t *testing.T, cli *LumeradCli, validatorKey, supernodeKey string) testNodeIdentity { + t.Helper() + accAddr := cli.GetKeyAddr(supernodeKey) + valAddr 
:= strings.TrimSpace(cli.Keys("keys", "show", validatorKey, "--bech", "val", "-a")) + require.NotEmpty(t, accAddr) + require.NotEmpty(t, valAddr) + return testNodeIdentity{nodeName: supernodeKey, accAddr: accAddr, valAddr: valAddr} +} + +func registerRuntimeSupernode(t *testing.T, cli *LumeradCli, signerKey string, id testNodeIdentity, grpcAddress, p2pPort string) { + t.Helper() + resp := cli.CustomCommand( + "tx", "supernode", "register-supernode", + id.valAddr, + grpcAddress, + id.accAddr, + "--p2p-port", p2pPort, + "--from", signerKey, + ) + RequireTxSuccess(t, resp) + sut.AwaitNextBlock(t) +} + +func bootstrapRuntimeSupernodeEligibility(t *testing.T, cli *LumeradCli) { + t.Helper() + listResp := cli.CustomQuery("query", "supernode", "list-supernodes", "--output", "json") + t.Logf("Registered supernodes response: %s", listResp) + require.NotEqual(t, "{}", strings.TrimSpace(listResp), "registered supernodes must be visible before Cascade bootstrap") + + queryHeight := sut.AwaitNextBlock(t) + resp := cli.CustomQuery( + "query", "supernode", "get-top-supernodes-for-block", + fmt.Sprint(queryHeight), + "--output", "json", + ) + t.Logf("Bootstrap top-supernodes response at height %d: %s", queryHeight, resp) + require.NotEmpty(t, strings.TrimSpace(resp), "top-supernodes bootstrap query must return a response") +} + +func requestAndStartCascadeAction(t *testing.T, ctx context.Context, cli *LumeradCli, lc lumera.Client, ac action.Client, filePath, actionType string) string { + t.Helper() + meta, price, expiration, err := ac.BuildCascadeMetadataFromFile(ctx, filePath, false, "") + require.NoError(t, err) + metaBytes, err := json.Marshal(meta) + require.NoError(t, err) + fi, err := os.Stat(filePath) + require.NoError(t, err) + fileSizeKbs := (fi.Size() + 1023) / 1024 + resp, err := lc.ActionMsg().RequestAction(ctx, actionType, string(metaBytes), price, expiration, strconv.FormatInt(fileSizeKbs, 10)) + require.NoError(t, err) + require.NotNil(t, resp) + require.Zero(t, 
resp.TxResponse.Code, "RequestAction tx failed: %s", resp.TxResponse.RawLog) + sut.AwaitNextBlock(t) + + txResp := awaitTxQuery(t, cli, resp.TxResponse.TxHash, 45*time.Second) + require.Equal(t, int64(0), gjson.Get(txResp, "code").Int(), "RequestAction tx query failed: %s", txResp) + actionID := extractActionIDFromTx(t, txResp) + + txHashCh := make(chan string, 1) + completionCh := make(chan struct{}, 1) + errCh := make(chan string, 1) + err = ac.SubscribeToAllEvents(context.Background(), func(ctx context.Context, e event.Event) { + switch e.Type { + case event.SDKTaskTxHashReceived: + if txHash, ok := e.Data[event.KeyTxHash].(string); ok && txHash != "" { + select { + case txHashCh <- txHash: + default: + } + } + case event.SDKTaskCompleted: + select { + case completionCh <- struct{}{}: + default: + } + case event.SDKTaskFailed: + msg, _ := e.Data[event.KeyError].(string) + if msg == "" { + msg = "cascade task failed without an SDK error message" + } + select { + case errCh <- msg: + default: + } + } + }) + require.NoError(t, err) + + time.Sleep(5 * time.Second) + sig, err := ac.GenerateStartCascadeSignatureFromFile(ctx, filePath) + require.NoError(t, err) + _, err = ac.StartCascade(ctx, filePath, actionID, sig) + require.NoError(t, err) + + var finalizeTxHash string + completed := false + timeout := time.After(3 * time.Minute) + for finalizeTxHash == "" || !completed { + select { + case h := <-txHashCh: + if finalizeTxHash == "" { + finalizeTxHash = h + } + case <-completionCh: + completed = true + case msg := <-errCh: + t.Fatalf("cascade task reported failure: %s", msg) + case <-timeout: + t.Fatalf("timeout waiting for cascade SDK events; finalizeTxHash=%q completed=%v", finalizeTxHash, completed) + } + } + finalizeResp := awaitTxQuery(t, cli, finalizeTxHash, 45*time.Second) + require.Equal(t, int64(0), gjson.Get(finalizeResp, "code").Int(), "Cascade finalize tx failed: %s", finalizeResp) + return actionID +} + +func awaitTxQuery(t *testing.T, cli *LumeradCli, 
txHash string, timeout time.Duration) string { + t.Helper() + deadline := time.Now().Add(timeout) + var last string + binaryPath := locateExecutable(sut.ExecBinary) + for time.Now().Before(deadline) { + cmd := exec.Command(binaryPath, "query", "tx", txHash, "--output", "json", "--node", "tcp://localhost:26657") + outBytes, _ := cmd.CombinedOutput() + out := string(outBytes) + last = out + lower := strings.ToLower(out) + if strings.Contains(lower, "tx not found") || strings.Contains(lower, "rpc error") || strings.Contains(lower, "usage:") { + time.Sleep(time.Second) + continue + } + return out + } + t.Fatalf("tx %s was not queryable before timeout; last=%s", txHash, last) + return "" +} + +func extractActionIDFromTx(t *testing.T, txResp string) string { + t.Helper() + for _, event := range gjson.Get(txResp, "events").Array() { + if event.Get("type").String() != "action_registered" { + continue + } + for _, attr := range event.Get("attributes").Array() { + if attr.Get("key").String() == "action_id" { + return attr.Get("value").String() + } + } + } + t.Fatalf("action_id not found in tx response: %s", txResp) + return "" +} + +func downloadAndAssertCascadeBytes(t *testing.T, ctx context.Context, ac action.Client, actionID, userAddress, outputBaseDir string, originalData []byte, originalHash [32]byte) { + t.Helper() + sig, err := ac.GenerateDownloadSignature(ctx, actionID, userAddress) + require.NoError(t, err) + _, err = ac.DownloadCascade(ctx, actionID, outputBaseDir, sig) + require.NoError(t, err) + outDir := filepath.Join(outputBaseDir, actionID) + require.Eventually(t, func() bool { + entries, err := os.ReadDir(outDir) + return err == nil && len(entries) > 0 + }, 45*time.Second, time.Second, "download output directory should contain reconstructed file") + entries, err := os.ReadDir(outDir) + require.NoError(t, err) + var downloadedPath string + for _, entry := range entries { + if !entry.IsDir() { + downloadedPath = filepath.Join(outDir, entry.Name()) + break + } + 
} + require.NotEmpty(t, downloadedPath, "download output must contain a file") + downloaded := readFileBytes(t, downloadedPath) + require.Equal(t, len(originalData), len(downloaded), "downloaded size must match original") + require.Equal(t, originalHash, sha256.Sum256(downloaded), "downloaded hash must match original") +} + +func isFinalStatusForRuntimeE2E(status audittypes.HealOpStatus) bool { + switch status { + case audittypes.HealOpStatus_HEAL_OP_STATUS_VERIFIED, + audittypes.HealOpStatus_HEAL_OP_STATUS_FAILED, + audittypes.HealOpStatus_HEAL_OP_STATUS_EXPIRED: + return true + default: + return false + } +} + +func awaitAnyHealOpStatusByTicket(t *testing.T, ticketID string, status audittypes.HealOpStatus, timeout time.Duration) audittypes.HealOp { + t.Helper() + deadline := time.Now().Add(timeout) + var last []audittypes.HealOp + for time.Now().Before(deadline) { + healOps := auditQueryHealOpsByTicketST(t, ticketID) + last = healOps + for _, op := range healOps { + if op.Status == status { + return op + } + } + time.Sleep(2 * time.Second) + } + t.Fatalf("no heal op for ticket %s reached %s before timeout; last=%+v", ticketID, status.String(), last) + return audittypes.HealOp{} +} + +func awaitHealOpStatusByTicket(t *testing.T, ticketID string, healOpID uint64, status audittypes.HealOpStatus, timeout time.Duration) audittypes.HealOp { + t.Helper() + deadline := time.Now().Add(timeout) + var last audittypes.HealOp + for time.Now().Before(deadline) { + for _, op := range auditQueryHealOpsByTicketST(t, ticketID) { + if op.HealOpId == healOpID { + last = op + if op.Status == status { + return op + } + } + } + time.Sleep(3 * time.Second) + } + t.Fatalf("heal op %d for ticket %s did not reach %s before timeout; last=%+v", healOpID, ticketID, status.String(), last) + return audittypes.HealOp{} +} + +func dataDirForSupernodeAccount(t *testing.T, account string, nodes ...testNodeIdentity) string { + t.Helper() + for i, node := range nodes { + if node.accAddr == account { 
+ return filepath.Join(".", fmt.Sprintf("supernode-lep6-data%d", i+1)) + } + } + t.Fatalf("supernode account %q not found in test nodes", account) + return "" +} + +func awaitStagingDirRemoved(t *testing.T, stagingDir string, timeout time.Duration) { + t.Helper() + require.Eventually(t, func() bool { + _, err := os.Stat(stagingDir) + return os.IsNotExist(err) + }, timeout, 3*time.Second, "verified heal finalizer should publish then remove staging dir %s", stagingDir) +} diff --git a/tests/system/e2e_lep6_test.go b/tests/system/e2e_lep6_test.go new file mode 100644 index 00000000..1db994ad --- /dev/null +++ b/tests/system/e2e_lep6_test.go @@ -0,0 +1,60 @@ +package system + +import ( + "fmt" + "os/exec" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" +) + +// TestLEP6RealChainIntegration exercises the real Lumera binary/local-chain +// harness. It intentionally avoids mocks: genesis is mutated, lumerad nodes are +// started, audit queries go through the live RPC endpoint, and a LEP-6 tx command +// is submitted far enough to be rejected by the real audit keeper. 
+func TestLEP6RealChainIntegration(t *testing.T) { + sut.ModifyGenesisJSON(t, SetAuditParams(t)) + sut.StartChain(t) + + cli := NewLumeradCLI(t, sut, true) + + t.Run("audit query surface is available", func(t *testing.T) { + params := cli.CustomQuery("query", "audit", "params", "--output", "json") + require.True(t, gjson.Valid(params), "audit params query must return JSON: %s", params) + require.NotEmpty(t, gjson.Get(params, "params").Raw, "audit params response must contain params: %s", params) + + currentEpoch := cli.CustomQuery("query", "audit", "current-epoch", "--output", "json") + require.True(t, gjson.Valid(currentEpoch), "current epoch query must return JSON: %s", currentEpoch) + }) + + t.Run("heal tx command is wired to chain validation", func(t *testing.T) { + out := runLumeradNoCheck(t, + "tx", "audit", "claim-heal-complete", "999999", "missing-ticket", "missing-manifest-hash", + "--from", "node0", + "--yes", + "--gas", "auto", + "--gas-adjustment", "1.5", + "--fees", "10ulume", + "--broadcast-mode", "sync", + "--output", "json", + ) + require.Contains(t, out, "heal op 999999 not found", "absent heal-op claim should be rejected by the real audit keeper: %s", out) + }) +} + +func runLumeradNoCheck(t *testing.T, args ...string) string { + t.Helper() + binaryPath := locateExecutable(sut.ExecBinary) + homePath := filepath.Join(WorkDir, sut.outputDir) + base := []string{ + "--home", homePath, + "--keyring-backend", "test", + "--chain-id", "testing", + "--node", "tcp://localhost:26657", + } + cmd := exec.Command(binaryPath, append(args, base...)...) 
+ out, _ := cmd.CombinedOutput() // exit status intentionally ignored: a rejected tx exits non-zero and the caller asserts on the output text
+ return string(out)
+}
diff --git a/tests/system/genesis_io.go b/tests/system/genesis_io.go
index 8b94e742..8b654d28 100644
--- a/tests/system/genesis_io.go
+++ b/tests/system/genesis_io.go
@@ -65,3 +65,25 @@ func SetDefaultDenoms(t *testing.T, denom string) GenesisMutator {
 return state
 }
 }
+
+func SetAuditParams(t *testing.T) GenesisMutator {
+ return func(genesis []byte) []byte {
+ t.Helper()
+ updates := map[string]any{
+ "app_state.audit.params.epoch_length_blocks": uint64(5),
+ "app_state.audit.params.sc_enabled": true,
+ "app_state.audit.params.sc_challengers_per_epoch": uint32(3),
+ "app_state.audit.params.storage_truth_enforcement_mode": "STORAGE_TRUTH_ENFORCEMENT_MODE_FULL",
+ "app_state.audit.params.storage_truth_max_self_heal_ops_per_epoch": uint32(3),
+ "app_state.audit.params.storage_truth_heal_deadline_epochs": uint32(2),
+ "app_state.audit.params.storage_truth_heal_verifier_count": uint32(2),
+ }
+ state := genesis
+ var err error
+ for path, value := range updates {
+ state, err = sjson.SetBytes(state, path, value)
+ require.NoError(t, err)
+ }
+ return state
+ }
+}
diff --git a/tests/system/go.mod b/tests/system/go.mod
index aeb95b7a..3222e311 100644
--- a/tests/system/go.mod
+++ b/tests/system/go.mod
@@ -11,7 +11,7 @@ replace (
 require (
 cosmossdk.io/math v1.5.3
- github.com/LumeraProtocol/lumera v1.12.0-rc
+ github.com/LumeraProtocol/lumera v1.12.0
 github.com/LumeraProtocol/supernode/v2 v2.0.0-00010101000000-000000000000
 github.com/cometbft/cometbft v0.38.21
 github.com/cosmos/ibc-go/v10 v10.5.0
@@ -19,6 +19,7 @@ require (
 github.com/tidwall/sjson v1.2.5
 golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b
 gopkg.in/yaml.v3 v3.0.1
+ lukechampine.com/blake3 v1.4.1
 )
 require (
@@ -39,7 +40,7 @@ require (
 github.com/stretchr/testify v1.11.1
 github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
 google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect
- 
google.golang.org/grpc v1.77.0 // indirect + google.golang.org/grpc v1.77.0 ) require ( @@ -183,7 +184,6 @@ require ( google.golang.org/protobuf v1.36.11 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gotest.tools/v3 v3.5.2 // indirect - lukechampine.com/blake3 v1.4.1 // indirect nhooyr.io/websocket v1.8.17 // indirect pgregory.net/rapid v1.2.0 // indirect sigs.k8s.io/yaml v1.6.0 // indirect diff --git a/tests/system/go.sum b/tests/system/go.sum index 815a8d71..6c90ab12 100644 --- a/tests/system/go.sum +++ b/tests/system/go.sum @@ -107,8 +107,8 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50 github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 h1:ig/FpDD2JofP/NExKQUbn7uOSZzJAQqogfqluZK4ed4= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/LumeraProtocol/lumera v1.12.0-rc h1:Mfae496LpjYhf1SvAE/bsmtjgdoOD8WAJFRCier8xsg= -github.com/LumeraProtocol/lumera v1.12.0-rc/go.mod h1:/G9LTPZB+261tHoWoj7q+1fn+O/VV0zzagwLdsThSNo= +github.com/LumeraProtocol/lumera v1.12.0 h1:BHkPF/vCKyGFKtl2MMxtRpUyzraJ96rWY9FniTbG6cU= +github.com/LumeraProtocol/lumera v1.12.0/go.mod h1:/G9LTPZB+261tHoWoj7q+1fn+O/VV0zzagwLdsThSNo= github.com/LumeraProtocol/rq-go v0.2.1 h1:8B3UzRChLsGMmvZ+UVbJsJj6JZzL9P9iYxbdUwGsQI4= github.com/LumeraProtocol/rq-go v0.2.1/go.mod h1:APnKCZRh1Es2Vtrd2w4kCLgAyaL5Bqrkz/BURoRJ+O8= github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= diff --git a/tests/system/supernode-utils.go b/tests/system/supernode-utils.go index 4d74d372..ec18d7a4 100644 --- a/tests/system/supernode-utils.go +++ b/tests/system/supernode-utils.go @@ -11,17 +11,23 @@ import ( ) func StartAllSupernodes(t *testing.T) []*exec.Cmd { + return 
StartSupernodesFromDirs(t, []string{"supernode-data1", "supernode-data2", "supernode-data3"}, "supernode") +} + +func StartLEP6Supernodes(t *testing.T) []*exec.Cmd { + return StartSupernodesFromDirs(t, []string{"supernode-lep6-data1", "supernode-lep6-data2", "supernode-lep6-data3"}, "supernode-lep6") +} + +func StartSupernodesFromDirs(t *testing.T, relDataDirs []string, logPrefix string) []*exec.Cmd { // Determine the project root (assumes tests run from project root) wd, err := os.Getwd() if err != nil { t.Fatalf("unable to get working directory: %v", err) } - // Data directories for all three supernodes - dataDirs := []string{ - filepath.Join(wd, "supernode-data1"), - filepath.Join(wd, "supernode-data2"), - filepath.Join(wd, "supernode-data3"), + dataDirs := make([]string, 0, len(relDataDirs)) + for _, rel := range relDataDirs { + dataDirs = append(dataDirs, filepath.Join(wd, rel)) } cmds := make([]*exec.Cmd, len(dataDirs)) @@ -47,7 +53,7 @@ func StartAllSupernodes(t *testing.T) []*exec.Cmd { "--basedir", dataDir, ) - logPath := filepath.Join(wd, fmt.Sprintf("supernode%d.out", i)) + logPath := filepath.Join(wd, fmt.Sprintf("%s%d.out", logPrefix, i)) logFile, err := os.Create(logPath) if err != nil { t.Fatalf("failed to create supernode log file %s: %v", logPath, err)