From f2424be51da9b417da16690fa3346a23e7e0cf4c Mon Sep 17 00:00:00 2001 From: curtis lee fulton Date: Mon, 8 Dec 2025 18:51:35 -0800 Subject: [PATCH] task: code reorg #71 --- Cargo.lock | 48 ++-- components/src/offer/db.rs | 117 +++----- components/src/offer/http.rs | 64 +---- components/src/offer/memory.rs | 87 ++---- components/src/offer/mod.rs | 1 + components/src/offer/provider.rs | 205 ++++++++++++++ components/tests/common/mock_service.rs | 5 +- components/tests/common/offer.rs | 251 +---------------- components/tests/offer/db_mysql.rs | 36 --- components/tests/offer/db_postgres.rs | 36 --- components/tests/offer/db_sqlite.rs | 42 --- components/tests/offer/http.rs | 42 --- components/tests/offer/memory.rs | 36 --- doc/offer-service-openapi.yaml | 21 +- pingora/Cargo.toml | 1 + pingora/src/backoff.rs | 11 +- pingora/src/balance.rs | 37 ++- pingora/src/discovery.rs | 65 +++-- pingora/src/error.rs | 15 +- pingora/src/health.rs | 5 +- pingora/src/lib.rs | 28 +- pingora/src/pool.rs | 60 ++++ server/src/commands/offer/record.rs | 3 +- server/src/di/delegates.rs | 135 +-------- server/src/di/inject/injectors/balance.rs | 9 +- .../di/inject/injectors/service/balance.rs | 3 + .../tests/features/common/step_functions.rs | 2 + service-api/src/offer.rs | 3 + service/Cargo.toml | 3 +- service/src/discovery/handler.rs | 9 +- service/src/discovery/service.rs | 4 +- service/src/lib.rs | 2 + service/src/lnurl/service.rs | 14 +- service/src/offer/handler.rs | 29 +- service/src/offer/service.rs | 31 ++- service/src/testing/discovery/mod.rs | 1 + service/src/testing/discovery/store.rs | 111 ++++++++ service/src/testing/error.rs | 49 ++++ service/src/testing/mod.rs | 3 + service/src/testing/offer/mod.rs | 1 + service/src/testing/offer/store.rs | 262 ++++++++++++++++++ 41 files changed, 990 insertions(+), 897 deletions(-) create mode 100644 components/src/offer/provider.rs create mode 100644 pingora/src/pool.rs create mode 100644 service/src/testing/discovery/mod.rs create mode 100644 service/src/testing/discovery/store.rs create mode 100644 service/src/testing/error.rs create mode 100644 service/src/testing/mod.rs create mode 100644 service/src/testing/offer/mod.rs create mode 100644 service/src/testing/offer/store.rs diff --git a/Cargo.lock b/Cargo.lock index 4e8dc66..8275ca9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -403,9 +403,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" +checksum = "0e050f626429857a27ddccb31e0aca21356bfa709c04041aefddac081a8f068a" [[package]] name = "bech32" @@ -429,9 +429,9 @@ dependencies = [ [[package]] name = "bitcoin" -version = "0.32.7" +version = "0.32.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fda569d741b895131a88ee5589a467e73e9c4718e958ac9308e4f7dc44b6945" +checksum = "1e499f9fc0407f50fe98af744ab44fa67d409f76b6772e1689ec8485eb0c0f66" dependencies = [ "base58ck", "bech32", @@ -558,9 +558,9 @@ checksum = "6bd91ee7b2422bcb158d90ef4d14f75ef67f340943fc4149891dcce8f8b972a3" [[package]] name = "cc" -version = "1.2.48" +version = "1.2.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c481bdbf0ed3b892f6f806287d72acd515b352a4ec27a208489b8c1bc839633a" +checksum = "90583009037521a116abf44494efecd645ba48b6622457080f080b85544e2215" dependencies = [ "find-msvc-tools", 
"jobserver", @@ -1184,9 +1184,9 @@ checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" [[package]] name = "flate2" -version = "1.1.7" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2152dbcb980c05735e2a651d96011320a949eb31a0c8b38b72645ce97dec676" +checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" dependencies = [ "crc32fast", "libz-ng-sys", @@ -3132,9 +3132,9 @@ checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" [[package]] name = "reqwest" -version = "0.12.24" +version = "0.12.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" +checksum = "b6eff9328d40131d43bd911d42d79eb6a47312002a4daefc9e37f17e74a7701a" dependencies = [ "base64", "bytes", @@ -3819,9 +3819,9 @@ dependencies = [ [[package]] name = "simd-adler32" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" +checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" [[package]] name = "simple_asn1" @@ -4156,11 +4156,11 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "switchgear" -version = "0.1.24" +version = "0.1.25" [[package]] name = "switchgear-components" -version = "0.1.24" +version = "0.1.25" dependencies = [ "anyhow", "async-trait", @@ -4201,7 +4201,7 @@ dependencies = [ [[package]] name = "switchgear-migration" -version = "0.1.24" +version = "0.1.25" dependencies = [ "sea-orm-migration", "tokio", @@ -4209,7 +4209,7 @@ dependencies = [ [[package]] name = "switchgear-pingora" -version = "0.1.24" +version = "0.1.25" dependencies = [ "arc-swap", "async-trait", @@ -4222,6 +4222,7 @@ dependencies = [ "pingora-load-balancing", "rand 0.8.5", "secp256k1 0.31.1", + "switchgear-components", "switchgear-service-api", "thiserror 2.0.17", "tokio", @@ -4230,7 +4231,7 @@ dependencies = [ [[package]] name = "switchgear-server" -version = "0.1.24" +version = "0.1.25" dependencies = [ "anyhow", "async-trait", @@ -4277,7 +4278,7 @@ dependencies = [ [[package]] name = "switchgear-service" -version = "0.1.24" +version = "0.1.25" dependencies = [ "async-trait", "axum", @@ -4287,6 +4288,7 @@ dependencies = [ "chrono", "http", "image", + "indexmap 2.12.1", "jsonwebtoken", "log", "p256", @@ -4298,8 +4300,8 @@ dependencies = [ "secp256k1 0.31.1", "serde", "serde_json", + "sha2", "sqlx", - "switchgear-components", "switchgear-service-api", "thiserror 2.0.17", "tokio", @@ -4310,7 +4312,7 @@ dependencies = [ [[package]] name = "switchgear-service-api" -version = "0.1.24" +version = "0.1.25" dependencies = [ "async-trait", "axum", @@ -4331,7 +4333,7 @@ dependencies = [ [[package]] name = "switchgear-testing" -version = "0.1.24" +version = "0.1.25" dependencies = [ "anyhow", "dotenvy", @@ -4753,9 +4755,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf146f99d442e8e68e585f5d798ccd3cad9a7835b917e09728880a862706456" +checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" dependencies = [ "bitflags 2.10.0", "bytes", diff --git a/components/src/offer/db.rs b/components/src/offer/db.rs index 204676c..ed8975d 100644 --- a/components/src/offer/db.rs +++ b/components/src/offer/db.rs @@ -8,13 +8,10 @@ 
use sea_orm::{ ColumnTrait, Database, DatabaseConnection, EntityTrait, QueryFilter, QueryOrder, QuerySelect, Set, }; -use sha2::{Digest, Sha256}; use switchgear_migration::OnConflict; use switchgear_migration::{Expr, MigratorTrait}; -use switchgear_service_api::lnurl::LnUrlOfferMetadata; use switchgear_service_api::offer::{ - Offer, OfferMetadata, OfferMetadataSparse, OfferMetadataStore, OfferProvider, OfferRecord, - OfferRecordSparse, OfferStore, + OfferMetadata, OfferMetadataStore, OfferRecord, OfferRecordSparse, OfferStore, }; use switchgear_service_api::service::ServiceErrorSource; use uuid::Uuid; @@ -70,32 +67,49 @@ impl OfferStore for DbOfferStore { &self, partition: &str, id: &Uuid, + sparse: Option, ) -> Result, Self::Error> { - let model = OfferRecordTable::find_by_id((partition.to_string(), *id)) + let sparse = sparse.unwrap_or(true); + + let result = OfferRecordTable::find_by_id((partition.to_string(), *id)) + .find_also_related(OfferMetadataTable) .one(&self.db) .await .map_err(|e| { OfferStoreError::from_db( ServiceErrorSource::Internal, - format!("getting offer for partition {partition} id {id}"), + format!("getting offer with metadata for partition {partition} id {id}",), e, ) })?; - match model { - Some(model) => Ok(Some(OfferRecord { - partition: model.partition, - id: model.id, - offer: OfferRecordSparse { - max_sendable: model.max_sendable as u64, - min_sendable: model.min_sendable as u64, - metadata_id: model.metadata_id, - timestamp: model.timestamp.into(), - expires: model.expires.map(|dt| dt.into()), - }, - })), - None => Ok(None), - } + let (offer_model, metadata_model) = match (result, sparse) { + (Some((offer, Some(metadata))), false) => { + let metadata = serde_json::from_value(metadata.metadata).map_err(|e| { + OfferStoreError::serialization_error( + ServiceErrorSource::Internal, + format!("deserializing metadata for partition {partition} id {id}",), + e, + ) + })?; + (offer, Some(metadata)) + } + (Some((offer, _)), true) => (offer, None), + _ => return Ok(None), + }; + + Ok(Some(OfferRecord { + partition: offer_model.partition, + id: offer_model.id, + offer: OfferRecordSparse { + max_sendable: offer_model.max_sendable as u64, + min_sendable: offer_model.min_sendable as u64, + metadata_id: offer_model.metadata_id, + metadata: metadata_model, + timestamp: offer_model.timestamp.into(), + expires: offer_model.expires.map(|dt| dt.into()), + }, + })) } async fn get_offers( @@ -129,6 +143,7 @@ impl OfferStore for DbOfferStore { max_sendable: model.max_sendable as u64, min_sendable: model.min_sendable as u64, metadata_id: model.metadata_id, + metadata: None, timestamp: model.timestamp.into(), expires: model.expires.map(|dt| dt.into()), }, @@ -555,65 +570,3 @@ impl OfferMetadataStore for DbOfferStore { Ok(result.rows_affected > 0) } } - -#[async_trait] -impl OfferProvider for DbOfferStore { - type Error = OfferStoreError; - - async fn offer( - &self, - _hostname: &str, - partition: &str, - id: &Uuid, - ) -> Result, Self::Error> { - let result = OfferRecordTable::find_by_id((partition.to_string(), *id)) - .find_also_related(OfferMetadataTable) - .one(&self.db) - .await - .map_err(|e| { - OfferStoreError::from_db( - ServiceErrorSource::Internal, - format!("getting offer with metadata for partition {partition} id {id}",), - e, - ) - })?; - - let (offer_model, metadata_model) = match result { - Some((offer, Some(metadata))) => (offer, metadata), - _ => return Ok(None), - }; - - let metadata_sparse: OfferMetadataSparse = serde_json::from_value(metadata_model.metadata) - 
.map_err(|e| { - OfferStoreError::serialization_error( - ServiceErrorSource::Internal, - format!("deserializing metadata for offer {id} in partition {partition}",), - e, - ) - })?; - - let lnurl_metadata = LnUrlOfferMetadata(metadata_sparse); - let metadata_json_string = serde_json::to_string(&lnurl_metadata).map_err(|e| { - OfferStoreError::serialization_error( - ServiceErrorSource::Internal, - format!("serializing metadata for offer {id} in partition {partition}",), - e, - ) - })?; - - let mut hasher = Sha256::new(); - hasher.update(metadata_json_string.as_bytes()); - let metadata_json_hash = hasher.finalize().into(); - - Ok(Some(Offer { - partition: offer_model.partition, - id: offer_model.id, - max_sendable: offer_model.max_sendable as u64, - min_sendable: offer_model.min_sendable as u64, - metadata_json_string, - metadata_json_hash, - timestamp: offer_model.timestamp.with_timezone(&Utc), - expires: offer_model.expires.map(|dt| dt.with_timezone(&Utc)), - })) - } -} diff --git a/components/src/offer/http.rs b/components/src/offer/http.rs index f213234..cca608b 100644 --- a/components/src/offer/http.rs +++ b/components/src/offer/http.rs @@ -3,12 +3,9 @@ use async_trait::async_trait; use axum::http::{HeaderMap, HeaderValue}; use reqwest::{Certificate, Client, ClientBuilder, IntoUrl, StatusCode}; use rustls::pki_types::CertificateDer; -use sha2::Digest; use std::time::Duration; -use switchgear_service_api::lnurl::LnUrlOfferMetadata; use switchgear_service_api::offer::{ - HttpOfferClient, Offer, OfferMetadata, OfferMetadataStore, OfferProvider, OfferRecord, - OfferStore, + HttpOfferClient, OfferMetadata, OfferMetadataStore, OfferRecord, OfferStore, }; use switchgear_service_api::service::ServiceErrorSource; use url::Url; @@ -154,8 +151,11 @@ impl OfferStore for HttpOfferStore { &self, partition: &str, id: &Uuid, + sparse: Option, ) -> Result, Self::Error> { + let sparse = sparse.unwrap_or(true); let url = self.offers_partition_id_url(partition, id); + let url = format!("{url}?sparse={sparse}"); let response = self.client.get(&url).send().await.map_err(|e| { OfferStoreError::http_error(ServiceErrorSource::Upstream, format!("get offer {url}"), e) })?; @@ -425,62 +425,6 @@ impl OfferMetadataStore for HttpOfferStore { } } -#[async_trait] -impl OfferProvider for HttpOfferStore { - type Error = OfferStoreError; - - async fn offer( - &self, - _hostname: &str, - partition: &str, - id: &Uuid, - ) -> Result, Self::Error> { - if let Some(offer) = self.get_offer(partition, id).await? { - let offer_metadata = match self - .get_metadata(partition, &offer.offer.metadata_id) - .await? 
- { - Some(metadata) => metadata, - None => { - return Ok(None); - } - }; - - let lnurl_metadata = LnUrlOfferMetadata(offer_metadata.metadata); - let metadata_json_string = serde_json::to_string(&lnurl_metadata).map_err(|e| { - OfferStoreError::serialization_error( - ServiceErrorSource::Internal, - format!("building LNURL offer response for offer {}", offer.id), - e, - ) - })?; - - let metadata_json_hash = sha2::Sha256::digest(metadata_json_string.as_bytes()) - .to_vec() - .try_into() - .map_err(|_| { - OfferStoreError::hash_conversion_error( - ServiceErrorSource::Internal, - format!("generating metadata hash for offer {}", offer.id), - ) - })?; - - Ok(Some(Offer { - partition: offer.partition, - id: offer.id, - max_sendable: offer.offer.max_sendable, - min_sendable: offer.offer.min_sendable, - metadata_json_string, - metadata_json_hash, - timestamp: offer.offer.timestamp, - expires: offer.offer.expires, - })) - } else { - Ok(None) - } - } -} - #[async_trait] impl HttpOfferClient for HttpOfferStore { async fn health(&self) -> Result<(), ::Error> { diff --git a/components/src/offer/memory.rs b/components/src/offer/memory.rs index c245575..1e963fb 100644 --- a/components/src/offer/memory.rs +++ b/components/src/offer/memory.rs @@ -1,13 +1,8 @@ use crate::offer::error::OfferStoreError; use async_trait::async_trait; -use sha2::Digest; use std::collections::HashMap; use std::sync::Arc; -use switchgear_service_api::lnurl::LnUrlOfferMetadata; -use switchgear_service_api::offer::{ - Offer, OfferMetadata, OfferMetadataStore, OfferProvider, OfferRecord, OfferStore, -}; -use switchgear_service_api::service::ServiceErrorSource; +use switchgear_service_api::offer::{OfferMetadata, OfferMetadataStore, OfferRecord, OfferStore}; use tokio::sync::Mutex; use uuid::Uuid; @@ -52,11 +47,25 @@ impl OfferStore for MemoryOfferStore { &self, partition: &str, id: &Uuid, + sparse: Option, ) -> Result, Self::Error> { + let sparse = sparse.unwrap_or(true); + let metadata_store = self.metadata.lock().await; let store = self.offer.lock().await; - Ok(store - .get(&(partition.to_string(), *id)) - .map(|o| o.offer.clone())) + + Ok(store.get(&(partition.to_string(), *id)).and_then(|offer| { + if sparse { + Some(offer.offer.clone()) + } else { + metadata_store + .get(&(partition.to_string(), offer.offer.offer.metadata_id)) + .map(|metadata| { + let mut offer = offer.offer.clone(); + offer.offer.metadata = Some(metadata.metadata.metadata.clone()); + offer + }) + } + })) } async fn get_offers( @@ -147,66 +156,6 @@ impl OfferStore for MemoryOfferStore { } } -#[async_trait] -impl OfferProvider for MemoryOfferStore { - type Error = OfferStoreError; - - async fn offer( - &self, - _hostname: &str, - partition: &str, - id: &Uuid, - ) -> Result, Self::Error> { - if let Some(offer) = self.get_offer(partition, id).await? { - let offer_metadata = match self - .get_metadata(partition, &offer.offer.metadata_id) - .await? 
- { - Some(metadata) => metadata, - None => { - return Ok(None); - } - }; - - let lnurl_metadata = LnUrlOfferMetadata(offer_metadata.metadata); - let metadata_json_string = serde_json::to_string(&lnurl_metadata).map_err(|e| { - OfferStoreError::serialization_error( - ServiceErrorSource::Internal, - format!( - "serializing LnUrlOfferMetadata while building LNURL offer response for {offer:?}" - ), - e, - ) - })?; - - let metadata_json_hash = sha2::Sha256::digest(metadata_json_string.as_bytes()) - .to_vec() - .try_into() - .map_err(|_| { - OfferStoreError::hash_conversion_error( - ServiceErrorSource::Internal, - format!( - "hashing LnUrlOfferMetadata json string {metadata_json_string} while building LNURL offer response for {offer:?}" - ), - ) - })?; - - Ok(Some(Offer { - partition: offer.partition, - id: offer.id, - max_sendable: offer.offer.max_sendable, - min_sendable: offer.offer.min_sendable, - metadata_json_string, - metadata_json_hash, - timestamp: offer.offer.timestamp, - expires: offer.offer.expires, - })) - } else { - Ok(None) - } - } -} - #[async_trait] impl OfferMetadataStore for MemoryOfferStore { type Error = OfferStoreError; diff --git a/components/src/offer/mod.rs b/components/src/offer/mod.rs index 73bcb71..424c50f 100644 --- a/components/src/offer/mod.rs +++ b/components/src/offer/mod.rs @@ -3,3 +3,4 @@ pub(crate) mod db_orm; pub mod error; pub mod http; pub mod memory; +pub mod provider; diff --git a/components/src/offer/provider.rs b/components/src/offer/provider.rs new file mode 100644 index 0000000..c502cc2 --- /dev/null +++ b/components/src/offer/provider.rs @@ -0,0 +1,205 @@ +use crate::offer::error::OfferStoreError; +use async_trait::async_trait; +use sha2::{Digest, Sha256}; +use switchgear_service_api::lnurl::LnUrlOfferMetadata; +use switchgear_service_api::offer::{Offer, OfferProvider, OfferStore}; +use switchgear_service_api::service::ServiceErrorSource; +use uuid::Uuid; + +#[derive(Clone, Debug)] +pub struct StoreOfferProvider { + store: S, +} + +impl StoreOfferProvider { + pub fn new(store: S) -> Self { + Self { store } + } +} + +#[async_trait] +impl OfferProvider for StoreOfferProvider +where + S: OfferStore + Send + Sync + 'static, + S::Error: From, +{ + type Error = S::Error; + + async fn offer( + &self, + _hostname: &str, + partition: &str, + id: &Uuid, + ) -> Result, Self::Error> { + if let Some(offer) = self.store.get_offer(partition, id, Some(false)).await? 
{ + let offer_metadata = match offer.offer.metadata { + Some(metadata) => metadata, + None => { + return Ok(None); + } + }; + + let lnurl_metadata = LnUrlOfferMetadata(offer_metadata); + let metadata_json_string = serde_json::to_string(&lnurl_metadata).map_err(|e| { + OfferStoreError::serialization_error( + ServiceErrorSource::Internal, + format!("building LNURL offer response for offer {}", offer.id), + e, + ) + })?; + + let mut hasher = Sha256::new(); + hasher.update(metadata_json_string.as_bytes()); + let metadata_json_hash = hasher.finalize().into(); + + Ok(Some(Offer { + partition: offer.partition, + id: offer.id, + max_sendable: offer.offer.max_sendable, + min_sendable: offer.offer.min_sendable, + metadata_json_string, + metadata_json_hash, + timestamp: offer.offer.timestamp, + expires: offer.offer.expires, + })) + } else { + Ok(None) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use chrono::Utc; + use switchgear_service_api::offer::{ + OfferMetadataIdentifier, OfferMetadataImage, OfferMetadataSparse, OfferRecord, + OfferRecordSparse, + }; + + // Mock OfferStore for testing + #[derive(Clone)] + struct MockOfferStore { + response: Option, + } + + impl MockOfferStore { + fn new(response: Option) -> Self { + Self { response } + } + } + + #[async_trait] + impl OfferStore for MockOfferStore { + type Error = OfferStoreError; + + async fn get_offer( + &self, + _partition: &str, + _id: &Uuid, + _sparse: Option, + ) -> Result, Self::Error> { + Ok(self.response.clone()) + } + + async fn get_offers( + &self, + _partition: &str, + _start: usize, + _count: usize, + ) -> Result, Self::Error> { + Ok(vec![]) + } + + async fn post_offer(&self, _offer: OfferRecord) -> Result, Self::Error> { + Ok(None) + } + + async fn put_offer(&self, _offer: OfferRecord) -> Result { + Ok(false) + } + + async fn delete_offer(&self, _partition: &str, _id: &Uuid) -> Result { + Ok(false) + } + } + + // Test data generator + fn create_offer_with_metadata(offer_id: Uuid, metadata_id: Uuid) -> OfferRecord { + OfferRecord { + partition: "default".to_string(), + id: offer_id, + offer: OfferRecordSparse { + max_sendable: 5000000, + min_sendable: 1000, + metadata_id, + metadata: Some(OfferMetadataSparse { + text: "Test LNURL offer".to_string(), + long_text: Some( + "This is a comprehensive test of the LNURL offer system".to_string(), + ), + image: Some(OfferMetadataImage::Png(vec![0x89, 0x50, 0x4E, 0x47])), + identifier: Some(OfferMetadataIdentifier::Email( + "test@lnurl.com".parse().unwrap(), + )), + }), + timestamp: Utc::now(), + expires: Some(Utc::now() + chrono::Duration::hours(24)), + }, + } + } + + #[tokio::test] + async fn test_offer_provider_successful_retrieval() { + let offer_id = Uuid::new_v4(); + let metadata_id = Uuid::new_v4(); + let offer = create_offer_with_metadata(offer_id, metadata_id); + + let store = MockOfferStore::new(Some(offer)); + let provider = StoreOfferProvider::new(store); + let result = provider + .offer("example.com", "default", &offer_id) + .await + .unwrap(); + + assert!(result.is_some()); + let offer = result.unwrap(); + + // Verify basic offer fields + assert_eq!(offer.id, offer_id); + assert_eq!(offer.max_sendable, 5000000); + assert_eq!(offer.min_sendable, 1000); + + // Verify metadata_json_string is in LNURL format and contains the expected metadata + assert!(offer.metadata_json_string.starts_with("[")); + assert!(offer.metadata_json_string.contains("Test LNURL offer")); + assert!(offer.metadata_json_string.contains("test@lnurl.com")); + + // Verify the JSON string can be 
deserialized back to LnUrlOfferMetadata + let parsed_metadata: LnUrlOfferMetadata = + serde_json::from_str(&offer.metadata_json_string).unwrap(); + assert_eq!(parsed_metadata.0.text, "Test LNURL offer"); + assert_eq!( + parsed_metadata.0.long_text, + Some("This is a comprehensive test of the LNURL offer system".to_string()) + ); + + // Verify hash is calculated correctly + let expected_hash = sha2::Sha256::digest(offer.metadata_json_string.as_bytes()); + assert_eq!(offer.metadata_json_hash, expected_hash.as_ref()); + } + + #[tokio::test] + async fn test_offer_provider_offer_not_found() { + let store = MockOfferStore::new(None); + let provider = StoreOfferProvider::new(store); + + let non_existent_id = Uuid::new_v4(); + let result = provider + .offer("example.com", "default", &non_existent_id) + .await + .unwrap(); + + assert!(result.is_none()); + } +} diff --git a/components/tests/common/mock_service.rs b/components/tests/common/mock_service.rs index fffe847..a522c2c 100644 --- a/components/tests/common/mock_service.rs +++ b/components/tests/common/mock_service.rs @@ -167,8 +167,11 @@ async fn offer_health() -> StatusCode { async fn get_offer( State(state): State, AxumPath((partition, id)): AxumPath<(String, Uuid)>, + axum::extract::Query(params): axum::extract::Query>, ) -> Result, StatusCode> { - match state.store.get_offer(&partition, &id).await { + let sparse: Option = params.get("sparse").and_then(|s| s.parse().ok()); + + match state.store.get_offer(&partition, &id, sparse).await { Ok(Some(offer)) => Ok(Json(offer)), Ok(None) => Err(StatusCode::NOT_FOUND), Err(_) => Err(StatusCode::INTERNAL_SERVER_ERROR), diff --git a/components/tests/common/offer.rs b/components/tests/common/offer.rs index eec43f1..f9b0f06 100644 --- a/components/tests/common/offer.rs +++ b/components/tests/common/offer.rs @@ -1,10 +1,8 @@ use chrono::{Timelike, Utc}; -use sha2::Digest; use switchgear_components::offer::error::{OfferStoreError, OfferStoreErrorSourceKind}; -use switchgear_service_api::lnurl::LnUrlOfferMetadata; use switchgear_service_api::offer::{ OfferMetadata, OfferMetadataIdentifier, OfferMetadataImage, OfferMetadataSparse, - OfferMetadataStore, OfferProvider, OfferRecord, OfferRecordSparse, OfferStore, + OfferMetadataStore, OfferRecord, OfferRecordSparse, OfferStore, }; use switchgear_service_api::service::ServiceErrorSource; use uuid::Uuid; @@ -24,6 +22,7 @@ pub fn create_test_offer_with_existing_metadata(id: Uuid, metadata_id: Uuid) -> max_sendable: 1000, min_sendable: 100, metadata_id, + metadata: None, timestamp: now, expires: Some(expires), }, @@ -53,6 +52,7 @@ pub fn create_test_offer_with_metadata_id(offer_id: Uuid, metadata_id: Uuid) -> max_sendable: 5000000, min_sendable: 1000, metadata_id, + metadata: None, timestamp: Utc::now(), expires: Some(Utc::now() + chrono::Duration::hours(24)), }, @@ -87,7 +87,7 @@ where ::Error: std::fmt::Debug, { let id = Uuid::new_v4(); - let result = store.get_offer("default", &id).await.unwrap(); + let result = store.get_offer("default", &id, None).await.unwrap(); assert!(result.is_none()); } @@ -103,7 +103,7 @@ where let result = store.post_offer(offer.clone()).await.unwrap(); assert_eq!(result, Some(offer.id)); - let retrieved = store.get_offer("default", &offer_id).await.unwrap(); + let retrieved = store.get_offer("default", &offer_id, None).await.unwrap(); assert!(retrieved.is_some()); assert_eq!(retrieved.unwrap().id, offer_id); } @@ -136,7 +136,7 @@ where let was_created = store.put_offer(offer.clone()).await.unwrap(); assert!(was_created); - let 
retrieved = store.get_offer("default", &offer_id).await.unwrap(); + let retrieved = store.get_offer("default", &offer_id, None).await.unwrap(); assert!(retrieved.is_some()); assert_eq!(retrieved.unwrap().id, offer_id); } @@ -157,7 +157,7 @@ where let was_created2 = store.put_offer(offer.clone()).await.unwrap(); assert!(!was_created2); - let retrieved = store.get_offer("default", &offer_id).await.unwrap(); + let retrieved = store.get_offer("default", &offer_id, None).await.unwrap(); assert_eq!(retrieved.unwrap().offer.max_sendable, 2000); } @@ -175,7 +175,7 @@ where let deleted = store.delete_offer("default", &offer_id).await.unwrap(); assert!(deleted); - let retrieved = store.get_offer("default", &offer_id).await.unwrap(); + let retrieved = store.get_offer("default", &offer_id, None).await.unwrap(); assert!(retrieved.is_none()); } @@ -376,7 +376,6 @@ where assert_eq!(single_metadata.as_slice(), &expected_metadata[5..6]); } -// OfferProvider tests pub async fn setup_store_with_offer_and_metadata(store: S) -> (S, Uuid, Uuid) where S: OfferStore + OfferMetadataStore, @@ -405,117 +404,6 @@ where (store, offer_id, metadata_id) } -pub async fn test_offer_provider_successful_retrieval(store: S) -where - S: OfferProvider + OfferStore + OfferMetadataStore, - ::Error: std::fmt::Debug, - ::Error: std::fmt::Debug, - ::Error: std::fmt::Debug, -{ - let (store, offer_id, _metadata_id) = setup_store_with_offer_and_metadata(store).await; - - let result = store - .offer("example.com", "default", &offer_id) - .await - .unwrap(); - - assert!(result.is_some()); - let offer = result.unwrap(); - - // Verify basic offer fields - assert_eq!(offer.id, offer_id); - assert_eq!(offer.max_sendable, 5000000); - assert_eq!(offer.min_sendable, 1000); - - // Verify metadata_json_string is in LNURL format and contains the expected metadata - assert!(offer.metadata_json_string.starts_with("[")); - assert!(offer.metadata_json_string.contains("Test LNURL offer")); - assert!(offer.metadata_json_string.contains("test@lnurl.com")); - - // Verify the JSON string can be deserialized back to LnUrlOfferMetadata - let parsed_metadata: LnUrlOfferMetadata = - serde_json::from_str(&offer.metadata_json_string).unwrap(); - assert_eq!(parsed_metadata.0.text, "Test LNURL offer"); - assert_eq!( - parsed_metadata.0.long_text, - Some("This is a comprehensive test of the LNURL offer system".to_string()) - ); - - // Verify hash is calculated correctly - let expected_hash = sha2::Sha256::digest(offer.metadata_json_string.as_bytes()); - assert_eq!(offer.metadata_json_hash, expected_hash.as_ref()); -} - -pub async fn test_offer_provider_offer_not_found(store: S) -where - S: OfferProvider, - ::Error: std::fmt::Debug, -{ - let non_existent_id = Uuid::new_v4(); - - let result = store - .offer("example.com", "default", &non_existent_id) - .await - .unwrap(); - - assert!(result.is_none()); -} - -pub async fn test_offer_provider_metadata_not_found_or_foreign_key_constraint(store: S) -where - S: OfferProvider + OfferStore + OfferMetadataStore, - ::Error: std::fmt::Debug, - ::Error: std::fmt::Debug + Into, - ::Error: std::fmt::Debug, -{ - // Create offer with valid metadata first - let (store, offer_id, metadata_id) = setup_store_with_offer_and_metadata(store).await; - - store.delete_offer("default", &offer_id).await.unwrap(); - - store - .delete_metadata("default", &metadata_id) - .await - .unwrap(); - - let result = store - .offer("example.com", "default", &offer_id) - .await - .unwrap(); - - assert!(result.is_none()); -} - -pub async fn 
test_offer_provider_hash_consistency(store: S) -where - S: OfferProvider + OfferStore + OfferMetadataStore, - ::Error: std::fmt::Debug, - ::Error: std::fmt::Debug, - ::Error: std::fmt::Debug, -{ - let (store, offer_id, _) = setup_store_with_offer_and_metadata(store).await; - - // Call offer method multiple times - let result1 = store - .offer("example.com", "default", &offer_id) - .await - .unwrap() - .unwrap(); - let result2 = store - .offer("example.com", "default", &offer_id) - .await - .unwrap() - .unwrap(); - - // Hash should be consistent across calls - assert_eq!(result1.metadata_json_hash, result2.metadata_json_hash); - assert_eq!(result1.metadata_json_string, result2.metadata_json_string); - - // Verify hash matches manual calculation - let manual_hash = sha2::Sha256::digest(result1.metadata_json_string.as_bytes()); - assert_eq!(result1.metadata_json_hash, manual_hash.as_ref()); -} - pub async fn test_post_offer_with_missing_metadata(store: S) where S: OfferStore, @@ -531,6 +419,7 @@ where max_sendable: 1000, min_sendable: 100, metadata_id: non_existent_metadata_id, // This metadata doesn't exist + metadata: None, timestamp: Utc::now(), expires: Some(Utc::now() + chrono::Duration::seconds(3600)), }, @@ -566,6 +455,7 @@ where max_sendable: 1000, min_sendable: 100, metadata_id: non_existent_metadata_id, // This metadata doesn't exist + metadata: None, timestamp: Utc::now(), expires: Some(Utc::now() + chrono::Duration::seconds(3600)), }, @@ -625,124 +515,3 @@ where .unwrap(); assert!(second_result); } - -pub async fn test_offer_provider_different_metadata_different_hashes(store: S) -where - S: OfferProvider + OfferStore + OfferMetadataStore, - ::Error: std::fmt::Debug, - ::Error: std::fmt::Debug, - ::Error: std::fmt::Debug, -{ - // Create two different metadata entries - let metadata1_id = Uuid::new_v4(); - let metadata1 = OfferMetadata { - id: metadata1_id, - partition: "default".to_string(), - metadata: OfferMetadataSparse { - text: "First offer".to_string(), - long_text: None, - image: None, - identifier: None, - }, - }; - store.post_metadata(metadata1).await.unwrap(); - - let metadata2_id = Uuid::new_v4(); - let metadata2 = OfferMetadata { - id: metadata2_id, - partition: "default".to_string(), - metadata: OfferMetadataSparse { - text: "Second offer".to_string(), - long_text: Some("Different description".to_string()), - image: None, - identifier: None, - }, - }; - store.post_metadata(metadata2).await.unwrap(); - - // Create two offers with different metadata - let offer1_id = Uuid::new_v4(); - let offer1 = create_test_offer_with_metadata_id(offer1_id, metadata1_id); - store.post_offer(offer1).await.unwrap(); - - let offer2_id = Uuid::new_v4(); - let offer2 = create_test_offer_with_metadata_id(offer2_id, metadata2_id); - store.post_offer(offer2).await.unwrap(); - - // Retrieve both offers - let result1 = store - .offer("example.com", "default", &offer1_id) - .await - .unwrap() - .unwrap(); - let result2 = store - .offer("example.com", "default", &offer2_id) - .await - .unwrap() - .unwrap(); - - // Should have different metadata strings and hashes - assert_ne!(result1.metadata_json_string, result2.metadata_json_string); - assert_ne!(result1.metadata_json_hash, result2.metadata_json_hash); - - // Verify content differences - assert!(result1.metadata_json_string.contains("First offer")); - assert!(result2.metadata_json_string.contains("Second offer")); - assert!(!result1 - .metadata_json_string - .contains("Different description")); - assert!(result2 - .metadata_json_string - 
.contains("Different description")); -} - -pub async fn test_offer_provider_valid_current_offer_returns_some(store: S) -where - S: OfferProvider + OfferStore + OfferMetadataStore, - ::Error: std::fmt::Debug, - ::Error: std::fmt::Debug, - ::Error: std::fmt::Debug, -{ - // Create metadata first - let metadata_id = Uuid::new_v4(); - let metadata = OfferMetadata { - id: metadata_id, - partition: "default".to_string(), - metadata: OfferMetadataSparse { - text: "Valid current offer".to_string(), - long_text: None, - image: None, - identifier: None, - }, - }; - store.post_metadata(metadata).await.unwrap(); - - // Create a valid current offer - let offer_id = Uuid::new_v4(); - let valid_offer = OfferRecord { - partition: "default".to_string(), - id: offer_id, - offer: OfferRecordSparse { - max_sendable: 1000000, - min_sendable: 1000, - metadata_id, - timestamp: Utc::now() - chrono::Duration::minutes(30), // Started 30 minutes ago - expires: Some(Utc::now() + chrono::Duration::hours(1)), // Expires 1 hour from now - }, - }; - store.post_offer(valid_offer.clone()).await.unwrap(); - - // Try to get the valid offer through OfferProvider::offer - let result = store - .offer("example.com", "default", &offer_id) - .await - .unwrap(); - - // Should return Some for valid offer - assert!(result.is_some()); - let offer = result.unwrap(); - assert_eq!(offer.id, offer_id); - assert_eq!(offer.max_sendable, valid_offer.offer.max_sendable); - assert_eq!(offer.min_sendable, valid_offer.offer.min_sendable); - assert!(offer.metadata_json_string.contains("Valid current offer")); -} diff --git a/components/tests/offer/db_mysql.rs b/components/tests/offer/db_mysql.rs index c3e491d..b308a79 100644 --- a/components/tests/offer/db_mysql.rs +++ b/components/tests/offer/db_mysql.rs @@ -119,42 +119,6 @@ async fn test_mysql_get_all_offer_metadata() { offer::test_get_all_offer_metadata(store).await; } -#[tokio::test] -async fn test_mysql_offer_provider_successful_retrieval() { - let (store, _guard) = create_mysql_store().await; - offer::test_offer_provider_successful_retrieval(store).await; -} - -#[tokio::test] -async fn test_mysql_offer_provider_offer_not_found() { - let (store, _guard) = create_mysql_store().await; - offer::test_offer_provider_offer_not_found(store).await; -} - -#[tokio::test] -async fn test_mysql_offer_provider_metadata_not_found() { - let (store, _guard) = create_mysql_store().await; - offer::test_offer_provider_metadata_not_found_or_foreign_key_constraint(store).await; -} - -#[tokio::test] -async fn test_mysql_offer_provider_hash_consistency() { - let (store, _guard) = create_mysql_store().await; - offer::test_offer_provider_hash_consistency(store).await; -} - -#[tokio::test] -async fn test_mysql_offer_provider_different_metadata_different_hashes() { - let (store, _guard) = create_mysql_store().await; - offer::test_offer_provider_different_metadata_different_hashes(store).await; -} - -#[tokio::test] -async fn test_mysql_offer_provider_valid_current_offer_returns_some() { - let (store, _guard) = create_mysql_store().await; - offer::test_offer_provider_valid_current_offer_returns_some(store).await; -} - #[tokio::test] async fn test_mysql_post_offer_with_missing_metadata() { let (store, _guard) = create_mysql_store().await; diff --git a/components/tests/offer/db_postgres.rs b/components/tests/offer/db_postgres.rs index bb6dce9..0a08137 100644 --- a/components/tests/offer/db_postgres.rs +++ b/components/tests/offer/db_postgres.rs @@ -116,42 +116,6 @@ async fn test_postgres_get_all_offer_metadata() { 
offer::test_get_all_offer_metadata(store).await; } -#[tokio::test] -async fn test_postgres_offer_provider_successful_retrieval() { - let (store, _guard) = create_postgres_store().await; - offer::test_offer_provider_successful_retrieval(store).await; -} - -#[tokio::test] -async fn test_postgres_offer_provider_offer_not_found() { - let (store, _guard) = create_postgres_store().await; - offer::test_offer_provider_offer_not_found(store).await; -} - -#[tokio::test] -async fn test_postgres_offer_provider_metadata_not_found() { - let (store, _guard) = create_postgres_store().await; - offer::test_offer_provider_metadata_not_found_or_foreign_key_constraint(store).await; -} - -#[tokio::test] -async fn test_postgres_offer_provider_hash_consistency() { - let (store, _guard) = create_postgres_store().await; - offer::test_offer_provider_hash_consistency(store).await; -} - -#[tokio::test] -async fn test_postgres_offer_provider_different_metadata_different_hashes() { - let (store, _guard) = create_postgres_store().await; - offer::test_offer_provider_different_metadata_different_hashes(store).await; -} - -#[tokio::test] -async fn test_postgres_offer_provider_valid_current_offer_returns_some() { - let (store, _guard) = create_postgres_store().await; - offer::test_offer_provider_valid_current_offer_returns_some(store).await; -} - #[tokio::test] async fn test_postgres_post_offer_with_missing_metadata() { let (store, _guard) = create_postgres_store().await; diff --git a/components/tests/offer/db_sqlite.rs b/components/tests/offer/db_sqlite.rs index 156c0ba..cf42d3e 100644 --- a/components/tests/offer/db_sqlite.rs +++ b/components/tests/offer/db_sqlite.rs @@ -125,48 +125,6 @@ async fn test_sqlite_get_all_offer_metadata() { offer::test_get_all_offer_metadata(store).await; } -#[tokio::test] -async fn test_sqlite_offer_provider_successful_retrieval() { - let t = TempDir::new().unwrap(); - let store = create_sqlite_store(t.path()).await; - offer::test_offer_provider_successful_retrieval(store).await; -} - -#[tokio::test] -async fn test_sqlite_offer_provider_offer_not_found() { - let t = TempDir::new().unwrap(); - let store = create_sqlite_store(t.path()).await; - offer::test_offer_provider_offer_not_found(store).await; -} - -#[tokio::test] -async fn test_sqlite_offer_provider_metadata_not_found() { - let t = TempDir::new().unwrap(); - let store = create_sqlite_store(t.path()).await; - offer::test_offer_provider_metadata_not_found_or_foreign_key_constraint(store).await; -} - -#[tokio::test] -async fn test_sqlite_offer_provider_hash_consistency() { - let t = TempDir::new().unwrap(); - let store = create_sqlite_store(t.path()).await; - offer::test_offer_provider_hash_consistency(store).await; -} - -#[tokio::test] -async fn test_sqlite_offer_provider_different_metadata_different_hashes() { - let t = TempDir::new().unwrap(); - let store = create_sqlite_store(t.path()).await; - offer::test_offer_provider_different_metadata_different_hashes(store).await; -} - -#[tokio::test] -async fn test_sqlite_offer_provider_valid_current_offer_returns_some() { - let t = TempDir::new().unwrap(); - let store = create_sqlite_store(t.path()).await; - offer::test_offer_provider_valid_current_offer_returns_some(store).await; -} - #[tokio::test] async fn test_sqlite_post_offer_with_missing_metadata() { let t = TempDir::new().unwrap(); diff --git a/components/tests/offer/http.rs b/components/tests/offer/http.rs index 80b5715..3c5c4ae 100644 --- a/components/tests/offer/http.rs +++ b/components/tests/offer/http.rs @@ -138,48 +138,6 @@ async fn 
test_http_get_all_offer_metadata() { service.shutdown().await; } -#[tokio::test] -async fn test_http_offer_provider_successful_retrieval() { - let (store, service) = create_http_store().await; - offer::test_offer_provider_successful_retrieval(store).await; - service.shutdown().await; -} - -#[tokio::test] -async fn test_http_offer_provider_offer_not_found() { - let (store, service) = create_http_store().await; - offer::test_offer_provider_offer_not_found(store).await; - service.shutdown().await; -} - -#[tokio::test] -async fn test_http_offer_provider_metadata_not_found() { - let (store, service) = create_http_store().await; - offer::test_offer_provider_metadata_not_found_or_foreign_key_constraint(store).await; - service.shutdown().await; -} - -#[tokio::test] -async fn test_http_offer_provider_hash_consistency() { - let (store, service) = create_http_store().await; - offer::test_offer_provider_hash_consistency(store).await; - service.shutdown().await; -} - -#[tokio::test] -async fn test_http_offer_provider_different_metadata_different_hashes() { - let (store, service) = create_http_store().await; - offer::test_offer_provider_different_metadata_different_hashes(store).await; - service.shutdown().await; -} - -#[tokio::test] -async fn test_http_offer_provider_valid_current_offer_returns_some() { - let (store, service) = create_http_store().await; - offer::test_offer_provider_valid_current_offer_returns_some(store).await; - service.shutdown().await; -} - #[tokio::test] async fn test_http_post_offer_with_missing_metadata() { let (store, service) = create_http_store().await; diff --git a/components/tests/offer/memory.rs b/components/tests/offer/memory.rs index 16bd323..5106492 100644 --- a/components/tests/offer/memory.rs +++ b/components/tests/offer/memory.rs @@ -98,42 +98,6 @@ async fn test_memory_get_all_offer_metadata() { offer::test_get_all_offer_metadata(store).await; } -#[tokio::test] -async fn test_memory_offer_provider_successful_retrieval() { - let store = MemoryOfferStore::default(); - offer::test_offer_provider_successful_retrieval(store).await; -} - -#[tokio::test] -async fn test_memory_offer_provider_offer_not_found() { - let store = MemoryOfferStore::default(); - offer::test_offer_provider_offer_not_found(store).await; -} - -#[tokio::test] -async fn test_memory_offer_provider_metadata_not_found() { - let store = MemoryOfferStore::default(); - offer::test_offer_provider_metadata_not_found_or_foreign_key_constraint(store).await; -} - -#[tokio::test] -async fn test_memory_offer_provider_hash_consistency() { - let store = MemoryOfferStore::default(); - offer::test_offer_provider_hash_consistency(store).await; -} - -#[tokio::test] -async fn test_memory_offer_provider_different_metadata_different_hashes() { - let store = MemoryOfferStore::default(); - offer::test_offer_provider_different_metadata_different_hashes(store).await; -} - -#[tokio::test] -async fn test_memory_offer_provider_valid_current_offer_returns_some() { - let store = MemoryOfferStore::default(); - offer::test_offer_provider_valid_current_offer_returns_some(store).await; -} - #[tokio::test] async fn test_memory_post_offer_with_missing_metadata() { let store = MemoryOfferStore::default(); diff --git a/doc/offer-service-openapi.yaml b/doc/offer-service-openapi.yaml index 50cd466..1cb059c 100644 --- a/doc/offer-service-openapi.yaml +++ b/doc/offer-service-openapi.yaml @@ -6,7 +6,7 @@ info: servers: - url: http://localhost:3002 security: - - bearerAuth: [] + - bearerAuth: [ ] paths: /offers: post: @@ -94,6 +94,13 @@ paths: 
schema: type: string format: uuid + - name: sparse + in: query + required: false + schema: + default: true + type: boolean + description: If true, only includes metadataId. If false, includes full metadata representation. responses: '200': description: Offer details @@ -316,7 +323,7 @@ paths: get: summary: Health check description: Returns HTTP 200 OK to indicate the offer service is running and accessible. - security: [] + security: [ ] responses: '200': description: Service healthy @@ -359,6 +366,11 @@ components: type: string format: uuid description: Reference to offer metadata + metadata: + allOf: + - $ref: '#/components/schemas/OfferMetadataSparse' + nullable: true + description: Optional full metadata object (included when sparse=false in GET request) timestamp: type: string format: date-time @@ -392,6 +404,11 @@ components: type: string format: uuid description: Reference to offer metadata + metadata: + allOf: + - $ref: '#/components/schemas/OfferMetadataSparse' + nullable: true + description: Optional full metadata object (included when sparse=false) timestamp: type: string format: date-time diff --git a/pingora/Cargo.toml b/pingora/Cargo.toml index 469898f..ddb293d 100644 --- a/pingora/Cargo.toml +++ b/pingora/Cargo.toml @@ -22,6 +22,7 @@ log = "0.4" pingora-core = { version = "0.6", default-features = false } pingora-error = { version = "0.6", default-features = false } pingora-load-balancing = { version = "0.6", default-features = false } +switchgear-components.workspace = true switchgear-service-api.workspace = true thiserror = "2" tokio = { version = "1", features = ["full"] } diff --git a/pingora/src/backoff.rs b/pingora/src/backoff.rs index c700031..bc08bff 100644 --- a/pingora/src/backoff.rs +++ b/pingora/src/backoff.rs @@ -1,18 +1,13 @@ +use crate::PingoraBackoffProvider; use backoff::backoff::{Backoff, Stop}; use backoff::{ExponentialBackoff, ExponentialBackoffBuilder}; use std::sync::Arc; use std::time::Duration; -pub trait BackoffProvider: Clone + Send + Sync { - type Item: Backoff + Send; - - fn get_backoff(&self) -> Self::Item; -} - #[derive(Clone)] pub struct StopBackoffProvider; -impl BackoffProvider for StopBackoffProvider { +impl PingoraBackoffProvider for StopBackoffProvider { type Item = Stop; fn get_backoff(&self) -> Self::Item { @@ -33,7 +28,7 @@ impl ExponentialBackoffProvider { } } -impl BackoffProvider for ExponentialBackoffProvider { +impl PingoraBackoffProvider for ExponentialBackoffProvider { type Item = ExponentialBackoff; fn get_backoff(&self) -> Self::Item { diff --git a/pingora/src/balance.rs b/pingora/src/balance.rs index 9d2c18d..29e6fd0 100644 --- a/pingora/src/balance.rs +++ b/pingora/src/balance.rs @@ -1,5 +1,5 @@ -use crate::backoff::BackoffProvider; use crate::error::PingoraLnError; +use crate::PingoraBackoffProvider; use crate::{PingoraLnBackendExtension, PingoraLnClientPool, PingoraLnMetricsCache}; use async_trait::async_trait; use backoff::backoff::Backoff; @@ -7,10 +7,11 @@ use log::{error, warn}; use pingora_core::services::background::BackgroundService; use pingora_load_balancing::selection::{BackendIter, BackendSelection}; use pingora_load_balancing::{Backend, LoadBalancer}; +use std::error::Error; use std::sync::Arc; use switchgear_service_api::balance::{LnBalancer, LnBalancerBackgroundServices}; use switchgear_service_api::offer::Offer; -use switchgear_service_api::service::ServiceErrorSource; +use switchgear_service_api::service::{HasServiceErrorSource, ServiceErrorSource}; use tokio::sync::watch::Receiver; use tokio::time::sleep; 
@@ -62,7 +63,7 @@ pub struct PingoraLnBalancer where P: Clone, M: Clone, - B: BackoffProvider, + B: PingoraBackoffProvider, X: MaxIterations, { load_balancer: Arc>, @@ -78,7 +79,7 @@ impl Clone for PingoraLnBalancer where P: Clone, M: Clone, - B: BackoffProvider, + B: PingoraBackoffProvider, X: MaxIterations, { fn clone(&self) -> Self { @@ -98,9 +99,10 @@ impl PingoraLnBalancer where S: BackendSelection + 'static, S::Iter: BackendIter, - P: PingoraLnClientPool + Clone, + P: PingoraLnClientPool + Clone, + P::Error: Error + Send + Sync + 'static + HasServiceErrorSource, M: PingoraLnMetricsCache + Clone, - B: BackoffProvider, + B: PingoraBackoffProvider, X: MaxIterations, { pub fn new( @@ -170,7 +172,13 @@ where let invoice = self .pool .get_invoice(offer, backend, amount_msat.into(), expiry_secs.into()) - .await?; + .await + .map_err(|e| { + PingoraLnError::from_service_error( + format!("get invoice for offer {}/{}", offer.partition, offer.id), + e, + ) + })?; Ok(invoice) } @@ -181,9 +189,10 @@ impl LnBalancer for PingoraLnBalancer where S: BackendSelection + Send + Sync + 'static, S::Iter: BackendIter, - P: PingoraLnClientPool + Send + Sync + Clone + 'static, + P: PingoraLnClientPool + Send + Sync + Clone + 'static, + P::Error: Error + Send + Sync + 'static + HasServiceErrorSource, M: PingoraLnMetricsCache + Send + Sync + Clone + 'static, - B: BackoffProvider + Send + Sync + 'static, + B: PingoraBackoffProvider + Send + Sync + 'static, X: MaxIterations, { type Error = PingoraLnError; @@ -273,9 +282,10 @@ impl LnBalancerBackgroundServices for PingoraLnBalancer + Send + Sync + Clone + 'static, + P: PingoraLnClientPool + Send + Sync + Clone + 'static, + P::Error: Error + Send + Sync + 'static + HasServiceErrorSource, M: PingoraLnMetricsCache + Send + Sync + Clone + 'static, - B: BackoffProvider + Send + Sync + 'static, + B: PingoraBackoffProvider + Send + Sync + 'static, X: MaxIterations, { async fn start(&self, shutdown_rx: Receiver) { @@ -296,7 +306,6 @@ mod tests { use pingora_load_balancing::{Backends, LoadBalancer}; use std::collections::{BTreeSet, HashMap}; use std::hash::{DefaultHasher, Hash, Hasher}; - use std::io; use std::sync::{Arc, Mutex}; use switchgear_service_api::balance::LnBalancer; use switchgear_service_api::discovery::DiscoveryBackend; @@ -328,10 +337,10 @@ mod tests { Ok("mock_invoice".to_string()) } } else { - Err(PingoraLnError::from_io_err( + Err(PingoraLnError::general_error( ServiceErrorSource::Upstream, "mock get_invoice", - io::Error::from(io::ErrorKind::Other), + "forced error".to_string(), )) } } diff --git a/pingora/src/discovery.rs b/pingora/src/discovery.rs index c34b851..6c30ddd 100644 --- a/pingora/src/discovery.rs +++ b/pingora/src/discovery.rs @@ -11,6 +11,7 @@ use std::hash::{DefaultHasher, Hash, Hasher}; use std::net::{Ipv6Addr, SocketAddrV6}; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; +use switchgear_service_api::discovery::{DiscoveryBackendStore, DiscoveryBackends}; pub struct LnServiceDiscovery { backend_provider: B, @@ -40,17 +41,7 @@ where { async fn discover(&self) -> pingora_error::Result<(BTreeSet, HashMap)> { let etag = self.last_etag.load(Ordering::Relaxed); - let backends = self - .backend_provider - .backends(Some(etag)) - .await - .map_err(|e| { - pingora_error::Error::because( - pingora_error::ErrorType::InternalError, - "getting all discovery backends", - e, - ) - })?; + let backends = self.backend_provider.backends(Some(etag)).await?; let discovery_backends = match backends.backends { None => { @@ -112,6 +103,35 @@ 
where } } +pub struct PingoraDiscoveryBackendStoreProvider { + store: S, +} + +impl PingoraDiscoveryBackendStoreProvider { + pub fn new(store: S) -> Self { + Self { store } + } +} + +#[async_trait] +impl PingoraBackendProvider for PingoraDiscoveryBackendStoreProvider +where + S: DiscoveryBackendStore + Sync, +{ + async fn backends( + &self, + etag: Option, + ) -> Result { + self.store.get_all(etag).await.map_err(|e| { + pingora_error::Error::because( + pingora_error::ErrorType::InternalError, + "getting all discovery backends from discovery backend store", + e, + ) + }) + } +} + #[cfg(test)] mod tests { use crate::discovery::LnServiceDiscovery; @@ -124,7 +144,6 @@ mod tests { use secp256k1::{PublicKey, Secp256k1, SecretKey}; use std::collections::{BTreeSet, HashSet}; use std::hash::{DefaultHasher, Hash, Hasher}; - use std::io; use std::sync::Arc; use switchgear_service_api::discovery::{ DiscoveryBackend, DiscoveryBackendSparse, DiscoveryBackends, @@ -139,9 +158,10 @@ mod tests { #[async_trait] impl PingoraBackendProvider for MockBackendProvider { - type Error = PingoraLnError; - - async fn backends(&self, _etag: Option) -> Result { + async fn backends( + &self, + _etag: Option, + ) -> Result { let backends = self .backends_to_return .lock() @@ -150,10 +170,9 @@ mod tests { .cloned() .map(|s| s.into_iter().collect::>()) .ok_or_else(|| { - PingoraLnError::from_io_err( - ServiceErrorSource::Internal, + pingora_error::Error::explain( + pingora_error::ErrorType::InternalError, "Mock BackendProvider forced error", - io::Error::from(io::ErrorKind::Other), ) })?; Ok(DiscoveryBackends { @@ -188,10 +207,10 @@ mod tests { fn connect(&self, _key: Self::Key, _backend: &DiscoveryBackend) -> Result<(), Self::Error> { if self.should_fail_connect { - Err(PingoraLnError::from_io_err( + Err(PingoraLnError::general_error( ServiceErrorSource::Upstream, "Mock LnClientPool forced connect error", - io::Error::from(io::ErrorKind::Other), + "forced error".to_string(), )) } else { Ok(()) @@ -303,7 +322,7 @@ mod tests { let err = result.unwrap_err(); assert_eq!(err.etype, pingora_error::ErrorType::InternalError); assert!(err - .cause + .context .unwrap() .to_string() .contains("Mock BackendProvider forced error")); @@ -381,10 +400,10 @@ mod tests { .iter() .any(|fail_addr| addr_str.as_str() == fail_addr.as_str()) { - Err(PingoraLnError::from_io_err( + Err(PingoraLnError::general_error( ServiceErrorSource::Upstream, "Selective mock pool forced connect error", - io::Error::from(io::ErrorKind::Other), + "forced error".to_string(), )) } else { Ok(()) diff --git a/pingora/src/error.rs b/pingora/src/error.rs index 26e9906..ca1f761 100644 --- a/pingora/src/error.rs +++ b/pingora/src/error.rs @@ -1,4 +1,5 @@ use std::borrow::Cow; +use std::error::Error; use std::fmt::{Display, Formatter}; use switchgear_service_api::service::{HasServiceErrorSource, ServiceErrorSource}; use thiserror::Error; @@ -10,7 +11,7 @@ pub enum PingoraLnErrorSourceKind { #[error("no available lightning nodes")] NoAvailableNodes, #[error("{0}")] - IoError(std::io::Error), + ServiceError(Box), } #[derive(Error, Debug)] @@ -56,15 +57,17 @@ impl PingoraLnError { } } - pub fn from_io_err>>( - esource: ServiceErrorSource, + pub fn from_service_error< + E: Error + HasServiceErrorSource + Send + Sync + 'static, + C: Into>, + >( context: C, - error: std::io::Error, + source: E, ) -> Self { Self { context: context.into(), - source: PingoraLnErrorSourceKind::IoError(error), - esource, + esource: source.get_service_error_source(), + source: 
PingoraLnErrorSourceKind::ServiceError(source.into()), } } diff --git a/pingora/src/health.rs b/pingora/src/health.rs index 4b18517..ae1e495 100644 --- a/pingora/src/health.rs +++ b/pingora/src/health.rs @@ -57,7 +57,6 @@ mod tests { use crate::error::PingoraLnError; use crate::PingoraLnMetrics; use pingora_core::protocols::l4::socket::SocketAddr; - use std::io; use std::net::SocketAddr as StdSocketAddr; use switchgear_service_api::discovery::DiscoveryBackend; use switchgear_service_api::offer::Offer; @@ -85,10 +84,10 @@ mod tests { async fn get_metrics(&self, _key: &Self::Key) -> Result { if self.return_error { - Err(PingoraLnError::from_io_err( + Err(PingoraLnError::general_error( ServiceErrorSource::Upstream, "get_metrics", - io::Error::from(io::ErrorKind::Other), + "forced error".to_string(), )) } else { Ok(PingoraLnMetrics { diff --git a/pingora/src/lib.rs b/pingora/src/lib.rs index 99222e8..54d189c 100644 --- a/pingora/src/lib.rs +++ b/pingora/src/lib.rs @@ -1,14 +1,17 @@ -use async_trait::async_trait; -use std::collections::BTreeSet; -use std::error::Error; -use switchgear_service_api::discovery::{DiscoveryBackend, DiscoveryBackends}; -use switchgear_service_api::offer::Offer; - pub mod backoff; pub mod balance; pub mod discovery; pub mod error; pub mod health; +pub mod pool; + +use ::backoff::backoff::Backoff; +use async_trait::async_trait; +use std::collections::BTreeSet; +use std::error::Error; +use switchgear_service_api::discovery::{DiscoveryBackend, DiscoveryBackends}; +use switchgear_service_api::offer::Offer; +use switchgear_service_api::service::HasServiceErrorSource; #[derive(Debug, Clone)] pub struct PingoraLnBackendExtension { @@ -17,14 +20,13 @@ pub struct PingoraLnBackendExtension { #[async_trait] pub trait PingoraBackendProvider { - type Error: Error + Send + Sync + 'static; - - async fn backends(&self, etag: Option) -> Result; + async fn backends(&self, etag: Option) + -> Result; } #[async_trait] pub trait PingoraLnClientPool { - type Error: Error + Send + Sync + 'static; + type Error: Error + Send + Sync + 'static + HasServiceErrorSource; type Key: std::hash::Hash + Eq + Send + Sync + 'static; async fn get_invoice( @@ -51,3 +53,9 @@ pub struct PingoraLnMetrics { pub healthy: bool, pub node_effective_inbound_msat: u64, } + +pub trait PingoraBackoffProvider: Clone + Send + Sync { + type Item: Backoff + Send; + + fn get_backoff(&self) -> Self::Item; +} diff --git a/pingora/src/pool.rs b/pingora/src/pool.rs new file mode 100644 index 0000000..29490ad --- /dev/null +++ b/pingora/src/pool.rs @@ -0,0 +1,60 @@ +use crate::{PingoraLnClientPool, PingoraLnMetrics, PingoraLnMetricsCache}; +use async_trait::async_trait; +use pingora_load_balancing::Backend; +use switchgear_components::pool::error::LnPoolError; +use switchgear_components::pool::LnClientPool; +use switchgear_service_api::discovery::DiscoveryBackend; +use switchgear_service_api::offer::Offer; + +#[derive(Clone)] +pub struct DefaultPingoraLnClientPool { + pool: LnClientPool, +} + +impl DefaultPingoraLnClientPool { + pub fn new(pool: LnClientPool) -> Self { + Self { pool } + } +} + +#[async_trait] +impl PingoraLnClientPool for DefaultPingoraLnClientPool { + type Error = LnPoolError; + type Key = Backend; + + async fn get_invoice( + &self, + offer: &Offer, + key: &Self::Key, + amount_msat: Option, + expiry_secs: Option, + ) -> Result { + self.pool + .get_invoice(offer, key, amount_msat, expiry_secs) + .await + } + + async fn get_metrics(&self, key: &Self::Key) -> Result { + let metrics = 
self.pool.get_metrics(key).await?; + Ok(PingoraLnMetrics { + healthy: metrics.healthy, + node_effective_inbound_msat: metrics.node_effective_inbound_msat, + }) + } + + fn connect(&self, key: Self::Key, backend: &DiscoveryBackend) -> Result<(), Self::Error> { + self.pool.connect(key, backend) + } +} + +impl PingoraLnMetricsCache for DefaultPingoraLnClientPool { + type Key = Backend; + + fn get_cached_metrics(&self, key: &Self::Key) -> Option { + let metrics = self.pool.get_cached_metrics(key); + metrics.map(|m| PingoraLnMetrics { + healthy: m.healthy, + node_effective_inbound_msat: m.node_effective_inbound_msat, + }) + } +} diff --git a/server/src/commands/offer/record.rs b/server/src/commands/offer/record.rs index 8307005..25e57d2 100644 --- a/server/src/commands/offer/record.rs +++ b/server/src/commands/offer/record.rs @@ -88,6 +88,7 @@ pub fn new_offer(partition: &str, metadata_id: &Uuid, output: Option<&Path>) -> max_sendable: 0, min_sendable: 0, metadata_id: *metadata_id, + metadata: None, #[allow(clippy::expect_used)] timestamp: DateTime::::from_timestamp_secs(0).expect("unix epoch"), #[allow(clippy::expect_used)] @@ -121,7 +122,7 @@ pub async fn get_offer( ) -> anyhow::Result<()> { let client = create_offer_client(client_configuration)?; if let Some(id) = id { - if let Some(offer) = client.get_offer(partition, id).await? { + if let Some(offer) = client.get_offer(partition, id, None).await? { let offer = serde_json::to_string_pretty(&offer) .with_context(|| format!("serializing offer {id}"))?; cli_write_all(output, offer.as_bytes()).with_context(|| { diff --git a/server/src/di/delegates.rs b/server/src/di/delegates.rs index 29e7109..aa89215 100644 --- a/server/src/di/delegates.rs +++ b/server/src/di/delegates.rs @@ -5,7 +5,6 @@ use crate::di::macros::{ use anyhow::Result; use async_trait::async_trait; use pingora_load_balancing::selection::{Consistent, Random, RoundRobin}; -use pingora_load_balancing::Backend; use secp256k1::PublicKey; use switchgear_components::discovery::db::DbDiscoveryBackendStore; use switchgear_components::discovery::error::DiscoveryBackendStoreError; @@ -15,109 +14,33 @@ use switchgear_components::offer::db::DbOfferStore; use switchgear_components::offer::error::OfferStoreError; use switchgear_components::offer::http::HttpOfferStore; use switchgear_components::offer::memory::MemoryOfferStore; -use switchgear_components::pool::LnClientPool; use switchgear_pingora::backoff::{ - BackoffInstance, BackoffProvider, ExponentialBackoffProvider, StopBackoffProvider, + BackoffInstance, ExponentialBackoffProvider, StopBackoffProvider, }; use switchgear_pingora::balance::{ ConsistentMaxIterations, RandomMaxIterations, RoundRobinMaxIterations, }; use switchgear_pingora::error::PingoraLnError; -use switchgear_pingora::{ - PingoraBackendProvider, PingoraLnClientPool, PingoraLnMetrics, PingoraLnMetricsCache, -}; +use switchgear_pingora::pool::DefaultPingoraLnClientPool; +use switchgear_pingora::PingoraBackoffProvider; use switchgear_service_api::balance::{LnBalancer, LnBalancerBackgroundServices}; use switchgear_service_api::discovery::{ DiscoveryBackend, DiscoveryBackendPatch, DiscoveryBackendStore, DiscoveryBackends, }; use switchgear_service_api::offer::Offer; -use switchgear_service_api::offer::{OfferMetadataStore, OfferProvider, OfferStore}; -use switchgear_service_api::service::ServiceErrorSource; +use switchgear_service_api::offer::{OfferMetadataStore, OfferStore}; use tokio::sync::watch; use uuid::Uuid; // ===== TYPE ALIASES ===== type Balancer = 
switchgear_pingora::balance::PingoraLnBalancer< T, - LnClientPoolDelegate, - LnClientPoolDelegate, + DefaultPingoraLnClientPool, + DefaultPingoraLnClientPool, BackoffProviderDelegate, X, >; -#[derive(Clone)] -pub enum LnClientPoolDelegate { - Default(LnClientPool), -} - -#[async_trait] -impl PingoraLnClientPool for LnClientPoolDelegate { - type Error = PingoraLnError; - type Key = Backend; - - async fn get_invoice( - &self, - offer: &Offer, - key: &Self::Key, - amount_msat: Option, - expiry_secs: Option, - ) -> std::result::Result { - let LnClientPoolDelegate::Default(delegate) = self; - delegate - .get_invoice(offer, key, amount_msat, expiry_secs) - .await - .map_err(|e| { - PingoraLnError::general_error( - ServiceErrorSource::Upstream, - "get invoice", - e.to_string(), - ) - }) - } - - async fn get_metrics( - &self, - key: &Self::Key, - ) -> std::result::Result { - let LnClientPoolDelegate::Default(delegate) = self; - let metrics = delegate.get_metrics(key).await.map_err(|e| { - PingoraLnError::general_error( - ServiceErrorSource::Upstream, - "get metrics", - e.to_string(), - ) - })?; - Ok(PingoraLnMetrics { - healthy: metrics.healthy, - node_effective_inbound_msat: metrics.node_effective_inbound_msat, - }) - } - - fn connect( - &self, - key: Self::Key, - backend: &DiscoveryBackend, - ) -> std::result::Result<(), Self::Error> { - let LnClientPoolDelegate::Default(delegate) = self; - delegate.connect(key, backend).map_err(|e| { - PingoraLnError::general_error(ServiceErrorSource::Upstream, "connect", e.to_string()) - }) - } -} - -impl PingoraLnMetricsCache for LnClientPoolDelegate { - type Key = Backend; - - fn get_cached_metrics(&self, key: &Self::Key) -> Option { - let LnClientPoolDelegate::Default(delegate) = self; - let metrics = delegate.get_cached_metrics(key); - metrics.map(|m| PingoraLnMetrics { - healthy: m.healthy, - node_effective_inbound_msat: m.node_effective_inbound_msat, - }) - } -} - // ===== LN BALANCER DELEGATE ===== pub enum LnBalancerDelegate { @@ -184,8 +107,9 @@ impl OfferStore for OfferStoreDelegate { &self, partition: &str, id: &Uuid, + sparse: Option, ) -> Result, Self::Error> { - delegate_to_offer_store_variants!(self, get_offer, partition, id).await + delegate_to_offer_store_variants!(self, get_offer, partition, id, sparse).await } async fn get_offers( @@ -256,20 +180,6 @@ impl OfferMetadataStore for OfferStoreDelegate { } } -#[async_trait] -impl OfferProvider for OfferStoreDelegate { - type Error = OfferStoreError; - - async fn offer( - &self, - hostname: &str, - partition: &str, - id: &Uuid, - ) -> Result, Self::Error> { - delegate_to_offer_store_variants!(self, offer, hostname, partition, id).await - } -} - // ===== DISCOVERY BACKEND STORE DELEGATE ===== #[derive(Clone)] @@ -311,33 +221,6 @@ impl DiscoveryBackendStore for DiscoveryBackendStoreDelegate { } } -#[async_trait] -impl PingoraBackendProvider for DiscoveryBackendStoreDelegate { - type Error = PingoraLnError; - - async fn backends(&self, etag: Option) -> Result { - match self { - DiscoveryBackendStoreDelegate::Database(d) => Self::_backends(d.get_all(etag).await), - DiscoveryBackendStoreDelegate::Memory(d) => Self::_backends(d.get_all(etag).await), - DiscoveryBackendStoreDelegate::Http(d) => Self::_backends(d.get_all(etag).await), - } - } -} - -impl DiscoveryBackendStoreDelegate { - fn _backends( - backends_result: Result, - ) -> Result { - backends_result.map_err(|e| { - PingoraLnError::general_error( - ServiceErrorSource::Upstream, - "getting all discovery backends", - e.to_string(), - ) - }) - } -} 
- // ===== BACKOFF PROVIDER DELEGATE ===== #[derive(Clone)] @@ -346,7 +229,7 @@ pub enum BackoffProviderDelegate { Exponential(ExponentialBackoffProvider), } -impl BackoffProvider for BackoffProviderDelegate { +impl PingoraBackoffProvider for BackoffProviderDelegate { type Item = BackoffInstance; fn get_backoff(&self) -> Self::Item { diff --git a/server/src/di/inject/injectors/balance.rs b/server/src/di/inject/injectors/balance.rs index 0d552ff..0b77634 100644 --- a/server/src/di/inject/injectors/balance.rs +++ b/server/src/di/inject/injectors/balance.rs @@ -1,5 +1,5 @@ use crate::config::{BackendSelectionConfig, BackoffConfig, LnUrlBalancerServiceConfig}; -use crate::di::delegates::{BackoffProviderDelegate, LnBalancerDelegate, LnClientPoolDelegate}; +use crate::di::delegates::{BackoffProviderDelegate, LnBalancerDelegate}; use crate::di::inject::injectors::config::{ServerConfigInjector, ServiceEnablementInjector}; use crate::di::inject::injectors::store::discovery::DiscoveryStoreInjector; use anyhow::{anyhow, Context}; @@ -17,8 +17,9 @@ use switchgear_pingora::backoff::{ExponentialBackoffProvider, StopBackoffProvide use switchgear_pingora::balance::{ ConsistentMaxIterations, PingoraLnBalancer, RandomMaxIterations, RoundRobinMaxIterations, }; -use switchgear_pingora::discovery::LnServiceDiscovery; +use switchgear_pingora::discovery::{LnServiceDiscovery, PingoraDiscoveryBackendStoreProvider}; use switchgear_pingora::health::PingoraLnHealthCheck; +use switchgear_pingora::pool::DefaultPingoraLnClientPool; #[derive(Clone)] pub struct BalancerInjector { @@ -68,6 +69,8 @@ impl BalancerInjector { .await? .ok_or_else(|| anyhow!("lnurl service enabled but has no discovery store"))?; + let discovery = PingoraDiscoveryBackendStoreProvider::new(discovery); + let backoff = match lnurl_config.backoff { BackoffConfig::Stop => BackoffProviderDelegate::Stop(StopBackoffProvider), BackoffConfig::Exponential { @@ -110,7 +113,7 @@ impl BalancerInjector { trusted_roots, ); - let pool = LnClientPoolDelegate::Default(pool); + let pool = DefaultPingoraLnClientPool::new(pool); let discovery = LnServiceDiscovery::new(discovery, pool.clone(), lnurl_config.partitions.clone()); diff --git a/server/src/di/inject/injectors/service/balance.rs b/server/src/di/inject/injectors/service/balance.rs index 48c1d7d..c2fa2d4 100644 --- a/server/src/di/inject/injectors/service/balance.rs +++ b/server/src/di/inject/injectors/service/balance.rs @@ -8,6 +8,7 @@ use std::future::Future; use std::net::{SocketAddr, TcpListener}; use std::pin::Pin; use switchgear_components::axum::middleware::logger::ClfLogger; +use switchgear_components::offer::provider::StoreOfferProvider; use switchgear_service::scheme::Scheme; use switchgear_service::{LnUrlBalancerService, LnUrlPayState}; @@ -59,6 +60,8 @@ impl BalancerServiceInjector { .await? 
.ok_or_else(|| anyhow!("lnurl service enabled but has no offer store"))?; + let offer_store = StoreOfferProvider::new(offer_store); + let listener = TcpListener::bind(service_config.address).with_context(|| { format!( "binding TCP listener for lnurl service to address {}", diff --git a/server/tests/features/common/step_functions.rs b/server/tests/features/common/step_functions.rs index c19e156..9111412 100644 --- a/server/tests/features/common/step_functions.rs +++ b/server/tests/features/common/step_functions.rs @@ -386,6 +386,7 @@ pub async fn step_when_the_payee_creates_an_offer_for_their_lightning_node( max_sendable, min_sendable, metadata_id, + metadata: None, timestamp: now - ChronoDuration::minutes(5), expires: Some(now + ChronoDuration::hours(24)), }, @@ -833,6 +834,7 @@ pub async fn step_and_the_payee_has_created_an_offer_linked_to_both_lightning_no max_sendable: 1_000_000_000, // 1000 sats in msat min_sendable: 1_000, // 1 sat in msat metadata_id, + metadata: None, timestamp: now - ChronoDuration::minutes(5), expires: Some(now + ChronoDuration::hours(24)), }, diff --git a/service-api/src/offer.rs b/service-api/src/offer.rs index f33bbf3..2fbb95e 100644 --- a/service-api/src/offer.rs +++ b/service-api/src/offer.rs @@ -13,6 +13,7 @@ pub trait OfferStore { &self, partition: &str, id: &Uuid, + sparse: Option, ) -> Result, Self::Error>; async fn get_offers( @@ -108,6 +109,8 @@ pub struct OfferRecordSparse { pub max_sendable: u64, pub min_sendable: u64, pub metadata_id: Uuid, + #[serde(skip_serializing_if = "Option::is_none")] + pub metadata: Option, pub timestamp: chrono::DateTime, #[serde(skip_serializing_if = "Option::is_none")] pub expires: Option>, diff --git a/service/Cargo.toml b/service/Cargo.toml index 0750fe0..7d6463d 100644 --- a/service/Cargo.toml +++ b/service/Cargo.toml @@ -27,7 +27,6 @@ secp256k1 = { version = "0.31", features = ["recovery", "serde"] } serde = "1" serde_json = "1" sqlx = { version = "0.8", default-features = false, features = ["runtime-tokio", "tls-rustls-aws-lc-rs", "sqlite", "postgres", "mysql"] } -switchgear-components.workspace = true switchgear-service-api.workspace = true thiserror = "2" tokio = { version = "1", features = ["full"] } @@ -37,8 +36,10 @@ uuid = { version = "1", features = ["v4", "serde"] } [dev-dependencies] axum-test = "18" +indexmap = "2" p256 = { version = "0.13", features = ["ecdsa"] } pkcs8 = { version = "0.10", features = ["pem"] } png = "0.18" rand = "0.8" rqrr = "0.10" +sha2 = "0.10" diff --git a/service/src/discovery/handler.rs b/service/src/discovery/handler.rs index 93529df..6f072c6 100644 --- a/service/src/discovery/handler.rs +++ b/service/src/discovery/handler.rs @@ -74,13 +74,14 @@ impl DiscoveryHandlers { where S: DiscoveryBackendStore, { + let location = backend.public_key.to_string(); + let result = state .store() - .post(backend.clone()) + .post(backend) .await .map_err(|e| crate::crud_error_from_service!(e))?; - let location = backend.public_key.to_string(); let location = HeaderValue::from_str(&location)?; match result { @@ -106,7 +107,7 @@ impl DiscoveryHandlers { let was_created = state .store() - .put(backend.clone()) + .put(backend) .await .map_err(|e| crate::crud_error_from_service!(e))?; @@ -134,7 +135,7 @@ impl DiscoveryHandlers { let patched = state .store() - .patch(backend.clone()) + .patch(backend) .await .map_err(|e| crate::crud_error_from_service!(e))?; diff --git a/service/src/discovery/service.rs b/service/src/discovery/service.rs index 514cf97..63896a7 100644 --- a/service/src/discovery/service.rs 
+++ b/service/src/discovery/service.rs @@ -52,6 +52,7 @@ mod tests { use crate::discovery::auth::{DiscoveryAudience, DiscoveryClaims}; use crate::discovery::service::DiscoveryService; use crate::discovery::state::DiscoveryState; + use crate::testing::discovery::store::TestDiscoveryBackendStore; use axum::http::StatusCode; use axum_test::TestServer; use jsonwebtoken::{encode, Algorithm, DecodingKey, EncodingKey, Header}; @@ -61,7 +62,6 @@ mod tests { use rand::{thread_rng, Rng}; use secp256k1::{PublicKey, Secp256k1, SecretKey}; use std::time::{SystemTime, UNIX_EPOCH}; - use switchgear_components::discovery::memory::MemoryDiscoveryBackendStore; use switchgear_service_api::discovery::{ DiscoveryBackend, DiscoveryBackendPatchSparse, DiscoveryBackendSparse, }; @@ -104,7 +104,7 @@ mod tests { .unwrap(); let decoding_key = DecodingKey::from_ec_pem(public_key.as_bytes()).unwrap(); - let store = MemoryDiscoveryBackendStore::new(); + let store = TestDiscoveryBackendStore::default(); let state = DiscoveryState::new(store, decoding_key); let header = Header::new(Algorithm::ES256); diff --git a/service/src/lib.rs b/service/src/lib.rs index c9e9d94..7145c14 100644 --- a/service/src/lib.rs +++ b/service/src/lib.rs @@ -2,6 +2,8 @@ pub(crate) mod axum; pub(crate) mod discovery; pub(crate) mod lnurl; pub(crate) mod offer; +#[cfg(test)] +mod testing; pub use axum::extract::scheme; diff --git a/service/src/lnurl/service.rs b/service/src/lnurl/service.rs index 60de104..4651961 100644 --- a/service/src/lnurl/service.rs +++ b/service/src/lnurl/service.rs @@ -47,12 +47,12 @@ mod tests { use crate::axum::extract::scheme::Scheme; use crate::lnurl::pay::state::LnUrlPayState; use crate::lnurl::service::LnUrlBalancerService; + use crate::testing::offer::store::TestOfferStore; use async_trait::async_trait; use axum::http::StatusCode; use axum_test::TestServer; use chrono::{Duration, Utc}; use std::collections::HashSet; - use switchgear_components::offer::memory::MemoryOfferStore; use switchgear_service_api::balance::LnBalancer; use switchgear_service_api::lnurl::{LnUrlInvoice, LnUrlOffer, LnUrlOfferMetadata}; use switchgear_service_api::offer::{ @@ -175,6 +175,7 @@ mod tests { max_sendable: 1000000, min_sendable: 1000, metadata_id, + metadata: None, timestamp: Utc::now() - Duration::hours(1), expires: Some(Utc::now() + Duration::hours(1)), }, @@ -215,7 +216,7 @@ mod tests { partitions: Option>, ) -> (TestServer, MockLnBalancer) { let partition = offer.partition.clone(); - let offer_provider = MemoryOfferStore::default(); + let offer_provider = TestOfferStore::default(); // Create metadata for the offer let metadata = OfferMetadata { @@ -252,7 +253,7 @@ mod tests { } fn create_empty_test_server() -> TestServer { - let offer_provider = MemoryOfferStore::default(); + let offer_provider = TestOfferStore::default(); let balancer = MockLnBalancer::new(); let state = LnUrlPayState::new( HashSet::from(["default".to_string()]), @@ -273,7 +274,7 @@ mod tests { async fn create_test_server_with_failing_balancer(offer: OfferRecord) -> TestServer { let partition = offer.partition.clone(); - let offer_provider = MemoryOfferStore::default(); + let offer_provider = TestOfferStore::default(); // Create metadata for the offer let metadata = OfferMetadata { @@ -357,7 +358,8 @@ mod tests { scheme: &str, ) -> (TestServer, Uuid) { let partition = offer.partition.clone(); - let offer_provider = MemoryOfferStore::default(); + let offer_provider = TestOfferStore::default(); + let metadata = OfferMetadata { id: offer.offer.metadata_id, 
partition: offer.partition.clone(), @@ -729,7 +731,7 @@ mod tests { #[tokio::test] async fn get_invoice_with_custom_invoice_response() { let test_offer = create_test_offer(); - let offer_provider = MemoryOfferStore::default(); + let offer_provider = TestOfferStore::default(); let partition = test_offer.partition.clone(); diff --git a/service/src/offer/handler.rs b/service/src/offer/handler.rs index 73097f7..115885c 100644 --- a/service/src/offer/handler.rs +++ b/service/src/offer/handler.rs @@ -24,10 +24,16 @@ pub struct GetAllMetadataQueryParameters { pub count: Option, } +#[derive(Deserialize, Debug)] +pub struct GetOfferQueryParameters { + pub sparse: Option, +} + pub struct OfferHandlers; impl OfferHandlers { pub async fn get_offer( + Query(params): Query, UuidParam { partition, id }: UuidParam, State(state): State>, ) -> Result, CrudError> @@ -37,7 +43,7 @@ impl OfferHandlers { { let offer = state .offer_store() - .get_offer(&partition, &id) + .get_offer(&partition, &id, params.sparse) .await .map_err(|e| crate::crud_error_from_service!(e))? .ok_or(CrudError::not_found())?; @@ -73,19 +79,21 @@ impl OfferHandlers { pub async fn post_offer( State(state): State>, - Json(offer): Json, + Json(mut offer): Json, ) -> Result, CrudError> where S: OfferStore, M: OfferMetadataStore, { + offer.offer.metadata = None; + let location = format!("{}/{}", offer.partition, offer.id); + let result = state .offer_store() - .post_offer(offer.clone()) + .post_offer(offer) .await .map_err(|e| crate::crud_error_from_service!(e))?; - let location = format!("{}/{}", offer.partition, offer.id); let location = HeaderValue::from_str(&location)?; match result { @@ -103,15 +111,17 @@ impl OfferHandlers { S: OfferStore, M: OfferMetadataStore, { - let offer = OfferRecord { + let mut offer = OfferRecord { partition, id, offer, }; + offer.offer.metadata = None; + let was_created = state .offer_store() - .put_offer(offer.clone()) + .put_offer(offer) .await .map_err(|e| crate::crud_error_from_service!(e))?; @@ -194,13 +204,14 @@ impl OfferHandlers { S: OfferStore, M: OfferMetadataStore, { + let location = format!("{}/{}", metadata.partition, metadata.id); + let result = state .metadata_store() - .post_metadata(metadata.clone()) + .post_metadata(metadata) .await .map_err(|e| crate::crud_error_from_service!(e))?; - let location = format!("{}/{}", metadata.partition, metadata.id); let location = HeaderValue::from_str(&location)?; match result { @@ -226,7 +237,7 @@ impl OfferHandlers { let was_created = state .metadata_store() - .put_metadata(metadata.clone()) + .put_metadata(metadata) .await .map_err(|e| crate::crud_error_from_service!(e))?; diff --git a/service/src/offer/service.rs b/service/src/offer/service.rs index 392bd78..6afae16 100644 --- a/service/src/offer/service.rs +++ b/service/src/offer/service.rs @@ -59,6 +59,7 @@ impl OfferService { mod tests { use crate::offer::service::OfferService; use crate::offer::state::OfferState; + use crate::testing::offer::store::TestOfferStore; use crate::{OfferAudience, OfferClaims}; use axum::http::StatusCode; use axum_test::TestServer; @@ -69,7 +70,6 @@ mod tests { use p256::pkcs8::EncodePublicKey; use rand::thread_rng; use std::time::{SystemTime, UNIX_EPOCH}; - use switchgear_components::offer::memory::MemoryOfferStore; use switchgear_service_api::offer::{ OfferMetadata, OfferMetadataIdentifier, OfferMetadataImage, OfferMetadataSparse, OfferMetadataStore, OfferRecord, OfferRecordSparse, OfferStore, @@ -84,6 +84,7 @@ mod tests { max_sendable: 1000000, min_sendable: 1000, 
metadata_id, + metadata: None, timestamp: Utc::now() - Duration::hours(1), expires: Some(Utc::now() + Duration::hours(1)), }, @@ -134,7 +135,7 @@ mod tests { }; let authorization = encode(&header, &claims, &encoding_key).unwrap(); - let store = MemoryOfferStore::default(); + let store = TestOfferStore::default(); for m in metadata { store.put_metadata(m).await.unwrap(); @@ -181,7 +182,7 @@ mod tests { }; let authorization = encode(&header, &claims, &encoding_key).unwrap(); - let store = MemoryOfferStore::default(); + let store = TestOfferStore::default(); store.put_metadata(metadata).await.unwrap(); let state = OfferState::new(store.clone(), store, decoding_key, 100); @@ -218,7 +219,7 @@ mod tests { }; let authorization = encode(&header, &claims, &encoding_key).unwrap(); - let store = MemoryOfferStore::default(); + let store = TestOfferStore::default(); let state = OfferState::new(store.clone(), store, decoding_key, 100); let app = OfferService::router(state); @@ -284,11 +285,12 @@ mod tests { #[tokio::test] async fn get_offer_when_exists_then_returns_resource() { let test_metadata = create_test_metadata(); - let test_offer = create_test_offer_with_metadata_id(test_metadata.id); + let mut test_offer = create_test_offer_with_metadata_id(test_metadata.id); let offer_id = test_offer.id; let server = - create_test_server_with_offer(vec![test_offer.clone()], vec![test_metadata]).await; + create_test_server_with_offer(vec![test_offer.clone()], vec![test_metadata.clone()]) + .await; let response = server .server .get(&format!("/offers/default/{offer_id}")) @@ -297,11 +299,18 @@ mod tests { assert_eq!(response.status_code(), StatusCode::OK); let returned_offer: OfferRecord = response.json(); - assert_eq!(returned_offer.id, offer_id); - assert_eq!( - returned_offer.offer.max_sendable, - test_offer.offer.max_sendable - ); + assert_eq!(test_offer, returned_offer); + + let response = server + .server + .get(&format!("/offers/default/{offer_id}?sparse=false")) + .authorization_bearer(server.authorization.clone()) + .await; + + test_offer.offer.metadata = Some(test_metadata.metadata); + assert_eq!(response.status_code(), StatusCode::OK); + let returned_offer: OfferRecord = response.json(); + assert_eq!(test_offer, returned_offer); } #[tokio::test] diff --git a/service/src/testing/discovery/mod.rs b/service/src/testing/discovery/mod.rs new file mode 100644 index 0000000..55c88cb --- /dev/null +++ b/service/src/testing/discovery/mod.rs @@ -0,0 +1 @@ +pub mod store; diff --git a/service/src/testing/discovery/store.rs b/service/src/testing/discovery/store.rs new file mode 100644 index 0000000..adba573 --- /dev/null +++ b/service/src/testing/discovery/store.rs @@ -0,0 +1,111 @@ +use crate::testing::error::TestError; +use async_trait::async_trait; +use indexmap::IndexMap; +use secp256k1::PublicKey; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::Arc; +use switchgear_service_api::discovery::{ + DiscoveryBackend, DiscoveryBackendPatch, DiscoveryBackendStore, DiscoveryBackends, +}; +use tokio::sync::Mutex; + +#[derive(Clone, Debug)] +pub struct TestDiscoveryBackendStore { + store: Arc>>, + etag: Arc, +} + +impl Default for TestDiscoveryBackendStore { + fn default() -> Self { + Self::new() + } +} + +impl TestDiscoveryBackendStore { + pub fn new() -> Self { + Self { + store: Arc::new(Mutex::new(IndexMap::new())), + etag: Arc::new(Default::default()), + } + } +} + +#[async_trait] +impl DiscoveryBackendStore for TestDiscoveryBackendStore { + type Error = TestError; + + async fn get(&self, public_key: 
&PublicKey) -> Result<Option<DiscoveryBackend>, Self::Error> {
+        let store = self.store.lock().await;
+        Ok(store.get(public_key).cloned())
+    }
+
+    async fn get_all(&self, request_etag: Option<u64>) -> Result<DiscoveryBackends, Self::Error> {
+        let store = self.store.lock().await;
+        let response_etag = self.etag.load(Ordering::Relaxed);
+
+        if request_etag == Some(response_etag) {
+            Ok(DiscoveryBackends {
+                etag: response_etag,
+                backends: None,
+            })
+        } else {
+            let backends: Vec<DiscoveryBackend> = store.values().cloned().collect();
+
+            Ok(DiscoveryBackends {
+                etag: response_etag,
+                backends: Some(backends),
+            })
+        }
+    }
+
+    async fn post(&self, backend: DiscoveryBackend) -> Result<Option<PublicKey>, Self::Error> {
+        let mut store = self.store.lock().await;
+        if store.contains_key(&backend.public_key) {
+            return Ok(None);
+        }
+        let key = backend.public_key;
+        store.insert(backend.public_key, backend);
+        self.etag.fetch_add(1, Ordering::Relaxed);
+        Ok(Some(key))
+    }
+
+    async fn put(&self, backend: DiscoveryBackend) -> Result<bool, Self::Error> {
+        let mut store = self.store.lock().await;
+        let key = backend.public_key;
+        let was_new = !store.contains_key(&key);
+        store.insert(key, backend);
+        self.etag.fetch_add(1, Ordering::Relaxed);
+        Ok(was_new)
+    }
+
+    async fn patch(&self, backend: DiscoveryBackendPatch) -> Result<bool, Self::Error> {
+        let mut store = self.store.lock().await;
+        let entry = match store.get_mut(&backend.public_key) {
+            None => return Ok(false),
+            Some(entry) => entry,
+        };
+        if let Some(weight) = backend.backend.weight {
+            entry.backend.weight = weight;
+        }
+        if let Some(enabled) = backend.backend.enabled {
+            entry.backend.enabled = enabled;
+        }
+        if let Some(partitions) = backend.backend.partitions {
+            entry.backend.partitions = partitions;
+        }
+        if let Some(name) = backend.backend.name {
+            entry.backend.name = name;
+        }
+        self.etag.fetch_add(1, Ordering::Relaxed);
+        Ok(true)
+    }
+
+    async fn delete(&self, public_key: &PublicKey) -> Result<bool, Self::Error> {
+        let mut store = self.store.lock().await;
+        let was_found = store.swap_remove(public_key).is_some();
+        if was_found {
+            self.etag.fetch_add(1, Ordering::Relaxed);
+        }
+        Ok(was_found)
+    }
+}
diff --git a/service/src/testing/error.rs b/service/src/testing/error.rs
new file mode 100644
index 0000000..eaacb58
--- /dev/null
+++ b/service/src/testing/error.rs
@@ -0,0 +1,49 @@
+use std::borrow::Cow;
+use std::fmt::{Display, Formatter};
+use switchgear_service_api::service::{HasServiceErrorSource, ServiceErrorSource};
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+pub struct TestError {
+    context: Cow<'static, str>,
+    #[source]
+    source: TestErrorSource,
+    esource: ServiceErrorSource,
+}
+
+impl Display for TestError {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "TestError: while {}: {}",
+            self.context.as_ref(),
+            self.source
+        )
+    }
+}
+
+#[derive(Error, Debug)]
+pub enum TestErrorSource {
+    #[error("{0}")]
+    Error(String),
+}
+
+impl TestError {
+    pub fn error<C: Into<Cow<'static, str>>>(
+        error: String,
+        esource: ServiceErrorSource,
+        context: C,
+    ) -> Self {
+        Self {
+            context: context.into(),
+            source: TestErrorSource::Error(error),
+            esource,
+        }
+    }
+}
+
+impl HasServiceErrorSource for TestError {
+    fn get_service_error_source(&self) -> ServiceErrorSource {
+        self.esource
+    }
+}
diff --git a/service/src/testing/mod.rs b/service/src/testing/mod.rs
new file mode 100644
index 0000000..4113959
--- /dev/null
+++ b/service/src/testing/mod.rs
@@ -0,0 +1,3 @@
+pub mod discovery;
+pub mod error;
+pub mod offer;
diff --git a/service/src/testing/offer/mod.rs b/service/src/testing/offer/mod.rs
new file mode 100644
index 0000000..55c88cb
--- 
/dev/null
+++ b/service/src/testing/offer/mod.rs
@@ -0,0 +1 @@
+pub mod store;
diff --git a/service/src/testing/offer/store.rs b/service/src/testing/offer/store.rs
new file mode 100644
index 0000000..489c51e
--- /dev/null
+++ b/service/src/testing/offer/store.rs
@@ -0,0 +1,262 @@
+use crate::testing::error::TestError;
+use async_trait::async_trait;
+use indexmap::IndexMap;
+use sha2::{Digest, Sha256};
+use std::sync::Arc;
+use switchgear_service_api::lnurl::LnUrlOfferMetadata;
+use switchgear_service_api::offer::{
+    Offer, OfferMetadata, OfferMetadataStore, OfferProvider, OfferRecord, OfferStore,
+};
+use switchgear_service_api::service::ServiceErrorSource;
+use tokio::sync::Mutex;
+use uuid::Uuid;
+
+/// Simplified in-memory offer store for unit tests.
+/// This is a minimal implementation designed to replace MemoryOfferStore
+/// in service crate tests. Uses IndexMap to preserve insertion order.
+#[derive(Clone, Debug)]
+pub struct TestOfferStore {
+    offer: Arc<Mutex<IndexMap<(String, Uuid), OfferRecord>>>,
+    metadata: Arc<Mutex<IndexMap<(String, Uuid), OfferMetadata>>>,
+}
+
+impl TestOfferStore {
+    pub fn new() -> Self {
+        Self {
+            offer: Arc::new(Mutex::new(IndexMap::new())),
+            metadata: Arc::new(Mutex::new(IndexMap::new())),
+        }
+    }
+}
+
+impl Default for TestOfferStore {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+#[async_trait]
+impl OfferStore for TestOfferStore {
+    type Error = TestError;
+
+    async fn get_offer(
+        &self,
+        partition: &str,
+        id: &Uuid,
+        sparse: Option<bool>,
+    ) -> Result<Option<OfferRecord>, Self::Error> {
+        let sparse = sparse.unwrap_or(true);
+        let metadata_store = self.metadata.lock().await;
+        let store = self.offer.lock().await;
+
+        Ok(store.get(&(partition.to_string(), *id)).and_then(|offer| {
+            if sparse {
+                Some(offer.clone())
+            } else {
+                metadata_store
+                    .get(&(partition.to_string(), offer.offer.metadata_id))
+                    .map(|metadata| {
+                        let mut offer = offer.clone();
+                        offer.offer.metadata = Some(metadata.metadata.clone());
+                        offer
+                    })
+            }
+        }))
+    }
+
+    async fn get_offers(
+        &self,
+        partition: &str,
+        start: usize,
+        count: usize,
+    ) -> Result<Vec<OfferRecord>, Self::Error> {
+        let store = self.offer.lock().await;
+        // IndexMap preserves insertion order
+        let offers: Vec<OfferRecord> = store
+            .iter()
+            .filter(|((p, _), _)| p == partition)
+            .skip(start)
+            .take(count)
+            .map(|(_, offer)| offer.clone())
+            .collect();
+
+        Ok(offers)
+    }
+
+    async fn post_offer(&self, offer: OfferRecord) -> Result<Option<Uuid>, Self::Error> {
+        let metadata_store = self.metadata.lock().await;
+        let mut store = self.offer.lock().await;
+
+        if !metadata_store.contains_key(&(offer.partition.to_string(), offer.offer.metadata_id)) {
+            return Err(TestError::error(
+                format!(
+                    "metadata {} not found for offer {}",
+                    offer.offer.metadata_id, offer.id
+                ),
+                ServiceErrorSource::Downstream,
+                format!("post offer {offer:?}"),
+            ));
+        }
+
+        if let indexmap::map::Entry::Vacant(e) =
+            store.entry((offer.partition.to_string(), offer.id))
+        {
+            e.insert(offer.clone());
+            Ok(Some(offer.id))
+        } else {
+            Ok(None)
+        }
+    }
+
+    async fn put_offer(&self, offer: OfferRecord) -> Result<bool, Self::Error> {
+        let metadata_store = self.metadata.lock().await;
+        let mut store = self.offer.lock().await;
+
+        if !metadata_store.contains_key(&(offer.partition.to_string(), offer.offer.metadata_id)) {
+            return Err(TestError::error(
+                format!(
+                    "metadata {} not found for offer {}",
+                    offer.offer.metadata_id, offer.id
+                ),
+                ServiceErrorSource::Downstream,
+                format!("put offer {offer:?}"),
+            ));
+        }
+
+        let was_new = store
+            .insert((offer.partition.to_string(), offer.id), offer)
+            .is_none();
+        Ok(was_new)
+    }
+
+    async fn delete_offer(&self, partition: &str, id: &Uuid) -> Result<bool, Self::Error> {
+        let mut store = self.offer.lock().await;
+        Ok(store.swap_remove(&(partition.to_string(), *id)).is_some())
+    }
+}
+
+#[async_trait]
+impl OfferMetadataStore for TestOfferStore {
+    type Error = TestError;
+
+    async fn get_metadata(
+        &self,
+        partition: &str,
+        id: &Uuid,
+    ) -> Result<Option<OfferMetadata>, Self::Error> {
+        let store = self.metadata.lock().await;
+        Ok(store.get(&(partition.to_string(), *id)).cloned())
+    }
+
+    async fn get_all_metadata(
+        &self,
+        partition: &str,
+        start: usize,
+        count: usize,
+    ) -> Result<Vec<OfferMetadata>, Self::Error> {
+        let store = self.metadata.lock().await;
+        // IndexMap preserves insertion order
+        let metadata: Vec<OfferMetadata> = store
+            .iter()
+            .filter(|((p, _), _)| p == partition)
+            .skip(start)
+            .take(count)
+            .map(|(_, metadata)| metadata.clone())
+            .collect();
+
+        Ok(metadata)
+    }
+
+    async fn post_metadata(&self, metadata: OfferMetadata) -> Result<Option<Uuid>, Self::Error> {
+        let mut store = self.metadata.lock().await;
+        if let indexmap::map::Entry::Vacant(e) =
+            store.entry((metadata.partition.to_string(), metadata.id))
+        {
+            e.insert(metadata.clone());
+            Ok(Some(metadata.id))
+        } else {
+            Ok(None)
+        }
+    }
+
+    async fn put_metadata(&self, metadata: OfferMetadata) -> Result<bool, Self::Error> {
+        let mut store = self.metadata.lock().await;
+        let was_new = store
+            .insert((metadata.partition.to_string(), metadata.id), metadata)
+            .is_none();
+        Ok(was_new)
+    }
+
+    async fn delete_metadata(&self, partition: &str, id: &Uuid) -> Result<bool, Self::Error> {
+        let offer_store = self.offer.lock().await;
+        let mut metadata_store = self.metadata.lock().await;
+
+        let metadata_in_use = offer_store
+            .values()
+            .any(|offer| offer.partition == partition && offer.offer.metadata_id == *id);
+
+        if metadata_in_use {
+            return Err(TestError::error(
+                format!("metadata {} is referenced by existing offers", id),
+                ServiceErrorSource::Downstream,
+                format!("delete metadata {partition}/{id}"),
+            ));
+        }
+
+        Ok(metadata_store
+            .swap_remove(&(partition.to_string(), *id))
+            .is_some())
+    }
+}
+
+#[async_trait]
+impl OfferProvider for TestOfferStore {
+    type Error = TestError;
+
+    async fn offer(
+        &self,
+        _hostname: &str,
+        partition: &str,
+        id: &Uuid,
+    ) -> Result<Option<Offer>, Self::Error> {
+        if let Some(offer) = self.get_offer(partition, id, Some(false)).await? {
+            let offer_metadata = match self
+                .get_metadata(partition, &offer.offer.metadata_id)
+                .await?
+            {
+                Some(metadata) => metadata,
+                None => {
+                    return Ok(None);
+                }
+            };
+
+            let lnurl_metadata = LnUrlOfferMetadata(offer_metadata.metadata);
+            let metadata_json_string = serde_json::to_string(&lnurl_metadata).map_err(|e| {
+                TestError::error(
+                    format!("serialization error: {e}"),
+                    ServiceErrorSource::Internal,
+                    format!(
+                        "serializing LnUrlOfferMetadata while building LNURL offer response for {offer:?}"
+                    ),
+                )
+            })?;
+
+            let mut hasher = Sha256::new();
+            hasher.update(metadata_json_string.as_bytes());
+            let metadata_json_hash = hasher.finalize().into();
+
+            Ok(Some(Offer {
+                partition: offer.partition,
+                id: offer.id,
+                max_sendable: offer.offer.max_sendable,
+                min_sendable: offer.offer.min_sendable,
+                metadata_json_string,
+                metadata_json_hash,
+                timestamp: offer.offer.timestamp,
+                expires: offer.offer.expires,
+            }))
+        } else {
+            Ok(None)
+        }
+    }
+}
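
Usage sketch for the new provider wrapper in pingora/src/discovery.rs: any DiscoveryBackendStore can now be handed to the balancer directly, with store errors converted to pingora errors inside the wrapper. Only PingoraDiscoveryBackendStoreProvider::new, PingoraBackendProvider::backends, and the DiscoveryBackends fields come from this patch; the helper name, bounds, and error handling below are illustrative assumptions.

    use switchgear_pingora::discovery::PingoraDiscoveryBackendStoreProvider;
    use switchgear_pingora::PingoraBackendProvider;
    use switchgear_service_api::discovery::DiscoveryBackendStore;

    // Hypothetical helper: wrap a store and poll it the way the balancer's
    // service discovery does.
    async fn poll_backends<S>(store: S)
    where
        S: DiscoveryBackendStore + Sync,
    {
        let provider = PingoraDiscoveryBackendStoreProvider::new(store);

        // First call with no etag returns the full backend list.
        if let Ok(first) = provider.backends(None).await {
            // Replaying the etag yields `backends: None` when nothing changed,
            // mirroring the DiscoveryBackendStore::get_all contract.
            if let Ok(second) = provider.backends(Some(first.etag)).await {
                assert!(second.backends.is_none() || second.etag != first.etag);
            }
        }
    }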
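
The offer read path now takes a sparse query flag (GetOfferQueryParameters above): the default response stays sparse, while sparse=false embeds the metadata document referenced by metadata_id. A test-style sketch using the same axum-test calls as the offer service tests in this patch; the server, bearer token, and offer id are assumed to come from a harness like create_test_server_with_offer.

    use axum_test::TestServer;
    use switchgear_service_api::offer::OfferRecord;
    use uuid::Uuid;

    // Hypothetical check against an already-seeded test server.
    async fn check_sparse_flag(server: &TestServer, token: String, offer_id: Uuid) {
        // Default behaviour: metadata_id only, no embedded metadata document.
        let sparse: OfferRecord = server
            .get(&format!("/offers/default/{offer_id}"))
            .authorization_bearer(token.clone())
            .await
            .json();
        assert!(sparse.offer.metadata.is_none());

        // sparse=false: the store joins in the metadata before responding.
        let full: OfferRecord = server
            .get(&format!("/offers/default/{offer_id}?sparse=false"))
            .authorization_bearer(token)
            .await
            .json();
        assert!(full.offer.metadata.is_some());
    }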
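
The crate-private test stores introduced here (service/src/testing/) stand in for the component memory stores in service unit tests. A rough seeding sketch; the record and metadata values are assumed to be built the same way as the create_test_offer / create_test_metadata helpers above.

    use crate::testing::discovery::store::TestDiscoveryBackendStore;
    use crate::testing::offer::store::TestOfferStore;
    use switchgear_service_api::offer::{OfferMetadata, OfferMetadataStore, OfferRecord, OfferStore};

    // Hypothetical seeding helper for offer-service tests.
    async fn seed_offer_store(offer: OfferRecord, metadata: OfferMetadata) -> TestOfferStore {
        let store = TestOfferStore::default();
        // Order matters: post_offer/put_offer reject records whose metadata_id has
        // no matching OfferMetadata in the same partition.
        store.put_metadata(metadata).await.unwrap();
        store.put_offer(offer).await.unwrap();
        store
    }

    // Drop-in replacement for MemoryDiscoveryBackendStore in discovery service tests;
    // every mutation bumps the internal etag used by get_all.
    fn empty_discovery_store() -> TestDiscoveryBackendStore {
        TestDiscoveryBackendStore::default()
    }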