From 9973fb24a8bf52854299978d7c2ed0ac911a912f Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 6 Feb 2026 12:36:01 +0800 Subject: [PATCH 01/64] chore: add persub limit option to pubsub config --- .../src/remote_account_provider/pubsub_common.rs | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/pubsub_common.rs b/magicblock-chainlink/src/remote_account_provider/pubsub_common.rs index c3dbeca11..65296eb7b 100644 --- a/magicblock-chainlink/src/remote_account_provider/pubsub_common.rs +++ b/magicblock-chainlink/src/remote_account_provider/pubsub_common.rs @@ -15,6 +15,7 @@ use crate::remote_account_provider::RemoteAccountProviderResult; pub struct PubsubClientConfig { pub pubsub_url: String, pub commitment_config: CommitmentConfig, + pub per_stream_subscription_limit: Option, } impl PubsubClientConfig { @@ -22,9 +23,17 @@ impl PubsubClientConfig { pubsub_url: impl Into, commitment_config: CommitmentConfig, ) -> Self { + let pubsub_url = pubsub_url.into(); + let per_stream_subscription_limit = + if pubsub_url.to_lowercase().contains("helius") { + Some(HELIUS_PER_STREAM_SUBSCRIPTION_LIMIT) + } else { + None + }; Self { - pubsub_url: pubsub_url.into(), + pubsub_url, commitment_config, + per_stream_subscription_limit, } } } @@ -88,5 +97,7 @@ pub enum ChainPubsubActorMessage { }, } +pub const HELIUS_PER_STREAM_SUBSCRIPTION_LIMIT: usize = 100; + pub const SUBSCRIPTION_UPDATE_CHANNEL_SIZE: usize = 5_000; -pub const MESSAGE_CHANNEL_SIZE: usize = 1_000; +pub const MESSAGE_CHANNEL_SIZE: usize = 1_000; \ No newline at end of file From 93cf5506515567dcf524fc3f3672e3de87821e90 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 6 Feb 2026 15:47:15 +0800 Subject: [PATCH 02/64] feat: initial pool impl using pubsub client trait --- .../chain_pubsub_actor.rs | 30 +-- .../chain_pubsub_client.rs | 37 +++- .../src/remote_account_provider/errors.rs | 13 ++ .../src/remote_account_provider/mod.rs 
| 1 + .../remote_account_provider/pubsub_common.rs | 2 +- .../pubsub_connection_pool.rs | 184 ++++++++++++++++++ test-integration/Cargo.lock | 70 +++---- 7 files changed, 277 insertions(+), 60 deletions(-) create mode 100644 magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs diff --git a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs index d7dbcf6c5..576514918 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs @@ -29,8 +29,8 @@ use tokio_util::sync::CancellationToken; use tracing::*; use super::{ - chain_pubsub_client::PubSubConnection, errors::{RemoteAccountProviderError, RemoteAccountProviderResult}, + pubsub_connection_pool::PubSubConnectionPool, }; use crate::remote_account_provider::{ pubsub_common::{ @@ -50,8 +50,8 @@ const CLOCK_LOG_SLOT_FREQ: u64 = 25; pub struct ChainPubsubActor { /// Configuration used to create the pubsub client pubsub_client_config: PubsubClientConfig, - /// Underlying pubsub connection to connect to the chain - pubsub_connection: Arc, + /// Underlying pubsub connection pool to connect to the chain + pubsub_connection: Arc, /// Sends subscribe/unsubscribe messages to this actor messages_sender: mpsc::Sender, /// Map of subscriptions we are holding @@ -93,16 +93,19 @@ impl ChainPubsubActor { ) -> RemoteAccountProviderResult<(Self, mpsc::Receiver)> { let url = pubsub_client_config.pubsub_url.clone(); + let limit = pubsub_client_config.per_stream_subscription_limit + .unwrap_or(usize::MAX); let pubsub_connection = { - let pubsub_connection = - PubSubConnection::new(url).await.inspect_err(|err| { + let pubsub_pool = PubSubConnectionPool::new(url, limit) + .await + .inspect_err(|err| { error!( client_id = client_id, err = ?err, "Failed to connect to provider" ) })?; - Arc::new(pubsub_connection) + 
Arc::new(pubsub_pool) }; let (subscription_updates_sender, subscription_updates_receiver) = @@ -252,7 +255,7 @@ impl ChainPubsubActor { async fn handle_msg( subscriptions: Arc>>, program_subs: Arc>>, - pubsub_connection: Arc, + pubsub_connection: Arc, subscription_updates_sender: mpsc::Sender, pubsub_client_config: PubsubClientConfig, abort_sender: mpsc::Sender<()>, @@ -408,7 +411,7 @@ impl ChainPubsubActor { sub_response: oneshot::Sender>, subs: Arc>>, program_subs: Arc>>, - pubsub_connection: Arc, + pubsub_connection: Arc, subscription_updates_sender: mpsc::Sender, abort_sender: mpsc::Sender<()>, is_connected: Arc, @@ -570,7 +573,7 @@ impl ChainPubsubActor { sub_response: oneshot::Sender>, subs: Arc>>, program_subs: Arc>>, - pubsub_connection: Arc, + pubsub_connection: Arc, subscription_updates_sender: mpsc::Sender, abort_sender: mpsc::Sender<()>, is_connected: Arc, @@ -726,16 +729,13 @@ impl ChainPubsubActor { #[instrument(skip(pubsub_connection, pubsub_client_config, is_connected), fields(client_id = %client_id))] async fn try_reconnect( - pubsub_connection: Arc, + pubsub_connection: Arc, pubsub_client_config: PubsubClientConfig, client_id: &str, is_connected: Arc, ) -> RemoteAccountProviderResult<()> { // 1. 
Try to reconnect the pubsub connection - if let Err(err) = pubsub_connection.reconnect().await { - debug!(error = ?err, "Failed to reconnect"); - return Err(err.into()); - } + pubsub_connection.clear_connections(); // Make a sub to any account and unsub immediately to verify connection let pubkey = Pubkey::new_unique(); let config = RpcAccountInfoConfig { @@ -817,4 +817,4 @@ impl ChainPubsubActor { } }); } -} +} \ No newline at end of file diff --git a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs index afd84974a..29d9c244e 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs @@ -34,12 +34,12 @@ use super::{ pubsub_common::{ChainPubsubActorMessage, SubscriptionUpdate}, }; -type UnsubscribeFn = Box BoxFuture<'static, ()> + Send>; -type SubscribeResult = PubsubClientResult<( +pub type UnsubscribeFn = Box BoxFuture<'static, ()> + Send>; +pub type SubscribeResult = PubsubClientResult<( BoxStream<'static, Response>, UnsubscribeFn, )>; -type ProgramSubscribeResult = PubsubClientResult<( +pub type ProgramSubscribeResult = PubsubClientResult<( BoxStream<'static, Response>, UnsubscribeFn, )>; @@ -48,13 +48,29 @@ const MAX_RECONNECT_ATTEMPTS: usize = 5; const RECONNECT_ATTEMPT_DELAY: Duration = Duration::from_millis(500); const MAX_RESUB_DELAY_MS: u64 = 800; -pub struct PubSubConnection { +#[async_trait] +pub trait PubsubConnection { + fn url(&self) -> &str; + async fn account_subscribe( + &self, + pubkey: &Pubkey, + config: RpcAccountInfoConfig, + ) -> SubscribeResult; + async fn program_subscribe( + &self, + program_id: &Pubkey, + config: RpcProgramAccountsConfig, + ) -> ProgramSubscribeResult; + async fn reconnect(&self) -> PubsubClientResult<()>; +} + +pub struct PubsubConnectionImpl { client: ArcSwap, url: String, reconnect_guard: AsyncMutex<()>, } -impl 
PubSubConnection { +impl PubsubConnectionImpl { pub async fn new(url: String) -> RemoteAccountProviderResult { let client = Arc::new(PubsubClient::new(&url).await?).into(); let reconnect_guard = AsyncMutex::new(()); @@ -64,12 +80,15 @@ impl PubSubConnection { reconnect_guard, }) } +} - pub fn url(&self) -> &str { +#[async_trait] +impl PubsubConnection for PubsubConnectionImpl { + fn url(&self) -> &str { &self.url } - pub async fn account_subscribe( + async fn account_subscribe( &self, pubkey: &Pubkey, config: RpcAccountInfoConfig, @@ -90,7 +109,7 @@ impl PubSubConnection { Ok((stream, unsub)) } - pub async fn program_subscribe( + async fn program_subscribe( &self, program_id: &Pubkey, config: RpcProgramAccountsConfig, @@ -113,7 +132,7 @@ impl PubSubConnection { Ok((stream, unsub)) } - pub async fn reconnect(&self) -> PubsubClientResult<()> { + async fn reconnect(&self) -> PubsubClientResult<()> { // Prevents multiple reconnect attempts running concurrently let _guard = match self.reconnect_guard.try_lock() { Ok(g) => g, diff --git a/magicblock-chainlink/src/remote_account_provider/errors.rs b/magicblock-chainlink/src/remote_account_provider/errors.rs index fe5498f94..8db91fa02 100644 --- a/magicblock-chainlink/src/remote_account_provider/errors.rs +++ b/magicblock-chainlink/src/remote_account_provider/errors.rs @@ -120,3 +120,16 @@ impl From Self::PubsubClientError(Box::new(e)) } } + +impl From + for solana_pubsub_client::pubsub_client::PubsubClientError +{ + fn from(e: RemoteAccountProviderError) -> Self { + match e { + RemoteAccountProviderError::PubsubClientError(err) => *err, + _ => solana_pubsub_client::pubsub_client::PubsubClientError::UrlParseError( + url::ParseError::Overflow, + ), + } + } +} diff --git a/magicblock-chainlink/src/remote_account_provider/mod.rs b/magicblock-chainlink/src/remote_account_provider/mod.rs index 38c62ae22..505b43d26 100644 --- a/magicblock-chainlink/src/remote_account_provider/mod.rs +++ 
b/magicblock-chainlink/src/remote_account_provider/mod.rs @@ -52,6 +52,7 @@ pub mod errors; mod lru_cache; pub mod program_account; pub mod pubsub_common; +pub mod pubsub_connection_pool; mod remote_account; mod subscription_reconciler; diff --git a/magicblock-chainlink/src/remote_account_provider/pubsub_common.rs b/magicblock-chainlink/src/remote_account_provider/pubsub_common.rs index 65296eb7b..75bf67108 100644 --- a/magicblock-chainlink/src/remote_account_provider/pubsub_common.rs +++ b/magicblock-chainlink/src/remote_account_provider/pubsub_common.rs @@ -100,4 +100,4 @@ pub enum ChainPubsubActorMessage { pub const HELIUS_PER_STREAM_SUBSCRIPTION_LIMIT: usize = 100; pub const SUBSCRIPTION_UPDATE_CHANNEL_SIZE: usize = 5_000; -pub const MESSAGE_CHANNEL_SIZE: usize = 1_000; \ No newline at end of file +pub const MESSAGE_CHANNEL_SIZE: usize = 1_000; diff --git a/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs b/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs new file mode 100644 index 000000000..d29462ff1 --- /dev/null +++ b/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs @@ -0,0 +1,184 @@ +use std::sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, +}; + +use scc::{ebr::Guard, Queue}; +use solana_pubkey::Pubkey; +use solana_rpc_client_api::config::{ + RpcAccountInfoConfig, RpcProgramAccountsConfig, +}; +use tracing::*; + +use crate::remote_account_provider::chain_pubsub_client::PubsubConnection; + +use super::chain_pubsub_client::{ + ProgramSubscribeResult, PubsubConnectionImpl, SubscribeResult, + UnsubscribeFn, +}; +use super::errors::RemoteAccountProviderResult; + +/// A slot in the connection pool, wrapping a PubSubConnection and +/// tracking its subscription count. 
+struct PooledConnection { + connection: Arc, + sub_count: Arc, +} + +/// A pool of PubSubConnections that distributes subscriptions across +/// multiple websocket connections to stay within per-stream subscription +/// limits. +pub struct PubSubConnectionPool { + connections: Arc>, + url: String, + per_connection_sub_limit: usize, +} + +impl PubSubConnectionPool { + /// Creates a new pool with a single initial connection. + pub async fn new( + url: String, + limit: usize, + ) -> RemoteAccountProviderResult { + // Creating initial connection also to verify that provider is valid + let connection = + Arc::new(PubsubConnectionImpl::new(url.clone()).await?); + let conn = PooledConnection { + connection, + sub_count: Arc::new(AtomicUsize::new(0)), + }; + let queue = { + let queue = Queue::default(); + queue.push(conn); + queue + }; + Ok(Self { + connections: Arc::new(queue), + url, + per_connection_sub_limit: limit, + }) + } + + /// Returns the websocket URL. + pub fn url(&self) -> &str { + &self.url + } + + /// Subscribes to account updates, distributing across pool slots. + pub async fn account_subscribe( + &self, + pubkey: &Pubkey, + config: RpcAccountInfoConfig, + ) -> SubscribeResult { + let (sub_count, connection) = + match self.find_or_create_connection().await { + Ok(result) => result, + Err(err) => return Err(err.into()), + }; + + // Subscribe using the selected connection + match connection.account_subscribe(pubkey, config).await { + Ok((stream, raw_unsub)) => { + let wrapped_unsub = self.wrap_unsub(raw_unsub, sub_count); + Ok((stream, wrapped_unsub)) + } + Err(err) => { + // Rollback: decrement count + sub_count.fetch_sub(1, Ordering::SeqCst); + Err(err) + } + } + } + + /// Subscribes to program account updates, distributing across pool slots. 
+ pub async fn program_subscribe( + &self, + program_id: &Pubkey, + config: RpcProgramAccountsConfig, + ) -> ProgramSubscribeResult { + let (sub_count, connection) = + match self.find_or_create_connection().await { + Ok(result) => result, + Err(err) => return Err(err.into()), + }; + + // Subscribe using the selected connection + match connection.program_subscribe(program_id, config).await { + Ok((stream, raw_unsub)) => { + let wrapped_unsub = self.wrap_unsub(raw_unsub, sub_count); + Ok((stream, wrapped_unsub)) + } + Err(err) => { + // Rollback: decrement count + sub_count.fetch_sub(1, Ordering::SeqCst); + Err(err) + } + } + } + + /// Reconnects the pool: clears state and reconnects the first slot. + pub fn clear_connections(&self) { + while self.connections.pop().is_some() {} + } + + /// Finds a connection for a new subscription, creating new connections + /// as needed. Returns (sub_count, connection). + async fn find_or_create_connection( + &self, + ) -> RemoteAccountProviderResult<( + Arc, + Arc, + )> { + // Phase 1: Try to find a slot with capacity under lock + + { + let guard = Guard::new(); + if let Some(pooled_conn) = self.pick_connection(&guard) { + let sub_count = Arc::clone(&pooled_conn.sub_count); + sub_count.fetch_add(1, Ordering::SeqCst); + return Ok((sub_count, Arc::clone(&pooled_conn.connection))); + } + } + + // Phase 2: No slot has capacity; create new connection (async) + let new_connection = + Arc::new(PubsubConnectionImpl::new(self.url.clone()).await?); + + // Phase 3: Add new slot to pool under lock + let sub_count = Arc::new(AtomicUsize::new(1)); + let conn = PooledConnection { + connection: Arc::clone(&new_connection), + sub_count: Arc::clone(&sub_count), + }; + self.connections.push(conn); + trace!("Created new pooled connection"); + Ok((sub_count, new_connection)) + } + + /// Picks a slot with available capacity using first-fit. + /// Returns None if no slot has capacity (need to create new connection). 
+ fn pick_connection<'a>( + &self, + guard: &'a Guard, + ) -> Option<&'a PooledConnection> { + self.connections.iter(guard).find(|conn| { + conn.sub_count.load(Ordering::SeqCst) + < self.per_connection_sub_limit + }) + } + + /// Wraps a raw unsubscribe function to also decrement the sub counter for the + /// connection on which it was made. + fn wrap_unsub( + &self, + raw_unsub: UnsubscribeFn, + sub_count: Arc, + ) -> UnsubscribeFn { + Box::new(move || { + Box::pin(async move { + raw_unsub().await; + sub_count.fetch_sub(1, Ordering::SeqCst); + }) + }) + } +} diff --git a/test-integration/Cargo.lock b/test-integration/Cargo.lock index c9b6effd9..11c887e53 100644 --- a/test-integration/Cargo.lock +++ b/test-integration/Cargo.lock @@ -1700,7 +1700,7 @@ dependencies = [ "ephemeral-rollups-sdk-attribute-ephemeral", "getrandom 0.2.16", "magicblock-delegation-program 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "magicblock-magic-program-api 0.6.1 (git+https://github.com/magicblock-labs/magicblock-validator.git?rev=4d8cded9d)", + "magicblock-magic-program-api 0.6.1", "solana-account", "solana-account-info", "solana-cpi", @@ -2241,10 +2241,10 @@ dependencies = [ [[package]] name = "guinea" -version = "0.6.1" +version = "0.6.2" dependencies = [ "bincode", - "magicblock-magic-program-api 0.6.1", + "magicblock-magic-program-api 0.6.2", "serde", "solana-program", ] @@ -3257,7 +3257,7 @@ dependencies = [ [[package]] name = "magicblock-account-cloner" -version = "0.6.1" +version = "0.6.2" dependencies = [ "async-trait", "bincode", @@ -3267,7 +3267,7 @@ dependencies = [ "magicblock-config", "magicblock-core", "magicblock-ledger", - "magicblock-magic-program-api 0.6.1", + "magicblock-magic-program-api 0.6.2", "magicblock-program", "magicblock-rpc-client", "rand 0.9.2", @@ -3289,7 +3289,7 @@ dependencies = [ [[package]] name = "magicblock-accounts" -version = "0.6.1" +version = "0.6.2" dependencies = [ "async-trait", "magicblock-account-cloner", @@ -3311,7 
+3311,7 @@ dependencies = [ [[package]] name = "magicblock-accounts-db" -version = "0.6.1" +version = "0.6.2" dependencies = [ "lmdb-rkv", "magicblock-config", @@ -3326,7 +3326,7 @@ dependencies = [ [[package]] name = "magicblock-aperture" -version = "0.6.1" +version = "0.6.2" dependencies = [ "agave-geyser-plugin-interface", "arc-swap", @@ -3374,7 +3374,7 @@ dependencies = [ [[package]] name = "magicblock-api" -version = "0.6.1" +version = "0.6.2" dependencies = [ "anyhow", "borsh 1.6.0", @@ -3389,7 +3389,7 @@ dependencies = [ "magicblock-config", "magicblock-core", "magicblock-ledger", - "magicblock-magic-program-api 0.6.1", + "magicblock-magic-program-api 0.6.2", "magicblock-metrics", "magicblock-processor", "magicblock-program", @@ -3429,7 +3429,7 @@ dependencies = [ [[package]] name = "magicblock-chainlink" -version = "0.6.1" +version = "0.6.2" dependencies = [ "arc-swap", "async-trait", @@ -3441,7 +3441,7 @@ dependencies = [ "magicblock-config", "magicblock-core", "magicblock-delegation-program 1.1.3 (git+https://github.com/magicblock-labs/delegation-program.git?rev=1874b4f5f5f55cb9ab54b64de2cc0d41107d1435)", - "magicblock-magic-program-api 0.6.1", + "magicblock-magic-program-api 0.6.2", "magicblock-metrics", "parking_lot", "scc", @@ -3483,7 +3483,7 @@ dependencies = [ [[package]] name = "magicblock-committor-program" -version = "0.6.1" +version = "0.6.2" dependencies = [ "borsh 1.6.0", "paste", @@ -3495,7 +3495,7 @@ dependencies = [ [[package]] name = "magicblock-committor-service" -version = "0.6.1" +version = "0.6.2" dependencies = [ "async-trait", "base64 0.21.7", @@ -3539,7 +3539,7 @@ dependencies = [ [[package]] name = "magicblock-config" -version = "0.6.1" +version = "0.6.2" dependencies = [ "clap", "derive_more", @@ -3557,10 +3557,10 @@ dependencies = [ [[package]] name = "magicblock-core" -version = "0.6.1" +version = "0.6.2" dependencies = [ "flume", - "magicblock-magic-program-api 0.6.1", + "magicblock-magic-program-api 0.6.2", "solana-account", 
"solana-account-decoder", "solana-hash", @@ -3616,7 +3616,7 @@ dependencies = [ [[package]] name = "magicblock-ledger" -version = "0.6.1" +version = "0.6.2" dependencies = [ "arc-swap", "bincode", @@ -3657,6 +3657,7 @@ dependencies = [ [[package]] name = "magicblock-magic-program-api" version = "0.6.1" +source = "git+https://github.com/magicblock-labs/magicblock-validator.git?rev=4d8cded9d#4d8cded9d77772baf05462741dee59421e2e413a" dependencies = [ "bincode", "serde", @@ -3665,8 +3666,7 @@ dependencies = [ [[package]] name = "magicblock-magic-program-api" -version = "0.6.1" -source = "git+https://github.com/magicblock-labs/magicblock-validator.git?rev=4d8cded9d#4d8cded9d77772baf05462741dee59421e2e413a" +version = "0.6.2" dependencies = [ "bincode", "serde", @@ -3675,7 +3675,7 @@ dependencies = [ [[package]] name = "magicblock-metrics" -version = "0.6.1" +version = "0.6.2" dependencies = [ "http-body-util", "hyper 1.8.1", @@ -3689,7 +3689,7 @@ dependencies = [ [[package]] name = "magicblock-processor" -version = "0.6.1" +version = "0.6.2" dependencies = [ "bincode", "magicblock-accounts-db", @@ -3725,12 +3725,12 @@ dependencies = [ [[package]] name = "magicblock-program" -version = "0.6.1" +version = "0.6.2" dependencies = [ "bincode", "lazy_static", "magicblock-core", - "magicblock-magic-program-api 0.6.1", + "magicblock-magic-program-api 0.6.2", "num-derive", "num-traits", "parking_lot", @@ -3757,7 +3757,7 @@ dependencies = [ [[package]] name = "magicblock-rpc-client" -version = "0.6.1" +version = "0.6.2" dependencies = [ "solana-account", "solana-account-decoder-client-types", @@ -3779,7 +3779,7 @@ dependencies = [ [[package]] name = "magicblock-table-mania" -version = "0.6.1" +version = "0.6.2" dependencies = [ "ed25519-dalek", "magicblock-metrics", @@ -3805,7 +3805,7 @@ dependencies = [ [[package]] name = "magicblock-task-scheduler" -version = "0.6.1" +version = "0.6.2" dependencies = [ "bincode", "chrono", @@ -3831,7 +3831,7 @@ dependencies = [ [[package]] name 
= "magicblock-validator-admin" -version = "0.6.1" +version = "0.6.2" dependencies = [ "magicblock-delegation-program 1.1.3 (git+https://github.com/magicblock-labs/delegation-program.git?rev=1874b4f5f5f55cb9ab54b64de2cc0d41107d1435)", "magicblock-program", @@ -3848,7 +3848,7 @@ dependencies = [ [[package]] name = "magicblock-version" -version = "0.6.1" +version = "0.6.2" dependencies = [ "git-version", "rustc_version", @@ -4748,7 +4748,7 @@ dependencies = [ "bincode", "borsh 1.6.0", "ephemeral-rollups-sdk", - "magicblock-magic-program-api 0.6.1", + "magicblock-magic-program-api 0.6.2", "serde", "solana-program", ] @@ -4772,7 +4772,7 @@ dependencies = [ "borsh 1.6.0", "ephemeral-rollups-sdk", "magicblock-delegation-program 1.1.3 (git+https://github.com/magicblock-labs/delegation-program.git?rev=1874b4f5f5f55cb9ab54b64de2cc0d41107d1435)", - "magicblock-magic-program-api 0.6.1", + "magicblock-magic-program-api 0.6.2", "rkyv 0.7.45", "solana-program", "static_assertions", @@ -5806,7 +5806,7 @@ dependencies = [ "ephemeral-rollups-sdk", "integration-test-tools", "magicblock-core", - "magicblock-magic-program-api 0.6.1", + "magicblock-magic-program-api 0.6.2", "program-schedulecommit", "rand 0.8.5", "schedulecommit-client", @@ -5824,7 +5824,7 @@ version = "0.0.0" dependencies = [ "integration-test-tools", "magicblock-core", - "magicblock-magic-program-api 0.6.1", + "magicblock-magic-program-api 0.6.2", "program-schedulecommit", "program-schedulecommit-security", "schedulecommit-client", @@ -8543,7 +8543,7 @@ dependencies = [ [[package]] name = "solana-storage-proto" -version = "0.6.1" +version = "0.6.2" dependencies = [ "bincode", "bs58", @@ -10044,7 +10044,7 @@ dependencies = [ [[package]] name = "test-kit" -version = "0.6.1" +version = "0.6.2" dependencies = [ "guinea", "magicblock-accounts-db", From 0c21aa8aac3d96b64d212730497540d741866b8f Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 6 Feb 2026 21:06:40 +0800 Subject: [PATCH 03/64] chore: extract pubsub 
connection to separate module Amp-Thread-ID: https://ampcode.com/threads/T-019c330f-5ffe-720e-a1d5-c702b78de081 Co-authored-by: Amp --- .../chain_pubsub_client.rs | 147 +--------------- .../src/remote_account_provider/mod.rs | 1 + .../pubsub_connection.rs | 158 ++++++++++++++++++ .../pubsub_connection_pool.rs | 10 +- 4 files changed, 165 insertions(+), 151 deletions(-) create mode 100644 magicblock-chainlink/src/remote_account_provider/pubsub_connection.rs diff --git a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs index 29d9c244e..759ca4225 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs @@ -1,6 +1,5 @@ use std::{ collections::HashSet, - mem, sync::{ atomic::{AtomicU64, Ordering}, Arc, Mutex, @@ -8,24 +7,11 @@ use std::{ time::Duration, }; -use arc_swap::ArcSwap; use async_trait::async_trait; -use futures_util::{future::BoxFuture, stream::BoxStream}; use magicblock_metrics::metrics; -use solana_account_decoder::UiAccount; use solana_commitment_config::CommitmentConfig; use solana_pubkey::Pubkey; -use solana_pubsub_client::nonblocking::pubsub_client::{ - PubsubClient, PubsubClientResult, -}; -use solana_rpc_client_api::{ - config::{RpcAccountInfoConfig, RpcProgramAccountsConfig}, - response::{Response, RpcKeyedAccount}, -}; -use tokio::{ - sync::{mpsc, oneshot, Mutex as AsyncMutex}, - time, -}; +use tokio::sync::{mpsc, oneshot}; use tracing::*; use super::{ @@ -34,137 +20,8 @@ use super::{ pubsub_common::{ChainPubsubActorMessage, SubscriptionUpdate}, }; -pub type UnsubscribeFn = Box BoxFuture<'static, ()> + Send>; -pub type SubscribeResult = PubsubClientResult<( - BoxStream<'static, Response>, - UnsubscribeFn, -)>; -pub type ProgramSubscribeResult = PubsubClientResult<( - BoxStream<'static, Response>, - UnsubscribeFn, -)>; - -const MAX_RECONNECT_ATTEMPTS: 
usize = 5; -const RECONNECT_ATTEMPT_DELAY: Duration = Duration::from_millis(500); const MAX_RESUB_DELAY_MS: u64 = 800; -#[async_trait] -pub trait PubsubConnection { - fn url(&self) -> &str; - async fn account_subscribe( - &self, - pubkey: &Pubkey, - config: RpcAccountInfoConfig, - ) -> SubscribeResult; - async fn program_subscribe( - &self, - program_id: &Pubkey, - config: RpcProgramAccountsConfig, - ) -> ProgramSubscribeResult; - async fn reconnect(&self) -> PubsubClientResult<()>; -} - -pub struct PubsubConnectionImpl { - client: ArcSwap, - url: String, - reconnect_guard: AsyncMutex<()>, -} - -impl PubsubConnectionImpl { - pub async fn new(url: String) -> RemoteAccountProviderResult { - let client = Arc::new(PubsubClient::new(&url).await?).into(); - let reconnect_guard = AsyncMutex::new(()); - Ok(Self { - client, - url, - reconnect_guard, - }) - } -} - -#[async_trait] -impl PubsubConnection for PubsubConnectionImpl { - fn url(&self) -> &str { - &self.url - } - - async fn account_subscribe( - &self, - pubkey: &Pubkey, - config: RpcAccountInfoConfig, - ) -> SubscribeResult { - let client = self.client.load(); - let config = Some(config.clone()); - let (stream, unsub) = client.account_subscribe(pubkey, config).await?; - // SAFETY: - // the returned stream depends on the used client, which is only ever dropped - // if the connection has been terminated, at which point the stream is useless - // and will be discarded as well, thus it's safe lifetime extension to 'static - let stream = unsafe { - mem::transmute::< - BoxStream<'_, Response>, - BoxStream<'static, Response>, - >(stream) - }; - Ok((stream, unsub)) - } - - async fn program_subscribe( - &self, - program_id: &Pubkey, - config: RpcProgramAccountsConfig, - ) -> ProgramSubscribeResult { - let client = self.client.load(); - let config = Some(config.clone()); - let (stream, unsub) = - client.program_subscribe(program_id, config).await?; - - // SAFETY: - // the returned stream depends on the used client, which is 
only ever dropped - // if the connection has been terminated, at which point the stream is useless - // and will be discarded as well, thus it's safe lifetime extension to 'static - let stream = unsafe { - mem::transmute::< - BoxStream<'_, Response>, - BoxStream<'static, Response>, - >(stream) - }; - Ok((stream, unsub)) - } - - async fn reconnect(&self) -> PubsubClientResult<()> { - // Prevents multiple reconnect attempts running concurrently - let _guard = match self.reconnect_guard.try_lock() { - Ok(g) => g, - // Reconnect is already in progress - Err(_) => { - // Wait a bit and return to retry subscription - time::sleep(RECONNECT_ATTEMPT_DELAY).await; - return Ok(()); - } - }; - let mut attempt = 1; - let client = loop { - match PubsubClient::new(&self.url).await { - Ok(c) => break Arc::new(c), - Err(error) => { - warn!( - "failed to reconnect to ws endpoint at {} {error}", - self.url - ); - if attempt == MAX_RECONNECT_ATTEMPTS { - return Err(error); - } - attempt += 1; - time::sleep(RECONNECT_ATTEMPT_DELAY).await; - } - } - }; - self.client.store(client); - Ok(()) - } -} - // ----------------- // Trait // ----------------- @@ -681,4 +538,4 @@ pub mod mock { Ok(()) } } -} +} \ No newline at end of file diff --git a/magicblock-chainlink/src/remote_account_provider/mod.rs b/magicblock-chainlink/src/remote_account_provider/mod.rs index 505b43d26..b20a7184f 100644 --- a/magicblock-chainlink/src/remote_account_provider/mod.rs +++ b/magicblock-chainlink/src/remote_account_provider/mod.rs @@ -52,6 +52,7 @@ pub mod errors; mod lru_cache; pub mod program_account; pub mod pubsub_common; +pub mod pubsub_connection; pub mod pubsub_connection_pool; mod remote_account; mod subscription_reconciler; diff --git a/magicblock-chainlink/src/remote_account_provider/pubsub_connection.rs b/magicblock-chainlink/src/remote_account_provider/pubsub_connection.rs new file mode 100644 index 000000000..69deb02f0 --- /dev/null +++ 
b/magicblock-chainlink/src/remote_account_provider/pubsub_connection.rs @@ -0,0 +1,158 @@ +use std::{ + mem, + sync::Arc, +}; + +use arc_swap::ArcSwap; +use async_trait::async_trait; +use futures_util::stream::BoxStream; +use solana_account_decoder::UiAccount; +use solana_pubkey::Pubkey; +use solana_pubsub_client::nonblocking::pubsub_client::{ + PubsubClient, PubsubClientResult, +}; +use solana_rpc_client_api::{ + config::{RpcAccountInfoConfig, RpcProgramAccountsConfig}, + response::{Response, RpcKeyedAccount}, +}; +use tokio::{ + sync::Mutex as AsyncMutex, + time, +}; +use tracing::warn; + +use super::errors::RemoteAccountProviderResult; + +pub type UnsubscribeFn = + Box futures_util::future::BoxFuture<'static, ()> + Send>; +pub type SubscribeResult = PubsubClientResult<( + BoxStream<'static, Response>, + UnsubscribeFn, +)>; +pub type ProgramSubscribeResult = PubsubClientResult<( + BoxStream<'static, Response>, + UnsubscribeFn, +)>; + +const MAX_RECONNECT_ATTEMPTS: usize = 5; +const RECONNECT_ATTEMPT_DELAY: std::time::Duration = + std::time::Duration::from_millis(500); + +#[async_trait] +pub trait PubsubConnection { + fn url(&self) -> &str; + async fn account_subscribe( + &self, + pubkey: &Pubkey, + config: RpcAccountInfoConfig, + ) -> SubscribeResult; + async fn program_subscribe( + &self, + program_id: &Pubkey, + config: RpcProgramAccountsConfig, + ) -> ProgramSubscribeResult; + async fn reconnect(&self) -> PubsubClientResult<()>; +} + +pub struct PubsubConnectionImpl { + client: ArcSwap, + url: String, + reconnect_guard: AsyncMutex<()>, +} + +impl PubsubConnectionImpl { + pub async fn new(url: String) -> RemoteAccountProviderResult { + let client = Arc::new(PubsubClient::new(&url).await?).into(); + let reconnect_guard = AsyncMutex::new(()); + Ok(Self { + client, + url, + reconnect_guard, + }) + } +} + +#[async_trait] +impl PubsubConnection for PubsubConnectionImpl { + fn url(&self) -> &str { + &self.url + } + + async fn account_subscribe( + &self, + pubkey: 
&Pubkey, + config: RpcAccountInfoConfig, + ) -> SubscribeResult { + let client = self.client.load(); + let config = Some(config.clone()); + let (stream, unsub) = client.account_subscribe(pubkey, config).await?; + // SAFETY: + // the returned stream depends on the used client, which is only ever + // dropped if the connection has been terminated, at which point the + // stream is useless and will be discarded as well, thus it's safe + // lifetime extension to 'static + let stream = unsafe { + mem::transmute::< + BoxStream<'_, Response>, + BoxStream<'static, Response>, + >(stream) + }; + Ok((stream, unsub)) + } + + async fn program_subscribe( + &self, + program_id: &Pubkey, + config: RpcProgramAccountsConfig, + ) -> ProgramSubscribeResult { + let client = self.client.load(); + let config = Some(config.clone()); + let (stream, unsub) = + client.program_subscribe(program_id, config).await?; + + // SAFETY: + // the returned stream depends on the used client, which is only ever + // dropped if the connection has been terminated, at which point the + // stream is useless and will be discarded as well, thus it's safe + // lifetime extension to 'static + let stream = unsafe { + mem::transmute::< + BoxStream<'_, Response>, + BoxStream<'static, Response>, + >(stream) + }; + Ok((stream, unsub)) + } + + async fn reconnect(&self) -> PubsubClientResult<()> { + // Prevents multiple reconnect attempts running concurrently + let _guard = match self.reconnect_guard.try_lock() { + Ok(g) => g, + // Reconnect is already in progress + Err(_) => { + // Wait a bit and return to retry subscription + time::sleep(RECONNECT_ATTEMPT_DELAY).await; + return Ok(()); + } + }; + let mut attempt = 1; + let client = loop { + match PubsubClient::new(&self.url).await { + Ok(c) => break Arc::new(c), + Err(error) => { + warn!( + "failed to reconnect to ws endpoint at {} {error}", + self.url + ); + if attempt == MAX_RECONNECT_ATTEMPTS { + return Err(error); + } + attempt += 1; + 
time::sleep(RECONNECT_ATTEMPT_DELAY).await; + } + } + }; + self.client.store(client); + Ok(()) + } +} diff --git a/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs b/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs index d29462ff1..ef49cfece 100644 --- a/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs +++ b/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs @@ -10,11 +10,9 @@ use solana_rpc_client_api::config::{ }; use tracing::*; -use crate::remote_account_provider::chain_pubsub_client::PubsubConnection; - -use super::chain_pubsub_client::{ - ProgramSubscribeResult, PubsubConnectionImpl, SubscribeResult, - UnsubscribeFn, +use super::pubsub_connection::{ + ProgramSubscribeResult, PubsubConnection, PubsubConnectionImpl, + SubscribeResult, UnsubscribeFn, }; use super::errors::RemoteAccountProviderResult; @@ -181,4 +179,4 @@ impl PubSubConnectionPool { }) }) } -} +} \ No newline at end of file From 43d9b7a62b668c2a36e3d22904ee092665af7603 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 6 Feb 2026 21:32:17 +0800 Subject: [PATCH 04/64] test: add mock PubsubConnection and make pool generic Amp-Thread-ID: https://ampcode.com/threads/T-019c330f-e8f6-72c0-9e0e-93bbc1f1b9c2 Co-authored-by: Amp --- .../chain_pubsub_actor.rs | 16 +-- .../chain_pubsub_client.rs | 2 +- .../pubsub_connection.rs | 108 +++++++++++++++--- .../pubsub_connection_pool.rs | 32 +++--- 4 files changed, 116 insertions(+), 42 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs index 576514918..15f0f8f1d 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs @@ -38,6 +38,7 @@ use crate::remote_account_provider::{ SubscriptionUpdate, MESSAGE_CHANNEL_SIZE, 
SUBSCRIPTION_UPDATE_CHANNEL_SIZE, }, + pubsub_connection::PubsubConnectionImpl, DEFAULT_SUBSCRIPTION_RETRIES, }; @@ -51,7 +52,7 @@ pub struct ChainPubsubActor { /// Configuration used to create the pubsub client pubsub_client_config: PubsubClientConfig, /// Underlying pubsub connection pool to connect to the chain - pubsub_connection: Arc, + pubsub_connection: Arc>, /// Sends subscribe/unsubscribe messages to this actor messages_sender: mpsc::Sender, /// Map of subscriptions we are holding @@ -93,7 +94,8 @@ impl ChainPubsubActor { ) -> RemoteAccountProviderResult<(Self, mpsc::Receiver)> { let url = pubsub_client_config.pubsub_url.clone(); - let limit = pubsub_client_config.per_stream_subscription_limit + let limit = pubsub_client_config + .per_stream_subscription_limit .unwrap_or(usize::MAX); let pubsub_connection = { let pubsub_pool = PubSubConnectionPool::new(url, limit) @@ -255,7 +257,7 @@ impl ChainPubsubActor { async fn handle_msg( subscriptions: Arc>>, program_subs: Arc>>, - pubsub_connection: Arc, + pubsub_connection: Arc>, subscription_updates_sender: mpsc::Sender, pubsub_client_config: PubsubClientConfig, abort_sender: mpsc::Sender<()>, @@ -411,7 +413,7 @@ impl ChainPubsubActor { sub_response: oneshot::Sender>, subs: Arc>>, program_subs: Arc>>, - pubsub_connection: Arc, + pubsub_connection: Arc>, subscription_updates_sender: mpsc::Sender, abort_sender: mpsc::Sender<()>, is_connected: Arc, @@ -573,7 +575,7 @@ impl ChainPubsubActor { sub_response: oneshot::Sender>, subs: Arc>>, program_subs: Arc>>, - pubsub_connection: Arc, + pubsub_connection: Arc>, subscription_updates_sender: mpsc::Sender, abort_sender: mpsc::Sender<()>, is_connected: Arc, @@ -729,7 +731,7 @@ impl ChainPubsubActor { #[instrument(skip(pubsub_connection, pubsub_client_config, is_connected), fields(client_id = %client_id))] async fn try_reconnect( - pubsub_connection: Arc, + pubsub_connection: Arc>, pubsub_client_config: PubsubClientConfig, client_id: &str, is_connected: Arc, @@ -817,4 
+819,4 @@ impl ChainPubsubActor { } }); } -} \ No newline at end of file +} diff --git a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs index 759ca4225..ac645412b 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs @@ -538,4 +538,4 @@ pub mod mock { Ok(()) } } -} \ No newline at end of file +} diff --git a/magicblock-chainlink/src/remote_account_provider/pubsub_connection.rs b/magicblock-chainlink/src/remote_account_provider/pubsub_connection.rs index 69deb02f0..d6c610809 100644 --- a/magicblock-chainlink/src/remote_account_provider/pubsub_connection.rs +++ b/magicblock-chainlink/src/remote_account_provider/pubsub_connection.rs @@ -1,7 +1,4 @@ -use std::{ - mem, - sync::Arc, -}; +use std::{mem, sync::Arc}; use arc_swap::ArcSwap; use async_trait::async_trait; @@ -15,10 +12,7 @@ use solana_rpc_client_api::{ config::{RpcAccountInfoConfig, RpcProgramAccountsConfig}, response::{Response, RpcKeyedAccount}, }; -use tokio::{ - sync::Mutex as AsyncMutex, - time, -}; +use tokio::{sync::Mutex as AsyncMutex, time}; use tracing::warn; use super::errors::RemoteAccountProviderResult; @@ -39,7 +33,10 @@ const RECONNECT_ATTEMPT_DELAY: std::time::Duration = std::time::Duration::from_millis(500); #[async_trait] -pub trait PubsubConnection { +pub trait PubsubConnection: Send + Sync + 'static { + async fn new(url: String) -> RemoteAccountProviderResult + where + Self: Sized; fn url(&self) -> &str; async fn account_subscribe( &self, @@ -60,8 +57,9 @@ pub struct PubsubConnectionImpl { reconnect_guard: AsyncMutex<()>, } -impl PubsubConnectionImpl { - pub async fn new(url: String) -> RemoteAccountProviderResult { +#[async_trait] +impl PubsubConnection for PubsubConnectionImpl { + async fn new(url: String) -> RemoteAccountProviderResult { let client = 
Arc::new(PubsubClient::new(&url).await?).into(); let reconnect_guard = AsyncMutex::new(()); Ok(Self { @@ -70,10 +68,6 @@ impl PubsubConnectionImpl { reconnect_guard, }) } -} - -#[async_trait] -impl PubsubConnection for PubsubConnectionImpl { fn url(&self) -> &str { &self.url } @@ -156,3 +150,87 @@ impl PubsubConnection for PubsubConnectionImpl { Ok(()) } } + +#[cfg(test)] +pub mod mock { + use super::*; + use std::sync::Arc; + use tokio::sync::Mutex; + + #[derive(Clone)] + pub struct MockPubsubConnection { + account_subscriptions: Arc>>, + program_subscriptions: Arc>>, + } + + impl MockPubsubConnection { + pub fn new() -> Self { + Self { + account_subscriptions: Arc::new(Mutex::new(Vec::new())), + program_subscriptions: Arc::new(Mutex::new(Vec::new())), + } + } + + pub async fn account_subs(&self) -> Vec { + self.account_subscriptions.lock().await.clone() + } + + pub async fn program_subs(&self) -> Vec { + self.program_subscriptions.lock().await.clone() + } + + pub async fn clear(&self) { + self.account_subscriptions.lock().await.clear(); + self.program_subscriptions.lock().await.clear(); + } + } + + impl Default for MockPubsubConnection { + fn default() -> Self { + Self::new() + } + } + + #[async_trait] + impl PubsubConnection for MockPubsubConnection { + async fn new(_url: String) -> RemoteAccountProviderResult + where + Self: Sized, + { + Ok(Self::new()) + } + fn url(&self) -> &str { + "mock://" + } + + async fn account_subscribe( + &self, + pubkey: &Pubkey, + _config: RpcAccountInfoConfig, + ) -> SubscribeResult { + self.account_subscriptions.lock().await.push(*pubkey); + + // Return empty stream with no-op unsubscribe + let stream = Box::pin(futures_util::stream::empty()); + let unsubscribe: UnsubscribeFn = Box::new(|| Box::pin(async {})); + Ok((stream, unsubscribe)) + } + + async fn program_subscribe( + &self, + program_id: &Pubkey, + _config: RpcProgramAccountsConfig, + ) -> ProgramSubscribeResult { + 
self.program_subscriptions.lock().await.push(*program_id); + + // Return empty stream with no-op unsubscribe + let stream = Box::pin(futures_util::stream::empty()); + let unsubscribe: UnsubscribeFn = Box::new(|| Box::pin(async {})); + Ok((stream, unsubscribe)) + } + + async fn reconnect(&self) -> PubsubClientResult<()> { + Ok(()) + } + } +} diff --git a/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs b/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs index ef49cfece..31c63d68c 100644 --- a/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs +++ b/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs @@ -10,37 +10,35 @@ use solana_rpc_client_api::config::{ }; use tracing::*; +use super::errors::RemoteAccountProviderResult; use super::pubsub_connection::{ - ProgramSubscribeResult, PubsubConnection, PubsubConnectionImpl, - SubscribeResult, UnsubscribeFn, + ProgramSubscribeResult, PubsubConnection, SubscribeResult, UnsubscribeFn, }; -use super::errors::RemoteAccountProviderResult; /// A slot in the connection pool, wrapping a PubSubConnection and /// tracking its subscription count. -struct PooledConnection { - connection: Arc, +struct PooledConnection { + connection: Arc, sub_count: Arc, } /// A pool of PubSubConnections that distributes subscriptions across /// multiple websocket connections to stay within per-stream subscription /// limits. -pub struct PubSubConnectionPool { - connections: Arc>, +pub struct PubSubConnectionPool { + connections: Arc>>, url: String, per_connection_sub_limit: usize, } -impl PubSubConnectionPool { +impl PubSubConnectionPool { /// Creates a new pool with a single initial connection. 
pub async fn new( url: String, limit: usize, - ) -> RemoteAccountProviderResult { + ) -> RemoteAccountProviderResult> { // Creating initial connection also to verify that provider is valid - let connection = - Arc::new(PubsubConnectionImpl::new(url.clone()).await?); + let connection = Arc::new(T::new(url.clone()).await?); let conn = PooledConnection { connection, sub_count: Arc::new(AtomicUsize::new(0)), @@ -123,10 +121,7 @@ impl PubSubConnectionPool { /// as needed. Returns (sub_count, connection). async fn find_or_create_connection( &self, - ) -> RemoteAccountProviderResult<( - Arc, - Arc, - )> { + ) -> RemoteAccountProviderResult<(Arc, Arc)> { // Phase 1: Try to find a slot with capacity under lock { @@ -139,8 +134,7 @@ impl PubSubConnectionPool { } // Phase 2: No slot has capacity; create new connection (async) - let new_connection = - Arc::new(PubsubConnectionImpl::new(self.url.clone()).await?); + let new_connection = Arc::new(T::new(self.url.clone()).await?); // Phase 3: Add new slot to pool under lock let sub_count = Arc::new(AtomicUsize::new(1)); @@ -158,7 +152,7 @@ impl PubSubConnectionPool { fn pick_connection<'a>( &self, guard: &'a Guard, - ) -> Option<&'a PooledConnection> { + ) -> Option<&'a PooledConnection> { self.connections.iter(guard).find(|conn| { conn.sub_count.load(Ordering::SeqCst) < self.per_connection_sub_limit @@ -179,4 +173,4 @@ impl PubSubConnectionPool { }) }) } -} \ No newline at end of file +} From bb2b31d5d577888ba82ff4e6166255dde1ec06e6 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Sat, 7 Feb 2026 12:12:41 +0800 Subject: [PATCH 05/64] chore: account sub tests for pool --- .../pubsub_connection.rs | 21 +- .../pubsub_connection_pool.rs | 188 ++++++++++++++++++ 2 files changed, 198 insertions(+), 11 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/pubsub_connection.rs b/magicblock-chainlink/src/remote_account_provider/pubsub_connection.rs index d6c610809..ab4fa52a5 100644 --- 
a/magicblock-chainlink/src/remote_account_provider/pubsub_connection.rs +++ b/magicblock-chainlink/src/remote_account_provider/pubsub_connection.rs @@ -154,8 +154,7 @@ impl PubsubConnection for PubsubConnectionImpl { #[cfg(test)] pub mod mock { use super::*; - use std::sync::Arc; - use tokio::sync::Mutex; + use std::sync::{Arc, Mutex}; #[derive(Clone)] pub struct MockPubsubConnection { @@ -171,17 +170,17 @@ pub mod mock { } } - pub async fn account_subs(&self) -> Vec { - self.account_subscriptions.lock().await.clone() + pub fn account_subs(&self) -> Vec { + self.account_subscriptions.lock().unwrap().clone() } - pub async fn program_subs(&self) -> Vec { - self.program_subscriptions.lock().await.clone() + pub fn program_subs(&self) -> Vec { + self.program_subscriptions.lock().unwrap().clone() } - pub async fn clear(&self) { - self.account_subscriptions.lock().await.clear(); - self.program_subscriptions.lock().await.clear(); + pub fn clear(&self) { + self.account_subscriptions.lock().unwrap().clear(); + self.program_subscriptions.lock().unwrap().clear(); } } @@ -208,7 +207,7 @@ pub mod mock { pubkey: &Pubkey, _config: RpcAccountInfoConfig, ) -> SubscribeResult { - self.account_subscriptions.lock().await.push(*pubkey); + self.account_subscriptions.lock().unwrap().push(*pubkey); // Return empty stream with no-op unsubscribe let stream = Box::pin(futures_util::stream::empty()); @@ -221,7 +220,7 @@ pub mod mock { program_id: &Pubkey, _config: RpcProgramAccountsConfig, ) -> ProgramSubscribeResult { - self.program_subscriptions.lock().await.push(*program_id); + self.program_subscriptions.lock().unwrap().push(*program_id); // Return empty stream with no-op unsubscribe let stream = Box::pin(futures_util::stream::empty()); diff --git a/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs b/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs index 31c63d68c..504135574 100644 --- 
a/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs +++ b/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs @@ -22,6 +22,15 @@ struct PooledConnection { sub_count: Arc, } +impl Clone for PooledConnection { + fn clone(&self) -> Self { + Self { + connection: Arc::clone(&self.connection), + sub_count: Arc::clone(&self.sub_count), + } + } +} + /// A pool of PubSubConnections that distributes subscriptions across /// multiple websocket connections to stay within per-stream subscription /// limits. @@ -174,3 +183,182 @@ impl PubSubConnectionPool { }) } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::remote_account_provider::pubsub_connection::mock::MockPubsubConnection; + use solana_pubkey::Pubkey; + + fn get_connection_at_index( + pool: &PubSubConnectionPool, + index: usize, + ) -> Option> { + let guard = Guard::new(); + let mut iter = pool.connections.iter(&guard); + iter.nth(index).cloned() + } + + fn assert_account_subs( + pool: &PubSubConnectionPool, + conn_subs: &[Vec], + ) { + for (idx, expected_subs) in conn_subs.iter().enumerate() { + let conn = get_connection_at_index(pool, idx).unwrap(); + assert_eq!( + conn.sub_count.load(Ordering::SeqCst), + expected_subs.len() + ); + for pubkey in expected_subs { + assert!(conn.connection.account_subs().contains(pubkey)); + } + } + } + + async fn create_pool( + limit: usize, + ) -> PubSubConnectionPool { + PubSubConnectionPool::::new( + "mock://".to_string(), + limit, + ) + .await + .unwrap() + } + + async fn account_subscribe( + pool: &PubSubConnectionPool, + pubkey: &Pubkey, + ) -> UnsubscribeFn { + let (_stream, unsub) = pool + .account_subscribe(pubkey, RpcAccountInfoConfig::default()) + .await + .unwrap(); + unsub + } + + #[tokio::test] + async fn test_single_sub() { + let pool = create_pool(2).await; + let pk1 = Pubkey::new_unique(); + + let _unsub1 = account_subscribe(&pool, &pk1).await; + + assert_account_subs(&pool, &[vec![pk1]]); + } + + #[tokio::test] 
+ async fn test_two_subs_one_connection() { + let pool = create_pool(2).await; + let pk1 = Pubkey::new_unique(); + let pk2 = Pubkey::new_unique(); + + let _unsub1 = account_subscribe(&pool, &pk1).await; + let _unsub2 = account_subscribe(&pool, &pk2).await; + + assert_account_subs(&pool, &[vec![pk1, pk2]]); + } + + #[tokio::test] + async fn test_three_subs_two_connections() { + let pool = create_pool(2).await; + let pk1 = Pubkey::new_unique(); + let pk2 = Pubkey::new_unique(); + let pk3 = Pubkey::new_unique(); + + let _unsub1 = account_subscribe(&pool, &pk1).await; + let _unsub2 = account_subscribe(&pool, &pk2).await; + let _unsub3 = account_subscribe(&pool, &pk3).await; + + assert_account_subs(&pool, &[vec![pk1, pk2], vec![pk3]]); + } + + #[tokio::test] + async fn test_unsub_frees_slot_and_new_sub_fills_it() { + let pool = create_pool(2).await; + let pk1 = Pubkey::new_unique(); + let pk2 = Pubkey::new_unique(); + let pk3 = Pubkey::new_unique(); + let pk4 = Pubkey::new_unique(); + let pk5 = Pubkey::new_unique(); + + // Fill 2 connections: [pk1, pk2] and [pk3, pk4] + let _unsub1 = account_subscribe(&pool, &pk1).await; + let unsub2 = account_subscribe(&pool, &pk2).await; + let _unsub3 = account_subscribe(&pool, &pk3).await; + let _unsub4 = account_subscribe(&pool, &pk4).await; + + assert_account_subs(&pool, &[vec![pk1, pk2], vec![pk3, pk4]]); + + // Unsubscribe pk2 from connection 0, freeing a slot + unsub2().await; + + // New sub should go to connection 0 (first with capacity) + let _unsub5 = account_subscribe(&pool, &pk5).await; + + // conn0 now has pk1+pk5 (sub_count=2), conn1 unchanged + assert_account_subs( + &pool, + &[vec![pk1, pk5], vec![pk3, pk4]], + ); + } + + #[tokio::test] + async fn test_elaborate_sub_unsub_lifecycle() { + let pool = create_pool(2).await; + let pks: Vec = (0..8).map(|_| Pubkey::new_unique()).collect(); + + // Sub pk0, pk1 -> conn0 full + let unsub0 = account_subscribe(&pool, &pks[0]).await; + let unsub1 = account_subscribe(&pool, 
&pks[1]).await; + assert_account_subs(&pool, &[vec![pks[0], pks[1]]]); + + // Sub pk2 -> conn1 created + let _unsub2 = account_subscribe(&pool, &pks[2]).await; + assert_account_subs(&pool, &[vec![pks[0], pks[1]], vec![pks[2]]]); + + // Sub pk3 -> conn1 full + let unsub3 = account_subscribe(&pool, &pks[3]).await; + assert_account_subs( + &pool, + &[vec![pks[0], pks[1]], vec![pks[2], pks[3]]], + ); + + // Sub pk4 -> conn2 created + let _unsub4 = account_subscribe(&pool, &pks[4]).await; + assert_account_subs( + &pool, + &[vec![pks[0], pks[1]], vec![pks[2], pks[3]], vec![pks[4]]], + ); + + // Unsub pk0 from conn0 -> conn0 has capacity + unsub0().await; + + // Sub pk5 -> goes to conn0 (first with capacity) + let _unsub5 = account_subscribe(&pool, &pks[5]).await; + assert_account_subs( + &pool, + &[ + vec![pks[1], pks[5]], + vec![pks[2], pks[3]], + vec![pks[4]], + ], + ); + + // Unsub pk1, pk3 -> conn0 and conn1 each drop to 1 + unsub1().await; + unsub3().await; + + // Sub pk6 -> fills conn0, pk7 -> fills conn1 + let _unsub6 = account_subscribe(&pool, &pks[6]).await; + let _unsub7 = account_subscribe(&pool, &pks[7]).await; + assert_account_subs( + &pool, + &[ + vec![pks[5], pks[6]], + vec![pks[2], pks[7]], + vec![pks[4]], + ], + ); + } +} \ No newline at end of file From f220952a0027acde9a7d97ce8f2672618294a56d Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Sat, 7 Feb 2026 15:30:58 +0800 Subject: [PATCH 06/64] test: add comprehensive tests for pubsub pool account and program subscriptions Amp-Thread-ID: https://ampcode.com/threads/T-019c3702-6f33-74eb-89d9-fad10aac35ed Co-authored-by: Amp --- .../pubsub_connection.rs | 3 +- .../pubsub_connection_pool.rs | 358 +++++++++++++++--- 2 files changed, 315 insertions(+), 46 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/pubsub_connection.rs b/magicblock-chainlink/src/remote_account_provider/pubsub_connection.rs index ab4fa52a5..4f541c41a 100644 --- 
a/magicblock-chainlink/src/remote_account_provider/pubsub_connection.rs +++ b/magicblock-chainlink/src/remote_account_provider/pubsub_connection.rs @@ -153,9 +153,10 @@ impl PubsubConnection for PubsubConnectionImpl { #[cfg(test)] pub mod mock { - use super::*; use std::sync::{Arc, Mutex}; + use super::*; + #[derive(Clone)] pub struct MockPubsubConnection { account_subscriptions: Arc>>, diff --git a/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs b/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs index 504135574..d8b14436e 100644 --- a/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs +++ b/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs @@ -10,9 +10,12 @@ use solana_rpc_client_api::config::{ }; use tracing::*; -use super::errors::RemoteAccountProviderResult; -use super::pubsub_connection::{ - ProgramSubscribeResult, PubsubConnection, SubscribeResult, UnsubscribeFn, +use super::{ + errors::RemoteAccountProviderResult, + pubsub_connection::{ + ProgramSubscribeResult, PubsubConnection, SubscribeResult, + UnsubscribeFn, + }, }; /// A slot in the connection pool, wrapping a PubSubConnection and @@ -186,9 +189,10 @@ impl PubSubConnectionPool { #[cfg(test)] mod tests { + use solana_pubkey::Pubkey; + use super::*; use crate::remote_account_provider::pubsub_connection::mock::MockPubsubConnection; - use solana_pubkey::Pubkey; fn get_connection_at_index( pool: &PubSubConnectionPool, @@ -237,128 +241,392 @@ mod tests { unsub } + fn assert_program_subs( + pool: &PubSubConnectionPool, + conn_subs: &[Vec], + ) { + for (idx, expected_subs) in conn_subs.iter().enumerate() { + let conn = get_connection_at_index(pool, idx).unwrap(); + assert_eq!( + conn.sub_count.load(Ordering::SeqCst), + expected_subs.len() + ); + for pubkey in expected_subs { + assert!(conn.connection.program_subs().contains(pubkey)); + } + } + } + + async fn program_subscribe( + pool: &PubSubConnectionPool, + 
program_id: &Pubkey, + ) -> UnsubscribeFn { + let (_stream, unsub) = pool + .program_subscribe(program_id, RpcProgramAccountsConfig::default()) + .await + .unwrap(); + unsub + } + + fn create_pubkeys() -> [Pubkey; N] { + (0..N) + .map(|_| Pubkey::new_unique()) + .collect::>() + .try_into() + .unwrap() + } + #[tokio::test] async fn test_single_sub() { let pool = create_pool(2).await; let pk1 = Pubkey::new_unique(); + // Sub account(pk1) -> Conn0 (1/2) let _unsub1 = account_subscribe(&pool, &pk1).await; - + // Final: Conn0 (pk1) assert_account_subs(&pool, &[vec![pk1]]); } #[tokio::test] async fn test_two_subs_one_connection() { let pool = create_pool(2).await; - let pk1 = Pubkey::new_unique(); - let pk2 = Pubkey::new_unique(); + let [pk1, pk2] = create_pubkeys(); + // Sub account(pk1) -> Conn0 (1/2) let _unsub1 = account_subscribe(&pool, &pk1).await; + // Sub account(pk2) -> Conn0 (2/2 FULL) let _unsub2 = account_subscribe(&pool, &pk2).await; - + // Final: Conn0 (pk1, pk2) assert_account_subs(&pool, &[vec![pk1, pk2]]); } #[tokio::test] async fn test_three_subs_two_connections() { let pool = create_pool(2).await; - let pk1 = Pubkey::new_unique(); - let pk2 = Pubkey::new_unique(); - let pk3 = Pubkey::new_unique(); + let [pk1, pk2, pk3] = create_pubkeys(); + // Sub account(pk1) -> Conn0 (1/2) let _unsub1 = account_subscribe(&pool, &pk1).await; + // Sub account(pk2) -> Conn0 (2/2 FULL) let _unsub2 = account_subscribe(&pool, &pk2).await; + // Sub account(pk3) -> Conn1 created (1/2) [Conn0 is full] let _unsub3 = account_subscribe(&pool, &pk3).await; - + // Final: Conn0 (pk1, pk2), Conn1 (pk3) assert_account_subs(&pool, &[vec![pk1, pk2], vec![pk3]]); } #[tokio::test] async fn test_unsub_frees_slot_and_new_sub_fills_it() { let pool = create_pool(2).await; - let pk1 = Pubkey::new_unique(); - let pk2 = Pubkey::new_unique(); - let pk3 = Pubkey::new_unique(); - let pk4 = Pubkey::new_unique(); - let pk5 = Pubkey::new_unique(); + let [pk1, pk2, pk3, pk4, pk5] = create_pubkeys(); - 
// Fill 2 connections: [pk1, pk2] and [pk3, pk4] + // Fill Conn0 with pk1, pk2 let _unsub1 = account_subscribe(&pool, &pk1).await; let unsub2 = account_subscribe(&pool, &pk2).await; + // Create Conn1, fill with pk3, pk4 let _unsub3 = account_subscribe(&pool, &pk3).await; let _unsub4 = account_subscribe(&pool, &pk4).await; - assert_account_subs(&pool, &[vec![pk1, pk2], vec![pk3, pk4]]); - // Unsubscribe pk2 from connection 0, freeing a slot + // Unsub pk2 from Conn0, freeing a slot unsub2().await; - // New sub should go to connection 0 (first with capacity) + // Sub pk5 goes to Conn0 (first-fit) let _unsub5 = account_subscribe(&pool, &pk5).await; - - // conn0 now has pk1+pk5 (sub_count=2), conn1 unchanged - assert_account_subs( - &pool, - &[vec![pk1, pk5], vec![pk3, pk4]], - ); + // Final: Conn0 (pk1, pk5), Conn1 (pk3, pk4) + assert_account_subs(&pool, &[vec![pk1, pk5], vec![pk3, pk4]]); } #[tokio::test] async fn test_elaborate_sub_unsub_lifecycle() { + // Complex lifecycle: sub/unsub across 3 connections let pool = create_pool(2).await; - let pks: Vec = (0..8).map(|_| Pubkey::new_unique()).collect(); + let pks = create_pubkeys::<8>(); - // Sub pk0, pk1 -> conn0 full + // Sub pk0, pk1 -> Conn0 full let unsub0 = account_subscribe(&pool, &pks[0]).await; let unsub1 = account_subscribe(&pool, &pks[1]).await; assert_account_subs(&pool, &[vec![pks[0], pks[1]]]); - // Sub pk2 -> conn1 created + // Sub pk2 -> Conn1 created let _unsub2 = account_subscribe(&pool, &pks[2]).await; assert_account_subs(&pool, &[vec![pks[0], pks[1]], vec![pks[2]]]); - // Sub pk3 -> conn1 full + // Sub pk3 -> Conn1 full let unsub3 = account_subscribe(&pool, &pks[3]).await; assert_account_subs( &pool, &[vec![pks[0], pks[1]], vec![pks[2], pks[3]]], ); - // Sub pk4 -> conn2 created + // Sub pk4 -> Conn2 created let _unsub4 = account_subscribe(&pool, &pks[4]).await; assert_account_subs( &pool, &[vec![pks[0], pks[1]], vec![pks[2], pks[3]], vec![pks[4]]], ); - // Unsub pk0 from conn0 -> conn0 has 
capacity + // Unsub pk0 from Conn0 -> Conn0 has capacity unsub0().await; - // Sub pk5 -> goes to conn0 (first with capacity) + // Sub pk5 -> goes to Conn0 (first-fit) let _unsub5 = account_subscribe(&pool, &pks[5]).await; assert_account_subs( &pool, - &[ - vec![pks[1], pks[5]], - vec![pks[2], pks[3]], - vec![pks[4]], - ], + &[vec![pks[1], pks[5]], vec![pks[2], pks[3]], vec![pks[4]]], ); - // Unsub pk1, pk3 -> conn0 and conn1 each drop to 1 + // Unsub pk1, pk3 -> Conn0 and Conn1 each drop to 1 unsub1().await; unsub3().await; - // Sub pk6 -> fills conn0, pk7 -> fills conn1 + // Sub pk6 -> fills Conn0, pk7 -> fills Conn1 let _unsub6 = account_subscribe(&pool, &pks[6]).await; let _unsub7 = account_subscribe(&pool, &pks[7]).await; + // Final: Conn0 (pk5, pk6), Conn1 (pk2, pk7), Conn2 (pk4) assert_account_subs( + &pool, + &[vec![pks[5], pks[6]], vec![pks[2], pks[7]], vec![pks[4]]], + ); + } + + #[tokio::test] + async fn test_program_single_sub() { + let pool = create_pool(2).await; + let pid1 = Pubkey::new_unique(); + + // Sub program(pid1) -> Conn0 (1/2) + let _unsub1 = program_subscribe(&pool, &pid1).await; + // Final: Conn0 (pid1) + assert_program_subs(&pool, &[vec![pid1]]); + } + + #[tokio::test] + async fn test_program_two_subs_one_connection() { + let pool = create_pool(2).await; + let [pid1, pid2] = create_pubkeys(); + + // Sub program(pid1) -> Conn0 (1/2) + let _unsub1 = program_subscribe(&pool, &pid1).await; + // Sub program(pid2) -> Conn0 (2/2 FULL) + let _unsub2 = program_subscribe(&pool, &pid2).await; + // Final: Conn0 (pid1, pid2) + assert_program_subs(&pool, &[vec![pid1, pid2]]); + } + + #[tokio::test] + async fn test_program_three_subs_two_connections() { + let pool = create_pool(2).await; + let [pid1, pid2, pid3] = create_pubkeys(); + + // Sub program(pid1) -> Conn0 (1/2) + let _unsub1 = program_subscribe(&pool, &pid1).await; + // Sub program(pid2) -> Conn0 (2/2 FULL) + let _unsub2 = program_subscribe(&pool, &pid2).await; + // Sub program(pid3) -> Conn1 
created (1/2) [Conn0 is full] + let _unsub3 = program_subscribe(&pool, &pid3).await; + // Final: Conn0 (pid1, pid2), Conn1 (pid3) + assert_program_subs(&pool, &[vec![pid1, pid2], vec![pid3]]); + } + + #[tokio::test] + async fn test_program_unsub_frees_slot_and_new_sub_fills_it() { + let pool = create_pool(2).await; + let [pid1, pid2, pid3, pid4, pid5] = create_pubkeys(); + + // Fill Conn0 with pid1, pid2 + let _unsub1 = program_subscribe(&pool, &pid1).await; + let unsub2 = program_subscribe(&pool, &pid2).await; + // Create Conn1, fill with pid3, pid4 + let _unsub3 = program_subscribe(&pool, &pid3).await; + let _unsub4 = program_subscribe(&pool, &pid4).await; + assert_program_subs(&pool, &[vec![pid1, pid2], vec![pid3, pid4]]); + + // Unsub pid2 from Conn0, freeing a slot + unsub2().await; + + // Sub pid5 goes to Conn0 (first-fit) + let _unsub5 = program_subscribe(&pool, &pid5).await; + // Final: Conn0 (pid1, pid5), Conn1 (pid3, pid4) + assert_program_subs(&pool, &[vec![pid1, pid5], vec![pid3, pid4]]); + } + + fn assert_mixed_subs( + pool: &PubSubConnectionPool, + conn_subs: &[(Vec, Vec)], + ) { + for (idx, (expected_account, expected_program)) in + conn_subs.iter().enumerate() + { + let conn = get_connection_at_index(pool, idx).unwrap(); + let expected_total = + expected_account.len() + expected_program.len(); + assert_eq!(conn.sub_count.load(Ordering::SeqCst), expected_total); + for pubkey in expected_account { + assert!(conn.connection.account_subs().contains(pubkey)); + } + for pubkey in expected_program { + assert!(conn.connection.program_subs().contains(pubkey)); + } + } + } + + #[tokio::test] + async fn test_mixed_subs_respect_limit() { + // Accounts and programs both count toward the per-connection limit + let pool = create_pool(2).await; + let ak1 = Pubkey::new_unique(); + let pk1 = Pubkey::new_unique(); + let ak2 = Pubkey::new_unique(); + + // Sub account(ak1) -> Conn0 (1/2) + let _unsub_a1 = account_subscribe(&pool, &ak1).await; + // Sub program(pk1) -> 
Conn0 (2/2 FULL) + let _unsub_p1 = program_subscribe(&pool, &pk1).await; + // Final: Conn0 (ak1 + pk1) + assert_mixed_subs(&pool, &[(vec![ak1], vec![pk1])]); + + // Sub account(ak2) -> Conn1 created (1/2) [Conn0 is full] + let _unsub_a2 = account_subscribe(&pool, &ak2).await; + // Final: Conn0 (ak1 + pk1), Conn1 (ak2) + assert_mixed_subs( + &pool, + &[(vec![ak1], vec![pk1]), (vec![ak2], vec![])], + ); + } + + #[tokio::test] + async fn test_mixed_elaborate_sub_unsub_lifecycle() { + // Complex mixed lifecycle: accounts and programs inter-mixed across 3 connections + let pool = create_pool(2).await; + let aks = create_pubkeys::<4>(); + let pks = create_pubkeys::<4>(); + + // Account + program on Conn0 -> full + let unsub_a0 = account_subscribe(&pool, &aks[0]).await; + let unsub_p0 = program_subscribe(&pool, &pks[0]).await; + assert_mixed_subs(&pool, &[(vec![aks[0]], vec![pks[0]])]); + + // Next account -> Conn1 created + let _unsub_a1 = account_subscribe(&pool, &aks[1]).await; + assert_mixed_subs( + &pool, + &[(vec![aks[0]], vec![pks[0]]), (vec![aks[1]], vec![])], + ); + + // Next program -> Conn1 full + let unsub_p1 = program_subscribe(&pool, &pks[1]).await; + assert_mixed_subs( + &pool, + &[(vec![aks[0]], vec![pks[0]]), (vec![aks[1]], vec![pks[1]])], + ); + + // Next account -> Conn2 created + let _unsub_a2 = account_subscribe(&pool, &aks[2]).await; + assert_mixed_subs( &pool, &[ - vec![pks[5], pks[6]], - vec![pks[2], pks[7]], - vec![pks[4]], + (vec![aks[0]], vec![pks[0]]), + (vec![aks[1]], vec![pks[1]]), + (vec![aks[2]], vec![]), + ], + ); + + // Unsub account from Conn0 -> Conn0 has capacity + unsub_a0().await; + + // Next program -> goes to Conn0 (first-fit) + let _unsub_p2 = program_subscribe(&pool, &pks[2]).await; + assert_mixed_subs( + &pool, + &[ + (vec![], vec![pks[0], pks[2]]), + (vec![aks[1]], vec![pks[1]]), + (vec![aks[2]], vec![]), + ], + ); + + // Unsub program from Conn0 and Conn1 + unsub_p0().await; + unsub_p1().await; + + // Next account -> Conn0 
(first with capacity) + let _unsub_a3 = account_subscribe(&pool, &aks[3]).await; + // Next program -> Conn1 (Conn0 now full, Conn1 has capacity) + let _unsub_p3 = program_subscribe(&pool, &pks[3]).await; + // Final: Conn0 (ak3 + pk2), Conn1 (ak1 + pk3), Conn2 (ak2) + assert_mixed_subs( + &pool, + &[ + (vec![aks[3]], vec![pks[2]]), + (vec![aks[1]], vec![pks[3]]), + (vec![aks[2]], vec![]), ], ); } -} \ No newline at end of file + + #[tokio::test] + async fn test_program_elaborate_sub_unsub_lifecycle() { + // Complex lifecycle: program subscriptions with strategic unsubs across 3 connections + let pool = create_pool(2).await; + let pids = create_pubkeys::<8>(); + + // Sub pid0, pid1 -> Conn0 full + let unsub0 = program_subscribe(&pool, &pids[0]).await; + let unsub1 = program_subscribe(&pool, &pids[1]).await; + assert_program_subs(&pool, &[vec![pids[0], pids[1]]]); + + // Sub pid2 -> Conn1 created + let _unsub2 = program_subscribe(&pool, &pids[2]).await; + assert_program_subs(&pool, &[vec![pids[0], pids[1]], vec![pids[2]]]); + + // Sub pid3 -> Conn1 full + let unsub3 = program_subscribe(&pool, &pids[3]).await; + assert_program_subs( + &pool, + &[vec![pids[0], pids[1]], vec![pids[2], pids[3]]], + ); + + // Sub pid4 -> Conn2 created + let _unsub4 = program_subscribe(&pool, &pids[4]).await; + assert_program_subs( + &pool, + &[ + vec![pids[0], pids[1]], + vec![pids[2], pids[3]], + vec![pids[4]], + ], + ); + + // Unsub pid0 -> Conn0 has capacity + unsub0().await; + + // Sub pid5 -> Conn0 (first-fit) + let _unsub5 = program_subscribe(&pool, &pids[5]).await; + assert_program_subs( + &pool, + &[ + vec![pids[1], pids[5]], + vec![pids[2], pids[3]], + vec![pids[4]], + ], + ); + + // Unsub pid1, pid3 -> Conn0 and Conn1 each drop to 1 + unsub1().await; + unsub3().await; + + // Sub pid6 -> Conn0 (first-fit), pid7 -> Conn1 + let _unsub6 = program_subscribe(&pool, &pids[6]).await; + let _unsub7 = program_subscribe(&pool, &pids[7]).await; + // Final: Conn0 (pid5, pid6), Conn1 (pid2, 
pid7), Conn2 (pid4) + assert_program_subs( + &pool, + &[ + vec![pids[5], pids[6]], + vec![pids[2], pids[7]], + vec![pids[4]], + ], + ); + } +} From 50a86bd5391fb4c15b4d4f2fd529548b8738e60f Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Sat, 7 Feb 2026 16:17:23 +0800 Subject: [PATCH 07/64] chore: fix overkill and incomplete error conversion --- .../src/remote_account_provider/errors.rs | 14 -------------- .../pubsub_connection_pool.rs | 17 +++++++++++++++-- 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/errors.rs b/magicblock-chainlink/src/remote_account_provider/errors.rs index 8db91fa02..cc1f255cf 100644 --- a/magicblock-chainlink/src/remote_account_provider/errors.rs +++ b/magicblock-chainlink/src/remote_account_provider/errors.rs @@ -112,7 +112,6 @@ pub enum RemoteAccountProviderError { )] LoaderV4StateDeserializationFailed(Pubkey, String), } - impl From for RemoteAccountProviderError { @@ -120,16 +119,3 @@ impl From Self::PubsubClientError(Box::new(e)) } } - -impl From - for solana_pubsub_client::pubsub_client::PubsubClientError -{ - fn from(e: RemoteAccountProviderError) -> Self { - match e { - RemoteAccountProviderError::PubsubClientError(err) => *err, - _ => solana_pubsub_client::pubsub_client::PubsubClientError::UrlParseError( - url::ParseError::Overflow, - ), - } - } -} diff --git a/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs b/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs index d8b14436e..6cc933c31 100644 --- a/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs +++ b/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs @@ -5,6 +5,7 @@ use std::sync::{ use scc::{ebr::Guard, Queue}; use solana_pubkey::Pubkey; +use solana_pubsub_client::pubsub_client::PubsubClientError; use solana_rpc_client_api::config::{ RpcAccountInfoConfig, RpcProgramAccountsConfig, }; @@ -81,7 +82,13 @@ impl 
PubSubConnectionPool { let (sub_count, connection) = match self.find_or_create_connection().await { Ok(result) => result, - Err(err) => return Err(err.into()), + Err(err) => { + return Err(PubsubClientError::SubscribeFailed { + reason: "Unable to find or create connection" + .to_string(), + message: format!("{err:?}"), + }); + } }; // Subscribe using the selected connection @@ -107,7 +114,13 @@ impl PubSubConnectionPool { let (sub_count, connection) = match self.find_or_create_connection().await { Ok(result) => result, - Err(err) => return Err(err.into()), + Err(err) => { + return Err(PubsubClientError::SubscribeFailed { + reason: "Unable to find or create connection" + .to_string(), + message: format!("{err:?}"), + }); + } }; // Subscribe using the selected connection From 4512806340def5afff486008788a79e30b4644ec Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Sat, 7 Feb 2026 20:17:14 +0800 Subject: [PATCH 08/64] chore: prevent multi connection creation race condition --- .../pubsub_connection_pool.rs | 77 ++++++++++++++----- 1 file changed, 57 insertions(+), 20 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs b/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs index 6cc933c31..1a0593397 100644 --- a/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs +++ b/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs @@ -3,6 +3,8 @@ use std::sync::{ Arc, }; +use tokio::sync::Mutex as AsyncMutex; + use scc::{ebr::Guard, Queue}; use solana_pubkey::Pubkey; use solana_pubsub_client::pubsub_client::PubsubClientError; @@ -42,6 +44,7 @@ pub struct PubSubConnectionPool { connections: Arc>>, url: String, per_connection_sub_limit: usize, + new_connection_guard: AsyncMutex<()>, } impl PubSubConnectionPool { @@ -65,6 +68,7 @@ impl PubSubConnectionPool { connections: Arc::new(queue), url, per_connection_sub_limit: limit, + new_connection_guard: 
AsyncMutex::new(()), }) } @@ -147,41 +151,74 @@ impl PubSubConnectionPool { async fn find_or_create_connection( &self, ) -> RemoteAccountProviderResult<(Arc, Arc)> { - // Phase 1: Try to find a slot with capacity under lock - - { + fn try_reserve_connection( + pool: &PubSubConnectionPool, + ) -> Option<(Arc, Arc)> { let guard = Guard::new(); - if let Some(pooled_conn) = self.pick_connection(&guard) { - let sub_count = Arc::clone(&pooled_conn.sub_count); - sub_count.fetch_add(1, Ordering::SeqCst); - return Ok((sub_count, Arc::clone(&pooled_conn.connection))); - } + pool.try_reserve_slot(&guard) } - // Phase 2: No slot has capacity; create new connection (async) - let new_connection = Arc::new(T::new(self.url.clone()).await?); + // Phase 1: fast path — try to reserve a slot without locking + if let Some(result) = try_reserve_connection(self) { + return Ok(result); + } + + // Serialize connection creation + let _new_conn_guard = self.new_connection_guard.lock().await; + + // Phase 2: re-check under lock — another task may have + // created a connection while we waited + if let Some(result) = try_reserve_connection(self) { + return Ok(result); + } - // Phase 3: Add new slot to pool under lock + // Phase 3: still no capacity — create and push new connection + let new_connection = Arc::new(T::new(self.url.clone()).await?); let sub_count = Arc::new(AtomicUsize::new(1)); let conn = PooledConnection { connection: Arc::clone(&new_connection), sub_count: Arc::clone(&sub_count), }; self.connections.push(conn); - trace!("Created new pooled connection"); + trace!( + url = self.url, + connection_count = self.connections.len(), + "Created new pooled connection" + ); Ok((sub_count, new_connection)) } - /// Picks a slot with available capacity using first-fit. - /// Returns None if no slot has capacity (need to create new connection). 
- fn pick_connection<'a>( + /// Tries to atomically reserve a subscription slot on an existing + /// connection via CAS, ensuring we never exceed + /// `per_connection_sub_limit`. + fn try_reserve_slot<'a>( &self, guard: &'a Guard, - ) -> Option<&'a PooledConnection> { - self.connections.iter(guard).find(|conn| { - conn.sub_count.load(Ordering::SeqCst) - < self.per_connection_sub_limit - }) + ) -> Option<(Arc, Arc)> { + for conn in self.connections.iter(guard) { + let sub_count = &conn.sub_count; + loop { + let current = sub_count.load(Ordering::SeqCst); + if current >= self.per_connection_sub_limit { + break; + } + if sub_count + .compare_exchange( + current, + current + 1, + Ordering::SeqCst, + Ordering::SeqCst, + ) + .is_ok() + { + return Some(( + Arc::clone(&conn.sub_count), + Arc::clone(&conn.connection), + )); + } + } + } + None } /// Wraps a raw unsubscribe function to also decrement the sub counter for the From 29158ee3b4e385248518ada820b0100cd6c9d710 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Sat, 7 Feb 2026 21:01:38 +0800 Subject: [PATCH 09/64] chore: reconnect pubsub pool on recovery Amp-Thread-ID: https://ampcode.com/threads/T-019c3830-1c06-7406-831c-fe2af2fd8314 Co-authored-by: Amp --- .../chain_pubsub_actor.rs | 2 +- .../pubsub_connection_pool.rs | 39 +++++++++++++++---- 2 files changed, 33 insertions(+), 8 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs index 15f0f8f1d..18a13798f 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs @@ -737,7 +737,7 @@ impl ChainPubsubActor { is_connected: Arc, ) -> RemoteAccountProviderResult<()> { // 1. 
Try to reconnect the pubsub connection - pubsub_connection.clear_connections(); + pubsub_connection.reconnect().await?; // Make a sub to any account and unsub immediately to verify connection let pubkey = Pubkey::new_unique(); let config = RpcAccountInfoConfig { diff --git a/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs b/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs index 1a0593397..a489c2808 100644 --- a/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs +++ b/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs @@ -3,14 +3,16 @@ use std::sync::{ Arc, }; -use tokio::sync::Mutex as AsyncMutex; - use scc::{ebr::Guard, Queue}; use solana_pubkey::Pubkey; -use solana_pubsub_client::pubsub_client::PubsubClientError; +use solana_pubsub_client::{ + nonblocking::pubsub_client::PubsubClientResult, + pubsub_client::PubsubClientError, +}; use solana_rpc_client_api::config::{ RpcAccountInfoConfig, RpcProgramAccountsConfig, }; +use tokio::sync::Mutex as AsyncMutex; use tracing::*; use super::{ @@ -141,9 +143,32 @@ impl PubSubConnectionPool { } } - /// Reconnects the pool: clears state and reconnects the first slot. - pub fn clear_connections(&self) { + /// Reconnects the pool: clears state and tries to reconnect the + /// first connection to ensure that the provider is working + /// NOTE: assumes that all existing subscriptions have been dropped. + pub async fn reconnect(&self) -> PubsubClientResult<()> { while self.connections.pop().is_some() {} + // We cannot reconnect an existing connection due to the lockless queue + // not allowing us to call the async reconnect method of the first connection. 
+ // Instead we clear all of them and then create a new one, just to verify that + // the provider is working again + let pooled_conn = match T::new(self.url.clone()).await { + Ok(conn) => { + conn.reconnect().await?; + PooledConnection { + connection: Arc::new(conn), + sub_count: Arc::new(AtomicUsize::new(0)), + } + } + Err(err) => { + return Err(PubsubClientError::ConnectionClosed(format!( + "{err:?}" + ))); + } + }; + // Since we already created it we keep it as well + self.connections.push(pooled_conn); + Ok(()) } /// Finds a connection for a new subscription, creating new connections @@ -191,9 +216,9 @@ impl PubSubConnectionPool { /// Tries to atomically reserve a subscription slot on an existing /// connection via CAS, ensuring we never exceed /// `per_connection_sub_limit`. - fn try_reserve_slot<'a>( + fn try_reserve_slot( &self, - guard: &'a Guard, + guard: &Guard, ) -> Option<(Arc, Arc)> { for conn in self.connections.iter(guard) { let sub_count = &conn.sub_count; From ec5073a070f5319eb4dc06248d04804da742c061 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Sat, 7 Feb 2026 21:15:37 +0800 Subject: [PATCH 10/64] refactor: extract subscribe logic to helper method Amp-Thread-ID: https://ampcode.com/threads/T-019c3838-288d-732e-bcf0-bafe984b350b Co-authored-by: Amp --- .../pubsub_connection_pool.rs | 52 ++++++++++--------- 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs b/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs index a489c2808..16ea077a8 100644 --- a/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs +++ b/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs @@ -85,30 +85,11 @@ impl PubSubConnectionPool { pubkey: &Pubkey, config: RpcAccountInfoConfig, ) -> SubscribeResult { - let (sub_count, connection) = - match self.find_or_create_connection().await { - Ok(result) => result, - Err(err) 
=> { - return Err(PubsubClientError::SubscribeFailed { - reason: "Unable to find or create connection" - .to_string(), - message: format!("{err:?}"), - }); - } - }; - - // Subscribe using the selected connection - match connection.account_subscribe(pubkey, config).await { - Ok((stream, raw_unsub)) => { - let wrapped_unsub = self.wrap_unsub(raw_unsub, sub_count); - Ok((stream, wrapped_unsub)) - } - Err(err) => { - // Rollback: decrement count - sub_count.fetch_sub(1, Ordering::SeqCst); - Err(err) - } - } + let pubkey = *pubkey; + self.subscribe_with_pool(|connection| async move { + connection.account_subscribe(&pubkey, config).await + }) + .await } /// Subscribes to program account updates, distributing across pool slots. @@ -117,6 +98,27 @@ impl PubSubConnectionPool { program_id: &Pubkey, config: RpcProgramAccountsConfig, ) -> ProgramSubscribeResult { + let program_id = *program_id; + self.subscribe_with_pool(|connection| async move { + connection.program_subscribe(&program_id, config).await + }) + .await + } + + /// Helper to perform a subscription with a provided closure, + /// handling find-or-create, error mapping, rollback, and unsubscribe wrapping. 
+ async fn subscribe_with_pool( + &self, + subscribe_fn: F, + ) -> PubsubClientResult<(S, UnsubscribeFn)> + where + F: FnOnce(Arc) -> Fut, + Fut: std::future::Future< + Output = PubsubClientResult<(S, UnsubscribeFn)>, + >, + S: 'static, + { + // Find or create a connection let (sub_count, connection) = match self.find_or_create_connection().await { Ok(result) => result, @@ -130,7 +132,7 @@ impl PubSubConnectionPool { }; // Subscribe using the selected connection - match connection.program_subscribe(program_id, config).await { + match subscribe_fn(connection).await { Ok((stream, raw_unsub)) => { let wrapped_unsub = self.wrap_unsub(raw_unsub, sub_count); Ok((stream, wrapped_unsub)) From a37d798fb27ce944958682e4a72b5eebb4248ce4 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Mon, 9 Feb 2026 15:37:57 +0800 Subject: [PATCH 11/64] chore: harden pubsub reconnect to ensure all existing subs are closed --- .../chain_pubsub_actor.rs | 26 ++++++++++++++----- 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs index 18a13798f..53beadaa6 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs @@ -143,6 +143,14 @@ impl ChainPubsubActor { shutdown_token: CancellationToken, ) { info!(client_id = client_id, "Shutting down pubsub actor"); + Self::unsubscribe_all(subscriptions, program_subs); + shutdown_token.cancel(); + } + + fn unsubscribe_all( + subscriptions: Arc>>, + program_subs: Arc>>, + ) { let subs = subscriptions .lock() .expect("subscriptions lock poisoned") @@ -157,7 +165,6 @@ impl ChainPubsubActor { for (_, sub) in subs { sub.cancellation_token.cancel(); } - shutdown_token.cancel(); } pub fn subscription_count(&self, filter: &[Pubkey]) -> usize { @@ -387,6 +394,8 @@ impl ChainPubsubActor { let result = 
Self::try_reconnect( pubsub_connection, pubsub_client_config, + subscriptions, + program_subs, client_id, is_connected, ) @@ -729,14 +738,19 @@ impl ChainPubsubActor { }); } - #[instrument(skip(pubsub_connection, pubsub_client_config, is_connected), fields(client_id = %client_id))] + #[instrument(skip(pubsub_connection, pubsub_client_config, subs, program_subs, is_connected), fields(client_id = %client_id))] async fn try_reconnect( pubsub_connection: Arc>, pubsub_client_config: PubsubClientConfig, + subs: Arc>>, + program_subs: Arc>>, client_id: &str, is_connected: Arc, ) -> RemoteAccountProviderResult<()> { - // 1. Try to reconnect the pubsub connection + // 1. Ensure we cleaned all existing subscriptions + Self::unsubscribe_all(subs, program_subs); + + // 2. Try to reconnect the pubsub connection pubsub_connection.reconnect().await?; // Make a sub to any account and unsub immediately to verify connection let pubkey = Pubkey::new_unique(); @@ -746,7 +760,7 @@ impl ChainPubsubActor { ..Default::default() }; - // 2. Try to subscribe to an account to verify connection + // 3. Try to subscribe to an account to verify connection let (_, unsubscribe) = match pubsub_connection.account_subscribe(&pubkey, config).await { Ok(res) => res, @@ -759,10 +773,10 @@ impl ChainPubsubActor { } }; - // 3. Unsubscribe immediately + // 4. Unsubscribe immediately unsubscribe().await; - // 4. We are now connected again + // 5. 
We are now connected again is_connected.store(true, Ordering::SeqCst); Ok(()) } From 5c4a206edcdd99419a078a8ca256b84822c6b37a Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Mon, 9 Feb 2026 17:01:21 +0800 Subject: [PATCH 12/64] feat: add pubsub_client_connections_gauge metric Amp-Thread-ID: https://ampcode.com/threads/T-019c419c-5b98-774b-b9d6-40733b964154 Co-authored-by: Amp --- .../chain_pubsub_actor.rs | 24 +++++++++++-------- .../pubsub_connection_pool.rs | 14 ++++++++++- magicblock-metrics/src/metrics/mod.rs | 18 ++++++++++++++ 3 files changed, 45 insertions(+), 11 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs index 53beadaa6..a14df193b 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs @@ -98,15 +98,19 @@ impl ChainPubsubActor { .per_stream_subscription_limit .unwrap_or(usize::MAX); let pubsub_connection = { - let pubsub_pool = PubSubConnectionPool::new(url, limit) - .await - .inspect_err(|err| { - error!( - client_id = client_id, - err = ?err, - "Failed to connect to provider" - ) - })?; + let pubsub_pool = PubSubConnectionPool::new( + url, + limit, + client_id.to_string(), + ) + .await + .inspect_err(|err| { + error!( + client_id = client_id, + err = ?err, + "Failed to connect to provider" + ) + })?; Arc::new(pubsub_pool) }; @@ -833,4 +837,4 @@ impl ChainPubsubActor { } }); } -} +} \ No newline at end of file diff --git a/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs b/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs index 16ea077a8..1f18c3749 100644 --- a/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs +++ b/magicblock-chainlink/src/remote_account_provider/pubsub_connection_pool.rs @@ -3,6 +3,7 @@ use std::sync::{ Arc, }; +use 
magicblock_metrics::metrics; use scc::{ebr::Guard, Queue}; use solana_pubkey::Pubkey; use solana_pubsub_client::{ @@ -47,6 +48,7 @@ pub struct PubSubConnectionPool { url: String, per_connection_sub_limit: usize, new_connection_guard: AsyncMutex<()>, + client_id: String, } impl PubSubConnectionPool { @@ -54,6 +56,7 @@ impl PubSubConnectionPool { pub async fn new( url: String, limit: usize, + client_id: String, ) -> RemoteAccountProviderResult> { // Creating initial connection also to verify that provider is valid let connection = Arc::new(T::new(url.clone()).await?); @@ -66,11 +69,13 @@ impl PubSubConnectionPool { queue.push(conn); queue }; + metrics::set_pubsub_client_connections_count(&client_id, 1); Ok(Self { connections: Arc::new(queue), url, per_connection_sub_limit: limit, new_connection_guard: AsyncMutex::new(()), + client_id, }) } @@ -170,6 +175,7 @@ impl PubSubConnectionPool { }; // Since we already created it we keep it as well self.connections.push(pooled_conn); + metrics::set_pubsub_client_connections_count(&self.client_id, 1); Ok(()) } @@ -207,9 +213,14 @@ impl PubSubConnectionPool { sub_count: Arc::clone(&sub_count), }; self.connections.push(conn); + let connection_count = self.connections.len(); + metrics::set_pubsub_client_connections_count( + &self.client_id, + connection_count, + ); trace!( url = self.url, - connection_count = self.connections.len(), + connection_count, "Created new pooled connection" ); Ok((sub_count, new_connection)) @@ -302,6 +313,7 @@ mod tests { PubSubConnectionPool::::new( "mock://".to_string(), limit, + "test_client".to_string(), ) .await .unwrap() diff --git a/magicblock-metrics/src/metrics/mod.rs b/magicblock-metrics/src/metrics/mod.rs index 1f38cb74f..439b8971e 100644 --- a/magicblock-metrics/src/metrics/mod.rs +++ b/magicblock-metrics/src/metrics/mod.rs @@ -458,6 +458,14 @@ lazy_static::lazy_static! 
{ ), &["client_id"], ).unwrap(); + + static ref PUBSUB_CLIENT_CONNECTIONS_GAUGE: IntGaugeVec = IntGaugeVec::new( + Opts::new( + "pubsub_client_connections_gauge", + "Number of pooled websocket connections per pubsub client" + ), + &["client_id"], + ).unwrap(); } pub(crate) fn register() { @@ -533,6 +541,7 @@ pub(crate) fn register() { register!(PUBSUB_CLIENT_FAILED_RECONNECT_ATTEMPTS_GAUGE); register!(PUBSUB_CLIENT_RESUBSCRIBE_DELAY_MILLISECONDS_GAUGE); register!(PUBSUB_CLIENT_RESUBSCRIBED_GAUGE); + register!(PUBSUB_CLIENT_CONNECTIONS_GAUGE); }); } @@ -823,3 +832,12 @@ pub fn set_pubsub_client_resubscribed_count(client_id: &str, count: usize) { .with_label_values(&[client_id]) .set(count as i64); } + +pub fn set_pubsub_client_connections_count( + client_id: &str, + count: usize, +) { + PUBSUB_CLIENT_CONNECTIONS_GAUGE + .with_label_values(&[client_id]) + .set(count as i64); +} \ No newline at end of file From 5c98860b0dcc506ab09023f3e340395efa43ad0f Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Mon, 9 Feb 2026 17:15:33 +0800 Subject: [PATCH 13/64] fix: fmt --- .../chain_pubsub_actor.rs | 25 ++++++++----------- magicblock-metrics/src/metrics/mod.rs | 7 ++---- 2 files changed, 13 insertions(+), 19 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs index a14df193b..a708a6739 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs @@ -98,19 +98,16 @@ impl ChainPubsubActor { .per_stream_subscription_limit .unwrap_or(usize::MAX); let pubsub_connection = { - let pubsub_pool = PubSubConnectionPool::new( - url, - limit, - client_id.to_string(), - ) - .await - .inspect_err(|err| { - error!( - client_id = client_id, - err = ?err, - "Failed to connect to provider" - ) - })?; + let pubsub_pool = + PubSubConnectionPool::new(url, limit, 
client_id.to_string()) + .await + .inspect_err(|err| { + error!( + client_id = client_id, + err = ?err, + "Failed to connect to provider" + ) + })?; Arc::new(pubsub_pool) }; @@ -837,4 +834,4 @@ impl ChainPubsubActor { } }); } -} \ No newline at end of file +} diff --git a/magicblock-metrics/src/metrics/mod.rs b/magicblock-metrics/src/metrics/mod.rs index 439b8971e..3a375974a 100644 --- a/magicblock-metrics/src/metrics/mod.rs +++ b/magicblock-metrics/src/metrics/mod.rs @@ -833,11 +833,8 @@ pub fn set_pubsub_client_resubscribed_count(client_id: &str, count: usize) { .set(count as i64); } -pub fn set_pubsub_client_connections_count( - client_id: &str, - count: usize, -) { +pub fn set_pubsub_client_connections_count(client_id: &str, count: usize) { PUBSUB_CLIENT_CONNECTIONS_GAUGE .with_label_values(&[client_id]) .set(count as i64); -} \ No newline at end of file +} From 58089c84f973c1f0bfd84442d99335fdca168829 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Tue, 10 Feb 2026 11:58:20 +0800 Subject: [PATCH 14/64] tmp: dial down max subs per connection --- .../src/remote_account_provider/pubsub_common.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/magicblock-chainlink/src/remote_account_provider/pubsub_common.rs b/magicblock-chainlink/src/remote_account_provider/pubsub_common.rs index 75bf67108..d1872fa60 100644 --- a/magicblock-chainlink/src/remote_account_provider/pubsub_common.rs +++ b/magicblock-chainlink/src/remote_account_provider/pubsub_common.rs @@ -97,7 +97,7 @@ pub enum ChainPubsubActorMessage { }, } -pub const HELIUS_PER_STREAM_SUBSCRIPTION_LIMIT: usize = 100; +pub const HELIUS_PER_STREAM_SUBSCRIPTION_LIMIT: usize = 80; pub const SUBSCRIPTION_UPDATE_CHANNEL_SIZE: usize = 5_000; pub const MESSAGE_CHANNEL_SIZE: usize = 1_000; From 6bb8e0d1ac245acb9cce42de138b8faacb0ec116 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Tue, 10 Feb 2026 16:27:38 +0800 Subject: [PATCH 15/64] chore: fix unsub bug --- 
.../src/remote_account_provider/chain_pubsub_actor.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs index 328920dda..04ffc1467 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs @@ -722,10 +722,7 @@ impl ChainPubsubActor { is_connected.clone(), &format!("Program subscription ended for {program_pubkey}") ); - // Return early - abort_and_signal_connection_issue cancels all - // subscriptions, triggering cleanup via the cancellation path - // above. No need to run unsubscribe/cleanup here. - return; + break; } } } From 02f3dd84837f6b9f17a14c2a89d52662853d113c Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Wed, 11 Feb 2026 09:50:56 +0800 Subject: [PATCH 16/64] chore: subscriptions fn returns hashset --- .../src/remote_account_provider/chain_laser_client.rs | 2 +- .../src/remote_account_provider/chain_pubsub_actor.rs | 6 +++--- .../src/remote_account_provider/chain_pubsub_client.rs | 6 +++--- .../src/remote_account_provider/chain_updates_client.rs | 2 +- magicblock-chainlink/src/remote_account_provider/mod.rs | 2 +- magicblock-chainlink/src/submux/mod.rs | 6 +++--- 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs index 09fd96cb4..10fce5928 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs @@ -181,7 +181,7 @@ impl ChainPubsubClient for ChainLaserClientImpl { None } - fn subscriptions(&self) -> Option> { + fn subscriptions(&self) -> Option> { None } diff --git 
a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs index 04ffc1467..e227d48da 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs @@ -1,5 +1,5 @@ use std::{ - collections::HashMap, + collections::{HashMap, HashSet}, sync::{ atomic::{AtomicBool, AtomicU16, Ordering}, Arc, Mutex, @@ -186,9 +186,9 @@ impl ChainPubsubActor { } } - pub fn subscriptions(&self) -> Vec { + pub fn subscriptions(&self) -> HashSet { if !self.is_connected.load(Ordering::SeqCst) { - return vec![]; + return HashSet::new(); } let subs = self .subscriptions diff --git a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs index ac645412b..b2dbf9a9c 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs @@ -53,7 +53,7 @@ pub trait ChainPubsubClient: Send + Sync + Clone + 'static { exclude: Option<&[Pubkey]>, ) -> Option<(usize, usize)>; - fn subscriptions(&self) -> Option>; + fn subscriptions(&self) -> Option>; fn subs_immediately(&self) -> bool; @@ -211,7 +211,7 @@ impl ChainPubsubClient for ChainPubsubClientImpl { Some((total, filtered)) } - fn subscriptions(&self) -> Option> { + fn subscriptions(&self) -> Option> { Some(self.actor.subscriptions()) } @@ -486,7 +486,7 @@ pub mod mock { Some((total, filtered)) } - fn subscriptions(&self) -> Option> { + fn subscriptions(&self) -> Option> { let subs = self.subscribed_pubkeys.lock(); Some(subs.iter().copied().collect()) } diff --git a/magicblock-chainlink/src/remote_account_provider/chain_updates_client.rs b/magicblock-chainlink/src/remote_account_provider/chain_updates_client.rs index 35532d70d..bf73936c8 100644 --- 
a/magicblock-chainlink/src/remote_account_provider/chain_updates_client.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_updates_client.rs @@ -161,7 +161,7 @@ impl ChainPubsubClient for ChainUpdatesClient { } } - fn subscriptions(&self) -> Option> { + fn subscriptions(&self) -> Option> { use ChainUpdatesClient::*; match self { WebSocket(client) => client.subscriptions(), diff --git a/magicblock-chainlink/src/remote_account_provider/mod.rs b/magicblock-chainlink/src/remote_account_provider/mod.rs index b20a7184f..c739cb5ee 100644 --- a/magicblock-chainlink/src/remote_account_provider/mod.rs +++ b/magicblock-chainlink/src/remote_account_provider/mod.rs @@ -250,7 +250,7 @@ impl RemoteAccountProvider { if tracing::enabled!(tracing::Level::DEBUG) { pubsub_client.subscriptions().unwrap_or_default() } else { - vec![] + HashSet::new() }; let (pubsub_total, pubsub_without_never_evict) = diff --git a/magicblock-chainlink/src/submux/mod.rs b/magicblock-chainlink/src/submux/mod.rs index a06b5deb4..2b9d086fe 100644 --- a/magicblock-chainlink/src/submux/mod.rs +++ b/magicblock-chainlink/src/submux/mod.rs @@ -744,7 +744,7 @@ where maybe_forward_now } - fn get_subscriptions(clients: &[Arc]) -> Option> { + fn get_subscriptions(clients: &[Arc]) -> Option> { let mut all_subs = HashSet::new(); for client in clients { if let Some(subs) = client.subscriptions() { @@ -753,7 +753,7 @@ where return None; } } - Some(all_subs.into_iter().collect()) + Some(all_subs) } /// Number of clients that must confirm an account subscription for it to be considered active. @@ -890,7 +890,7 @@ where /// Gets the union of all subscriptions across all inner clients. /// Unless one is reconnecting, this should be identical to /// getting it from a single inner client. 
- fn subscriptions(&self) -> Option> { + fn subscriptions(&self) -> Option> { Self::get_subscriptions(&self.clients) } From b8ba3e741ac3a01a3f19a5e6b6356f4d1a7b9d39 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Wed, 11 Feb 2026 09:53:00 +0800 Subject: [PATCH 17/64] chore: subs union --- .../src/remote_account_provider/chain_laser_client.rs | 2 +- .../src/remote_account_provider/chain_pubsub_client.rs | 10 +++++++--- .../remote_account_provider/chain_updates_client.rs | 6 +++--- .../src/remote_account_provider/mod.rs | 8 ++++---- .../remote_account_provider/subscription_reconciler.rs | 2 +- magicblock-chainlink/src/submux/mod.rs | 4 ++-- 6 files changed, 18 insertions(+), 14 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs index 10fce5928..a8dc54cd6 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs @@ -181,7 +181,7 @@ impl ChainPubsubClient for ChainLaserClientImpl { None } - fn subscriptions(&self) -> Option> { + fn subscriptions_union(&self) -> Option> { None } diff --git a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs index b2dbf9a9c..fa5b2c147 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs @@ -53,7 +53,7 @@ pub trait ChainPubsubClient: Send + Sync + Clone + 'static { exclude: Option<&[Pubkey]>, ) -> Option<(usize, usize)>; - fn subscriptions(&self) -> Option>; + fn subscriptions_union(&self) -> Option>; fn subs_immediately(&self) -> bool; @@ -211,7 +211,7 @@ impl ChainPubsubClient for ChainPubsubClientImpl { Some((total, filtered)) } - fn subscriptions(&self) -> Option> { + fn subscriptions_union(&self) -> Option> { 
Some(self.actor.subscriptions()) } @@ -486,7 +486,11 @@ pub mod mock { Some((total, filtered)) } - fn subscriptions(&self) -> Option> { + /// Returns the subscriptions of a client or the union of subscriptions + /// if there are multiple clients. + /// This means that if any client is subscribed to a pubkey, it will be + /// included in the returned set even if other clients are not subscribed to it. + fn subscriptions_union(&self) -> Option> { let subs = self.subscribed_pubkeys.lock(); Some(subs.iter().copied().collect()) } diff --git a/magicblock-chainlink/src/remote_account_provider/chain_updates_client.rs b/magicblock-chainlink/src/remote_account_provider/chain_updates_client.rs index bf73936c8..ca756638b 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_updates_client.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_updates_client.rs @@ -161,11 +161,11 @@ impl ChainPubsubClient for ChainUpdatesClient { } } - fn subscriptions(&self) -> Option> { + fn subscriptions_union(&self) -> Option> { use ChainUpdatesClient::*; match self { - WebSocket(client) => client.subscriptions(), - Laser(client) => client.subscriptions(), + WebSocket(client) => client.subscriptions_union(), + Laser(client) => client.subscriptions_union(), } } diff --git a/magicblock-chainlink/src/remote_account_provider/mod.rs b/magicblock-chainlink/src/remote_account_provider/mod.rs index c739cb5ee..c30af9f93 100644 --- a/magicblock-chainlink/src/remote_account_provider/mod.rs +++ b/magicblock-chainlink/src/remote_account_provider/mod.rs @@ -248,7 +248,7 @@ impl RemoteAccountProvider { let all_pubsub_subs = if tracing::enabled!(tracing::Level::DEBUG) { - pubsub_client.subscriptions().unwrap_or_default() + pubsub_client.subscriptions_union().unwrap_or_default() } else { HashSet::new() }; @@ -1887,7 +1887,7 @@ mod test { .await; // Verify all accounts are now subscribed - let subs = pubsub_client.subscriptions().unwrap(); + let subs = 
pubsub_client.subscriptions_union().unwrap(); assert!(subs.contains(&pubkey1)); assert!(subs.contains(&pubkey2)); assert!(subs.contains(&pubkey3)); @@ -1929,7 +1929,7 @@ mod test { .await; // Verify only pubkey1 remains subscribed - let subs = pubsub_client.subscriptions().unwrap(); + let subs = pubsub_client.subscriptions_union().unwrap(); assert!(subs.contains(&pubkey1)); assert!(!subs.contains(&pubkey2)); assert!(!subs.contains(&pubkey3)); @@ -1979,7 +1979,7 @@ mod test { // Verify: pubkey_in_lru and never_evicted_pubkey remain, stale_pubkey // is unsubscribed - let subs = pubsub_client.subscriptions().unwrap(); + let subs = pubsub_client.subscriptions_union().unwrap(); assert!( subs.contains(&pubkey_in_lru), "Account in LRU should remain subscribed" diff --git a/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs b/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs index 9315c640a..8fa817e74 100644 --- a/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs +++ b/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs @@ -72,7 +72,7 @@ pub async fn reconcile_subscriptions( never_evicted: &[Pubkey], removed_account_tx: &mpsc::Sender, ) { - let all_pubsub_subs = pubsub_client.subscriptions().unwrap_or_default(); + let all_pubsub_subs = pubsub_client.subscriptions_union().unwrap_or_default(); let lru_pubkeys = subscribed_accounts.pubkeys(); let pubsub_subs_without_never_evict: HashSet<_> = all_pubsub_subs diff --git a/magicblock-chainlink/src/submux/mod.rs b/magicblock-chainlink/src/submux/mod.rs index 2b9d086fe..a45a65034 100644 --- a/magicblock-chainlink/src/submux/mod.rs +++ b/magicblock-chainlink/src/submux/mod.rs @@ -747,7 +747,7 @@ where fn get_subscriptions(clients: &[Arc]) -> Option> { let mut all_subs = HashSet::new(); for client in clients { - if let Some(subs) = client.subscriptions() { + if let Some(subs) = client.subscriptions_union() { all_subs.extend(subs); } 
else { return None; @@ -890,7 +890,7 @@ where /// Gets the union of all subscriptions across all inner clients. /// Unless one is reconnecting, this should be identical to /// getting it from a single inner client. - fn subscriptions(&self) -> Option> { + fn subscriptions_union(&self) -> Option> { Self::get_subscriptions(&self.clients) } From 72ce02194ac347c6f7e9fd8594d33c732a7c8d03 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 12 Feb 2026 09:27:27 +0700 Subject: [PATCH 18/64] chore: introducing union/intersection --- .../chain_laser_client.rs | 2 + .../chain_pubsub_client.rs | 14 +++++++ .../subscription_reconciler.rs | 4 +- magicblock-chainlink/src/submux/mod.rs | 42 ++++++++++++------- 4 files changed, 45 insertions(+), 17 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs index a8dc54cd6..2c50e885d 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs @@ -182,6 +182,8 @@ impl ChainPubsubClient for ChainLaserClientImpl { } fn subscriptions_union(&self) -> Option> { + // Even though subscriptions are not activated immediately we consider them complete + // once added to the subscription list. None } diff --git a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs index fa5b2c147..5aaea29f6 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs @@ -46,6 +46,7 @@ pub trait ChainPubsubClient: Send + Sync + Clone + 'static { /// Provides the total number of subscriptions and the number of /// subscriptions when excludig pubkeys in `exclude`. 
+ /// TODO: @@@ what is it recommended to count /// - `exclude`: Optional slice of pubkeys to exclude from the count. /// Returns a tuple of (total subscriptions, filtered subscriptions). async fn subscription_count( @@ -53,8 +54,21 @@ pub trait ChainPubsubClient: Send + Sync + Clone + 'static { exclude: Option<&[Pubkey]>, ) -> Option<(usize, usize)>; + /// Returns the subscriptions of a client or the union of subscriptions + /// if there are multiple clients. + /// This means that if any client is subscribed to a pubkey, it will be + /// included in the returned set even if other clients are not subscribed to it. fn subscriptions_union(&self) -> Option>; + /// Returns the intersection of subscriptions across all underlying + /// clients. For a single client this is identical to [ChainPubsubClient::subscriptions_union]. + /// For an implementer with multiple clients it returns only the pubkeys + /// that every client is subscribed to. + /// If any client has no subscriptions, None is returned. 
+ fn subscriptions_intersection(&self) -> Option> { + self.subscriptions_union() + } + fn subs_immediately(&self) -> bool; fn id(&self) -> &str; diff --git a/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs b/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs index 8fa817e74..cd7465b87 100644 --- a/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs +++ b/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs @@ -72,7 +72,9 @@ pub async fn reconcile_subscriptions( never_evicted: &[Pubkey], removed_account_tx: &mpsc::Sender, ) { - let all_pubsub_subs = pubsub_client.subscriptions_union().unwrap_or_default(); + // TODO: @@@ consider both union and intersection when reconciling + let all_pubsub_subs = + pubsub_client.subscriptions_union().unwrap_or_default(); let lru_pubkeys = subscribed_accounts.pubkeys(); let pubsub_subs_without_never_evict: HashSet<_> = all_pubsub_subs diff --git a/magicblock-chainlink/src/submux/mod.rs b/magicblock-chainlink/src/submux/mod.rs index a45a65034..9e1f46622 100644 --- a/magicblock-chainlink/src/submux/mod.rs +++ b/magicblock-chainlink/src/submux/mod.rs @@ -744,18 +744,6 @@ where maybe_forward_now } - fn get_subscriptions(clients: &[Arc]) -> Option> { - let mut all_subs = HashSet::new(); - for client in clients { - if let Some(subs) = client.subscriptions_union() { - all_subs.extend(subs); - } else { - return None; - } - } - Some(all_subs) - } - /// Number of clients that must confirm an account subscription for it to be considered active. /// 2/3 of connected clients subscribing immediately. fn required_account_subscription_confirmations(&self) -> usize { @@ -887,11 +875,33 @@ where Some((max_total, max_filtered)) } - /// Gets the union of all subscriptions across all inner clients. - /// Unless one is reconnecting, this should be identical to - /// getting it from a single inner client. 
fn subscriptions_union(&self) -> Option> { - Self::get_subscriptions(&self.clients) + let mut union = HashSet::new(); + for client in &self.clients { + if let Some(subs) = client.subscriptions_union() { + union.extend(subs); + } + } + Some(union) + } + + fn subscriptions_intersection(&self) -> Option> { + let mut acc = HashSet::::new(); + for client in &self.clients { + if let Some(subs) = client.subscriptions_intersection() { + if acc.is_empty() { + acc = subs; + } else { + acc = acc + .intersection(&subs) + .cloned() + .collect::>(); + } + } else { + return None; + } + } + Some(acc) } /// Returns true if any inner client subscribes immediately From 4dd13c07e47ba73c04a057bbf3d2e00a4b662cab Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 12 Feb 2026 10:35:23 +0700 Subject: [PATCH 19/64] chore: laser client has access to shared subscriptions --- .../chain_laser_actor.rs | 108 ++++++++++++------ .../chain_laser_client.rs | 40 ++++--- 2 files changed, 101 insertions(+), 47 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor.rs index beebf42b6..41676df3e 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor.rs @@ -2,7 +2,10 @@ use std::{ collections::{HashMap, HashSet}, fmt, pin::Pin, - sync::atomic::{AtomicU16, AtomicU64, Ordering}, + sync::{ + atomic::{AtomicU16, AtomicU64, Ordering}, + Arc, + }, time::Duration, }; @@ -23,6 +26,7 @@ use magicblock_metrics::metrics::{ inc_per_program_account_updates_count, inc_program_subscription_account_updates_count, }; +use parking_lot::RwLock; use solana_account::Account; use solana_commitment_config::CommitmentLevel as SolanaCommitmentLevel; use solana_pubkey::Pubkey; @@ -55,6 +59,8 @@ const SLOTS_BETWEEN_ACTIVATIONS: u64 = SUBSCRIPTION_ACTIVATION_INTERVAL_MILLIS / 400; const MAX_SLOTS_BACKFILL: u64 = 400; 
+pub type SharedSubscriptions = Arc>>; + // ----------------- // Slots // ----------------- @@ -116,8 +122,10 @@ impl fmt::Display for AccountUpdateSource { pub struct ChainLaserActor { /// Configuration used to create the laser client laser_client_config: LaserstreamConfig, - /// Requested subscriptions, some may not be active yet - subscriptions: HashSet, + /// Requested subscriptions, some may not be active yet. + /// Shared with ChainLaserClientImpl for sync access to + /// subscription_count and subscriptions_union. + subscriptions: SharedSubscriptions, /// Pubkeys of currently active subscriptions active_subscription_pubkeys: HashSet, /// Subscriptions that have been activated via the helius provider @@ -155,6 +163,7 @@ impl ChainLaserActor { Self, mpsc::Sender, mpsc::Receiver, + SharedSubscriptions, ) { let channel_options = ChannelOptions { connect_timeout_secs: Some(5), @@ -190,6 +199,7 @@ impl ChainLaserActor { Self, mpsc::Sender, mpsc::Receiver, + SharedSubscriptions, ) { let (subscription_updates_sender, subscription_updates_receiver) = mpsc::channel(SUBSCRIPTION_UPDATE_CHANNEL_SIZE); @@ -197,10 +207,13 @@ impl ChainLaserActor { mpsc::channel(MESSAGE_CHANNEL_SIZE); let commitment = grpc_commitment_from_solana(commitment); + let subscriptions: SharedSubscriptions = Default::default(); + let shared_subscriptions = Arc::clone(&subscriptions); + let me = Self { laser_client_config, messages_receiver, - subscriptions: Default::default(), + subscriptions, active_subscriptions: Default::default(), active_subscription_pubkeys: Default::default(), program_subscriptions: Default::default(), @@ -212,14 +225,19 @@ impl ChainLaserActor { rpc_client, }; - (me, messages_sender, subscription_updates_receiver) + ( + me, + messages_sender, + subscription_updates_receiver, + shared_subscriptions, + ) } #[allow(dead_code)] #[instrument(skip(self), fields(client_id = %self.client_id))] fn shutdown(&mut self) { info!("Shutting down laser actor"); - 
self.subscriptions.clear(); + self.subscriptions.write().clear(); self.active_subscriptions.clear(); self.active_subscription_pubkeys.clear(); } @@ -256,7 +274,7 @@ impl ChainLaserActor { None => { debug!("Account subscription stream ended"); Self::signal_connection_issue( - &mut self.subscriptions, + &self.subscriptions, &mut self.active_subscriptions, &mut self.active_subscription_pubkeys, &mut self.program_subscriptions, @@ -281,7 +299,7 @@ impl ChainLaserActor { None => { debug!("Program subscription stream ended"); Self::signal_connection_issue( - &mut self.subscriptions, + &self.subscriptions, &mut self.active_subscriptions, &mut self.active_subscription_pubkeys, &mut self.program_subscriptions, @@ -338,7 +356,7 @@ impl ChainLaserActor { Shutdown { response } => { info!(client_id = self.client_id, "Received Shutdown message"); Self::clear_subscriptions( - &mut self.subscriptions, + &self.subscriptions, &mut self.active_subscriptions, &mut self.active_subscription_pubkeys, &mut self.program_subscriptions, @@ -360,14 +378,28 @@ impl ChainLaserActor { pubkey: Pubkey, sub_response: oneshot::Sender>, ) { - if self.subscriptions.contains(&pubkey) { + let inserted = { + // Fast path: check with read lock first + let already_subscribed = { + let subs = self.subscriptions.read(); + subs.contains(&pubkey) + }; + + if already_subscribed { + false + } else { + // Write lock only when we need to modify + let mut subs = self.subscriptions.write(); + subs.insert(pubkey); + true + } + }; + if !inserted { debug!(pubkey = %pubkey, "Already subscribed to account"); sub_response.send(Ok(())).unwrap_or_else(|_| { warn!(pubkey = %pubkey, "Failed to send already subscribed response"); }); } else { - self.subscriptions.insert(pubkey); - // If this is the first sub for the clock sysvar we want to activate it immediately if self.active_subscriptions.is_empty() { self.update_active_subscriptions(); } @@ -383,7 +415,17 @@ impl ChainLaserActor { pubkey: &Pubkey, unsub_response: 
oneshot::Sender>, ) { - match self.subscriptions.remove(pubkey) { + // Fast path: check with read lock first + let exists = self.subscriptions.read().contains(pubkey); + + let removed = if exists { + // Write lock only when we need to modify + self.subscriptions.write().remove(pubkey) + } else { + false + }; + + match removed { true => { trace!(pubkey = %pubkey, "Unsubscribed from account"); unsub_response.send(Ok(())).unwrap_or_else(|_| { @@ -405,25 +447,28 @@ impl ChainLaserActor { } fn update_active_subscriptions(&mut self) { - // Check if the active subscriptions match what we already have - let new_pubkeys: HashSet = - self.subscriptions.iter().copied().collect(); - if new_pubkeys == self.active_subscription_pubkeys { - trace!( - count = self.subscriptions.len(), - "Active subscriptions already up to date" - ); - return; - } + // Copy subscriptions and release the read lock immediately + let new_pubkeys: HashSet = { + let subs = self.subscriptions.read(); + // Check if the active subscriptions match what we already have + if subs.eq(&self.active_subscription_pubkeys) { + trace!( + count = subs.len(), + "Active subscriptions already up to date" + ); + return; + } + subs.iter().copied().collect() + }; inc_account_subscription_activations_count(&self.client_id); let mut new_subs: StreamMap = StreamMap::new(); // Re-create streams for all subscriptions - let subs = self.subscriptions.iter().collect::>(); + let sub_refs = new_pubkeys.iter().collect::>(); - let chunks = subs + let chunks = sub_refs .chunks(PER_STREAM_SUBSCRIPTION_LIMIT) .map(|chunk| chunk.to_vec()) .collect::>(); @@ -435,7 +480,7 @@ impl ChainLaserActor { if tracing::enabled!(tracing::Level::TRACE) { trace!( - account_count = self.subscriptions.len(), + account_count = new_pubkeys.len(), chain_slot, from_slot, stream_count = chunks.len(), @@ -639,7 +684,7 @@ impl ChainLaserActor { error!(error = ?err, slots = ?self.slots, "Error in {} stream", source); Self::signal_connection_issue( - &mut 
self.subscriptions, + &self.subscriptions, &mut self.active_subscriptions, &mut self.active_subscription_pubkeys, &mut self.program_subscriptions, @@ -710,12 +755,12 @@ impl ChainLaserActor { } fn clear_subscriptions( - subscriptions: &mut HashSet, + subscriptions: &SharedSubscriptions, active_subscriptions: &mut StreamMap, active_subscription_pubkeys: &mut HashSet, program_subscriptions: &mut Option<(HashSet, LaserStream)>, ) { - subscriptions.clear(); + subscriptions.write().clear(); active_subscriptions.clear(); active_subscription_pubkeys.clear(); *program_subscriptions = None; @@ -727,7 +772,7 @@ impl ChainLaserActor { /// we add this as a backup in case it is unable to do so #[instrument(skip(subscriptions, active_subscriptions, active_subscription_pubkeys, program_subscriptions, abort_sender), fields(client_id = %client_id))] async fn signal_connection_issue( - subscriptions: &mut HashSet, + subscriptions: &SharedSubscriptions, active_subscriptions: &mut StreamMap, active_subscription_pubkeys: &mut HashSet, program_subscriptions: &mut Option<(HashSet, LaserStream)>, @@ -837,8 +882,7 @@ impl ChainLaserActor { ); } - if !self.subscriptions.contains(&pubkey) { - // Ignore updates for accounts we are not subscribed to + if !self.subscriptions.read().contains(&pubkey) { return; } diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs index 2c50e885d..5c64264b7 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs @@ -11,7 +11,7 @@ use tokio::sync::{mpsc, oneshot}; use tracing::*; use crate::remote_account_provider::{ - chain_laser_actor::{ChainLaserActor, Slots}, + chain_laser_actor::{ChainLaserActor, SharedSubscriptions, Slots}, chain_rpc_client::ChainRpcClientImpl, pubsub_common::{ChainPubsubActorMessage, SubscriptionUpdate}, ChainPubsubClient, 
ReconnectableClient, RemoteAccountProviderError, @@ -48,6 +48,8 @@ pub struct ChainLaserClientImpl { updates: Arc>>>, /// Channel to send messages to the actor messages: mpsc::Sender, + /// Shared subscriptions with the actor for sync access + subscriptions: SharedSubscriptions, /// Client identifier client_id: String, } @@ -62,18 +64,20 @@ impl ChainLaserClientImpl { slots: Slots, rpc_client: ChainRpcClientImpl, ) -> Self { - let (actor, messages, updates) = ChainLaserActor::new_from_url( - pubsub_url, - &client_id, - api_key, - commitment, - abort_sender, - slots, - rpc_client, - ); + let (actor, messages, updates, subscriptions) = + ChainLaserActor::new_from_url( + pubsub_url, + &client_id, + api_key, + commitment, + abort_sender, + slots, + rpc_client, + ); let client = Self { updates: Arc::new(Mutex::new(Some(updates))), messages, + subscriptions, client_id, }; tokio::spawn(actor.run()); @@ -176,15 +180,21 @@ impl ChainPubsubClient for ChainLaserClientImpl { async fn subscription_count( &self, - _exclude: Option<&[Pubkey]>, + exclude: Option<&[Pubkey]>, ) -> Option<(usize, usize)> { - None + let subs = self.subscriptions.read(); + let total = subs.len(); + let filtered = match exclude { + Some(exclude) => { + subs.iter().filter(|pk| !exclude.contains(pk)).count() + } + None => total, + }; + Some((total, filtered)) } fn subscriptions_union(&self) -> Option> { - // Even though subscriptions are not activated immediately we consider them complete - // once added to the subscription list. 
- None + Some(self.subscriptions.read().clone()) } fn subs_immediately(&self) -> bool { From 4d823e7ab35ab83b93511dfa015e48620ea7350d Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 12 Feb 2026 11:00:06 +0700 Subject: [PATCH 20/64] chore: all clients return subs (instead option) --- .../chain_laser_client.rs | 8 +-- .../chain_pubsub_client.rs | 23 +++++---- .../chain_updates_client.rs | 4 +- .../src/remote_account_provider/mod.rs | 20 ++------ .../subscription_reconciler.rs | 3 +- magicblock-chainlink/src/submux/mod.rs | 49 ++++++++----------- 6 files changed, 44 insertions(+), 63 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs index 5c64264b7..a57009451 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs @@ -181,7 +181,7 @@ impl ChainPubsubClient for ChainLaserClientImpl { async fn subscription_count( &self, exclude: Option<&[Pubkey]>, - ) -> Option<(usize, usize)> { + ) -> (usize, usize) { let subs = self.subscriptions.read(); let total = subs.len(); let filtered = match exclude { @@ -190,11 +190,11 @@ impl ChainPubsubClient for ChainLaserClientImpl { } None => total, }; - Some((total, filtered)) + (total, filtered) } - fn subscriptions_union(&self) -> Option> { - Some(self.subscriptions.read().clone()) + fn subscriptions_union(&self) -> HashSet { + self.subscriptions.read().clone() } fn subs_immediately(&self) -> bool { diff --git a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs index 5aaea29f6..4a4f1f957 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs @@ -52,20 +52,19 @@ pub trait ChainPubsubClient: Send + Sync + 
Clone + 'static { async fn subscription_count( &self, exclude: Option<&[Pubkey]>, - ) -> Option<(usize, usize)>; + ) -> (usize, usize); /// Returns the subscriptions of a client or the union of subscriptions /// if there are multiple clients. /// This means that if any client is subscribed to a pubkey, it will be /// included in the returned set even if other clients are not subscribed to it. - fn subscriptions_union(&self) -> Option>; + fn subscriptions_union(&self) -> HashSet; /// Returns the intersection of subscriptions across all underlying /// clients. For a single client this is identical to [ChainPubsubClient::subscriptions_union]. /// For an implementer with multiple clients it returns only the pubkeys /// that every client is subscribed to. - /// If any client has no subscriptions, None is returned. - fn subscriptions_intersection(&self) -> Option> { + fn subscriptions_intersection(&self) -> HashSet { self.subscriptions_union() } @@ -215,18 +214,18 @@ impl ChainPubsubClient for ChainPubsubClientImpl { async fn subscription_count( &self, exclude: Option<&[Pubkey]>, - ) -> Option<(usize, usize)> { + ) -> (usize, usize) { let total = self.actor.subscription_count(&[]); let filtered = if let Some(exclude) = exclude { self.actor.subscription_count(exclude) } else { total }; - Some((total, filtered)) + (total, filtered) } - fn subscriptions_union(&self) -> Option> { - Some(self.actor.subscriptions()) + fn subscriptions_union(&self) -> HashSet { + self.actor.subscriptions() } fn subs_immediately(&self) -> bool { @@ -486,7 +485,7 @@ pub mod mock { async fn subscription_count( &self, exclude: Option<&[Pubkey]>, - ) -> Option<(usize, usize)> { + ) -> (usize, usize) { let pubkeys: Vec = { let subs = self.subscribed_pubkeys.lock(); subs.iter().cloned().collect() @@ -497,16 +496,16 @@ pub mod mock { .iter() .filter(|pubkey| !exclude.contains(pubkey)) .count(); - Some((total, filtered)) + (total, filtered) } /// Returns the subscriptions of a client or the union of 
subscriptions /// if there are multiple clients. /// This means that if any client is subscribed to a pubkey, it will be /// included in the returned set even if other clients are not subscribed to it. - fn subscriptions_union(&self) -> Option> { + fn subscriptions_union(&self) -> HashSet { let subs = self.subscribed_pubkeys.lock(); - Some(subs.iter().copied().collect()) + subs.iter().copied().collect() } fn subs_immediately(&self) -> bool { diff --git a/magicblock-chainlink/src/remote_account_provider/chain_updates_client.rs b/magicblock-chainlink/src/remote_account_provider/chain_updates_client.rs index ca756638b..a33c0a972 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_updates_client.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_updates_client.rs @@ -153,7 +153,7 @@ impl ChainPubsubClient for ChainUpdatesClient { async fn subscription_count( &self, exclude: Option<&[Pubkey]>, - ) -> Option<(usize, usize)> { + ) -> (usize, usize) { use ChainUpdatesClient::*; match self { WebSocket(client) => client.subscription_count(exclude).await, @@ -161,7 +161,7 @@ impl ChainPubsubClient for ChainUpdatesClient { } } - fn subscriptions_union(&self) -> Option> { + fn subscriptions_union(&self) -> HashSet { use ChainUpdatesClient::*; match self { WebSocket(client) => client.subscriptions_union(), diff --git a/magicblock-chainlink/src/remote_account_provider/mod.rs b/magicblock-chainlink/src/remote_account_provider/mod.rs index c30af9f93..8cfeba362 100644 --- a/magicblock-chainlink/src/remote_account_provider/mod.rs +++ b/magicblock-chainlink/src/remote_account_provider/mod.rs @@ -242,27 +242,17 @@ impl RemoteAccountProvider { loop { interval.tick().await; let lru_count = subscribed_accounts.len(); - let subscription_counts = pubsub_client + let (pubsub_total, pubsub_without_never_evict) = pubsub_client .subscription_count(Some(&never_evicted)) .await; let all_pubsub_subs = if tracing::enabled!(tracing::Level::DEBUG) { - 
pubsub_client.subscriptions_union().unwrap_or_default() + pubsub_client.subscriptions_union() } else { HashSet::new() }; - let (pubsub_total, pubsub_without_never_evict) = - match subscription_counts { - Some(counts) => counts, - None => { - warn!( - "No connected client that tracks subscriptions" - ); - (0, 0) - } - }; if lru_count != pubsub_without_never_evict { warn!( lru_count, @@ -1887,7 +1877,7 @@ mod test { .await; // Verify all accounts are now subscribed - let subs = pubsub_client.subscriptions_union().unwrap(); + let subs = pubsub_client.subscriptions_union(); assert!(subs.contains(&pubkey1)); assert!(subs.contains(&pubkey2)); assert!(subs.contains(&pubkey3)); @@ -1929,7 +1919,7 @@ mod test { .await; // Verify only pubkey1 remains subscribed - let subs = pubsub_client.subscriptions_union().unwrap(); + let subs = pubsub_client.subscriptions_union(); assert!(subs.contains(&pubkey1)); assert!(!subs.contains(&pubkey2)); assert!(!subs.contains(&pubkey3)); @@ -1979,7 +1969,7 @@ mod test { // Verify: pubkey_in_lru and never_evicted_pubkey remain, stale_pubkey // is unsubscribed - let subs = pubsub_client.subscriptions_union().unwrap(); + let subs = pubsub_client.subscriptions_union(); assert!( subs.contains(&pubkey_in_lru), "Account in LRU should remain subscribed" diff --git a/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs b/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs index cd7465b87..d713de311 100644 --- a/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs +++ b/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs @@ -73,8 +73,7 @@ pub async fn reconcile_subscriptions( removed_account_tx: &mpsc::Sender, ) { // TODO: @@@ consider both union and intersection when reconciling - let all_pubsub_subs = - pubsub_client.subscriptions_union().unwrap_or_default(); + let all_pubsub_subs = pubsub_client.subscriptions_union(); let lru_pubkeys = 
subscribed_accounts.pubkeys(); let pubsub_subs_without_never_evict: HashSet<_> = all_pubsub_subs diff --git a/magicblock-chainlink/src/submux/mod.rs b/magicblock-chainlink/src/submux/mod.rs index 9e1f46622..96c398bc5 100644 --- a/magicblock-chainlink/src/submux/mod.rs +++ b/magicblock-chainlink/src/submux/mod.rs @@ -857,51 +857,44 @@ where async fn subscription_count( &self, exclude: Option<&[Pubkey]>, - ) -> Option<(usize, usize)> { + ) -> (usize, usize) { let mut max_total = 0; let mut max_filtered = 0; for client in &self.clients { - if let Some((total, filtered)) = - client.subscription_count(exclude).await - { - if total > max_total { - max_total = total; - } - if filtered > max_filtered { - max_filtered = filtered; - } + let (total, filtered) = client.subscription_count(exclude).await; + if total > max_total { + max_total = total; + } + if filtered > max_filtered { + max_filtered = filtered; } } - Some((max_total, max_filtered)) + (max_total, max_filtered) } - fn subscriptions_union(&self) -> Option> { + fn subscriptions_union(&self) -> HashSet { let mut union = HashSet::new(); for client in &self.clients { - if let Some(subs) = client.subscriptions_union() { - union.extend(subs); - } + let subs = client.subscriptions_union(); + union.extend(subs); } - Some(union) + union } - fn subscriptions_intersection(&self) -> Option> { + fn subscriptions_intersection(&self) -> HashSet { let mut acc = HashSet::::new(); for client in &self.clients { - if let Some(subs) = client.subscriptions_intersection() { - if acc.is_empty() { - acc = subs; - } else { - acc = acc - .intersection(&subs) - .cloned() - .collect::>(); - } + let subs = client.subscriptions_intersection(); + if acc.is_empty() { + acc = subs; } else { - return None; + acc = acc + .intersection(&subs) + .cloned() + .collect::>(); } } - Some(acc) + acc } /// Returns true if any inner client subscribes immediately From 06a4f67c2b2d55b62a89a038678bff9b7362e184 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: 
Thu, 12 Feb 2026 11:10:45 +0700 Subject: [PATCH 21/64] chore: optimize set intersection method for submux --- magicblock-chainlink/src/submux/mod.rs | 34 ++++++++++++++++---------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/magicblock-chainlink/src/submux/mod.rs b/magicblock-chainlink/src/submux/mod.rs index 96c398bc5..73eaeb717 100644 --- a/magicblock-chainlink/src/submux/mod.rs +++ b/magicblock-chainlink/src/submux/mod.rs @@ -882,19 +882,27 @@ where } fn subscriptions_intersection(&self) -> HashSet { - let mut acc = HashSet::::new(); - for client in &self.clients { - let subs = client.subscriptions_intersection(); - if acc.is_empty() { - acc = subs; - } else { - acc = acc - .intersection(&subs) - .cloned() - .collect::>(); - } + let sets: Vec> = self + .clients + .iter() + .map(|c| c.subscriptions_intersection()) + .collect(); + if sets.is_empty() { + return HashSet::new(); } - acc + // Find the smallest set to iterate over, then check membership + // in all others — no intermediate cloning/collecting. + let smallest = + sets.iter().min_by_key(|s| s.len()).unwrap(); + smallest + .iter() + .filter(|pk| { + sets.iter() + .filter(|s| !std::ptr::eq(*s, smallest)) + .all(|s| s.contains(pk)) + }) + .copied() + .collect() } /// Returns true if any inner client subscribes immediately @@ -1724,4 +1732,4 @@ mod tests { mux.shutdown().await.unwrap(); } -} +} \ No newline at end of file From c3d5329e211b7e7bf71cf41d6f1e2c5e811a551d Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 12 Feb 2026 11:41:24 +0700 Subject: [PATCH 22/64] feat: reconciler considers union vs. 
intersection of subscriptions --- .../chain_laser_actor.rs | 2 +- .../subscription_reconciler.rs | 37 +++++++++++-------- 2 files changed, 23 insertions(+), 16 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor.rs index 41676df3e..9433c314d 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor.rs @@ -395,7 +395,7 @@ impl ChainLaserActor { } }; if !inserted { - debug!(pubkey = %pubkey, "Already subscribed to account"); + trace!(pubkey = %pubkey, "Already subscribed to account"); sub_response.send(Ok(())).unwrap_or_else(|_| { warn!(pubkey = %pubkey, "Failed to send already subscribed response"); }); diff --git a/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs b/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs index d713de311..4970f894f 100644 --- a/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs +++ b/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs @@ -72,41 +72,48 @@ pub async fn reconcile_subscriptions( never_evicted: &[Pubkey], removed_account_tx: &mpsc::Sender, ) { - // TODO: @@@ consider both union and intersection when reconciling - let all_pubsub_subs = pubsub_client.subscriptions_union(); + let pubsub_union = pubsub_client.subscriptions_union(); + let pubsub_intersection = pubsub_client.subscriptions_intersection(); let lru_pubkeys = subscribed_accounts.pubkeys(); - let pubsub_subs_without_never_evict: HashSet<_> = all_pubsub_subs - .iter() + let ensured_subs_without_never_evict: HashSet<_> = pubsub_intersection + .into_iter() + .filter(|pk| !never_evicted.contains(pk)) + .collect(); + let partial_subs_without_never_evict: HashSet<_> = pubsub_union + .into_iter() .filter(|pk| !never_evicted.contains(pk)) - .copied() .collect(); let 
lru_pubkeys_set: HashSet<_> = lru_pubkeys.into_iter().collect(); - // A) LRU has more subscriptions than pubsub - need to resubscribe + // A) LRU subs that are not ensured by all clients let extra_in_lru: Vec<_> = lru_pubkeys_set - .difference(&pubsub_subs_without_never_evict) - .cloned() + .difference(&ensured_subs_without_never_evict) .collect(); - - // B) Pubsub has more subscriptions than LRU - need to unsubscribe - let extra_in_pubsub: Vec<_> = pubsub_subs_without_never_evict - .difference(&lru_pubkeys_set) - .cloned() + // B) Subs not in LRU that some clients are subscribed to + let extra_in_pubsub: Vec<_> = partial_subs_without_never_evict + .difference(&ensured_subs_without_never_evict) .collect(); + // For any sub that is in the LRU but not ensured by all clients we resubscribe. + // This may call subscribe on some clients that already have the subscription and + // is ignored by that client. if !extra_in_lru.is_empty() { debug!( count = extra_in_lru.len(), "Resubscribing accounts in LRU but not in pubsub" ); for pubkey in extra_in_lru { - if let Err(e) = pubsub_client.subscribe(pubkey, None).await { + if let Err(e) = pubsub_client.subscribe(*pubkey, None).await { warn!(pubkey = %pubkey, error = ?e, "Failed to resubscribe account"); } } } + // For any sub that is in any client but not in the LRU we unsubscribe and trigger a removal + // notification. + // This may call unsubscribe on some clients that don't have the subscription and + // is ignored by that client. 
if !extra_in_pubsub.is_empty() { debug!( count = extra_in_pubsub.len(), @@ -114,7 +121,7 @@ pub async fn reconcile_subscriptions( ); for pubkey in extra_in_pubsub { unsubscribe_and_notify_removal( - pubkey, + *pubkey, pubsub_client, removed_account_tx, ) From 230328f5b24d2d98b88b42c6ce5fc96c73c819e5 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 12 Feb 2026 12:03:32 +0700 Subject: [PATCH 23/64] chore: test reconciler --- .../subscription_reconciler.rs | 249 ++++++++++++++++++ 1 file changed, 249 insertions(+) diff --git a/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs b/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs index 4970f894f..6527d5102 100644 --- a/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs +++ b/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs @@ -129,3 +129,252 @@ pub async fn reconcile_subscriptions( } } } + +#[cfg(test)] +mod tests { + use std::num::NonZeroUsize; + + use solana_pubkey::Pubkey; + use tokio::sync::mpsc; + + use crate::remote_account_provider::{ + chain_pubsub_client::mock::ChainPubsubClientMock, + lru_cache::AccountsLruCache, pubsub_common::SubscriptionUpdate, + }; + + use super::*; + + fn create_test_pubkey(seed: u8) -> Pubkey { + let mut bytes = [0u8; 32]; + bytes[0] = seed; + Pubkey::from(bytes) + } + + #[tokio::test] + async fn test_subs_in_lru_and_clients_same_noop() { + let (tx, rx) = mpsc::channel::(10); + let mock_client = ChainPubsubClientMock::new(tx, rx); + + // Set up LRU with 2 accounts + let lru = AccountsLruCache::new(NonZeroUsize::new(10).unwrap()); + let pk1 = create_test_pubkey(1); + let pk2 = create_test_pubkey(2); + + lru.add(pk1); + lru.add(pk2); + + // Set up client with same subscriptions + mock_client.insert_subscription(pk1); + mock_client.insert_subscription(pk2); + + // Create removal channel + let (removed_tx, _removed_rx) = mpsc::channel::(10); + + // Reconcile + 
reconcile_subscriptions(&lru, &mock_client, &[], &removed_tx).await; + + // Verify subscriptions are unchanged + let subs = mock_client.subscriptions_union(); + assert_eq!(subs.len(), 2); + assert!(subs.contains(&pk1)); + assert!(subs.contains(&pk2)); + } + + #[tokio::test] + async fn test_not_all_lru_subs_ensured_resubscribes() { + let (tx, rx) = mpsc::channel::(10); + let mock_client = ChainPubsubClientMock::new(tx, rx); + + // Set up LRU with 3 accounts + let lru = AccountsLruCache::new(NonZeroUsize::new(10).unwrap()); + let pk1 = create_test_pubkey(1); + let pk2 = create_test_pubkey(2); + let pk3 = create_test_pubkey(3); + + lru.add(pk1); + lru.add(pk2); + lru.add(pk3); + + // Client only has pk1 and pk2 + mock_client.insert_subscription(pk1); + mock_client.insert_subscription(pk2); + + // Create removal channel + let (removed_tx, _removed_rx) = mpsc::channel::(10); + + // Reconcile + reconcile_subscriptions(&lru, &mock_client, &[], &removed_tx).await; + + // Verify pk3 was resubscribed + let subs = mock_client.subscriptions_union(); + assert_eq!(subs.len(), 3); + assert!(subs.contains(&pk1)); + assert!(subs.contains(&pk2)); + assert!(subs.contains(&pk3)); + } + + #[tokio::test] + async fn test_never_evicted_accounts_excluded() { + let (tx, rx) = mpsc::channel::(10); + let mock_client = ChainPubsubClientMock::new(tx, rx); + + // Set up LRU with 2 accounts + let lru = AccountsLruCache::new(NonZeroUsize::new(10).unwrap()); + let pk1 = create_test_pubkey(1); + let pk2 = create_test_pubkey(2); + let never_evict_pk = create_test_pubkey(99); + + lru.add(pk1); + lru.add(pk2); + + // Client has all 3 subscriptions + mock_client.insert_subscription(pk1); + mock_client.insert_subscription(pk2); + mock_client.insert_subscription(never_evict_pk); + + // Create removal channel + let (removed_tx, mut removed_rx) = mpsc::channel::(10); + let never_evicted = vec![never_evict_pk]; + + // Reconcile + reconcile_subscriptions( + &lru, + &mock_client, + &never_evicted, + 
&removed_tx, + ) + .await; + + // Verify never_evict_pk is still subscribed + let subs = mock_client.subscriptions_union(); + assert_eq!(subs.len(), 3); + assert!(subs.contains(&pk1)); + assert!(subs.contains(&pk2)); + assert!(subs.contains(&never_evict_pk)); + + // Verify no removal notification for never_evict_pk + assert!(removed_rx.try_recv().is_err()); + } + + #[tokio::test] + async fn test_resubscribe_missing_account() { + let (tx, rx) = mpsc::channel::(10); + let mock_client = ChainPubsubClientMock::new(tx, rx); + + // Set up LRU with pk1, pk2, pk3 + let lru = AccountsLruCache::new(NonZeroUsize::new(10).unwrap()); + let pk1 = create_test_pubkey(1); + let pk2 = create_test_pubkey(2); + let pk3 = create_test_pubkey(3); + + lru.add(pk1); + lru.add(pk2); + lru.add(pk3); + + // Client only has pk1 and pk2 + mock_client.insert_subscription(pk1); + mock_client.insert_subscription(pk2); + + // Create removal channel + let (removed_tx, _removed_rx) = mpsc::channel::(10); + + // Reconcile + reconcile_subscriptions(&lru, &mock_client, &[], &removed_tx).await; + + // Verify pk3 was resubscribed + let subs = mock_client.subscriptions_union(); + assert_eq!(subs.len(), 3); + assert!(subs.contains(&pk1)); + assert!(subs.contains(&pk2)); + assert!(subs.contains(&pk3)); + } + + /// Test case: Empty LRU should cause resubscribe of all LRU accounts if missing + /// Expected: No-op if pubsub is also empty (single client case) + #[tokio::test] + async fn test_empty_lru_empty_pubsub_noop() { + let (tx, rx) = mpsc::channel::(10); + let mock_client = ChainPubsubClientMock::new(tx, rx); + + // Set up empty LRU + let lru = AccountsLruCache::new(NonZeroUsize::new(10).unwrap()); + + // Empty pubsub (single client case) + // Create removal channel + let (removed_tx, _removed_rx) = mpsc::channel::(10); + + // Reconcile + reconcile_subscriptions(&lru, &mock_client, &[], &removed_tx).await; + + // Verify state unchanged (both empty) + let subs = mock_client.subscriptions_union(); + 
assert_eq!(subs.len(), 0); + } + + /// Test case: Empty pubsub with subscriptions in LRU + /// Expected: Resubscribe to all accounts + #[tokio::test] + async fn test_empty_pubsub_resubscribes_all() { + let (tx, rx) = mpsc::channel::(10); + let mock_client = ChainPubsubClientMock::new(tx, rx); + + // Set up LRU with 2 accounts + let lru = AccountsLruCache::new(NonZeroUsize::new(10).unwrap()); + let pk1 = create_test_pubkey(1); + let pk2 = create_test_pubkey(2); + lru.add(pk1); + lru.add(pk2); + + // Empty pubsub + + // Create removal channel + let (removed_tx, _removed_rx) = mpsc::channel::(10); + + // Reconcile + reconcile_subscriptions(&lru, &mock_client, &[], &removed_tx).await; + + // Verify all subscriptions added + let subs = mock_client.subscriptions_union(); + assert_eq!(subs.len(), 2); + assert!(subs.contains(&pk1)); + assert!(subs.contains(&pk2)); + } + + /// Test case: Multiple accounts missing from pubsub need resubscription + /// Expected: All missing accounts get resubscribed + #[tokio::test] + async fn test_multiple_missing_accounts_resubscribed() { + let (tx, rx) = mpsc::channel::(10); + let mock_client = ChainPubsubClientMock::new(tx, rx); + + // Set up LRU with pk1, pk2, pk3, pk4 + let lru = AccountsLruCache::new(NonZeroUsize::new(10).unwrap()); + let pk1 = create_test_pubkey(1); + let pk2 = create_test_pubkey(2); + let pk3 = create_test_pubkey(3); + let pk4 = create_test_pubkey(4); + + lru.add(pk1); + lru.add(pk2); + lru.add(pk3); + lru.add(pk4); + + // Client only has pk1 and pk2 + mock_client.insert_subscription(pk1); + mock_client.insert_subscription(pk2); + + // Create removal channel + let (removed_tx, _removed_rx) = mpsc::channel::(10); + + // Reconcile + reconcile_subscriptions(&lru, &mock_client, &[], &removed_tx).await; + + // Verify all accounts are now subscribed + let subs = mock_client.subscriptions_union(); + assert_eq!(subs.len(), 4); + assert!(subs.contains(&pk1)); + assert!(subs.contains(&pk2)); + assert!(subs.contains(&pk3)); + 
assert!(subs.contains(&pk4)); + } +} From 5e1668a408648a252fb87e4a7d0b8f7d929abf9d Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 12 Feb 2026 12:33:11 +0700 Subject: [PATCH 24/64] chore: move previously existing reconciler tests to same module --- .../src/remote_account_provider/mod.rs | 149 -------------- .../subscription_reconciler.rs | 183 +++++++++++++++++- 2 files changed, 180 insertions(+), 152 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/mod.rs b/magicblock-chainlink/src/remote_account_provider/mod.rs index 8cfeba362..650bbff64 100644 --- a/magicblock-chainlink/src/remote_account_provider/mod.rs +++ b/magicblock-chainlink/src/remote_account_provider/mod.rs @@ -1841,153 +1841,4 @@ mod test { assert_eq!(removed_accounts, vec![expected_evicted]); } } - - #[tokio::test] - async fn test_reconcile_resubscribes_accounts_missing_from_pubsub() { - init_logger(); - - let (tx, rx) = mpsc::channel(1); - let pubsub_client = ChainPubsubClientMock::new(tx, rx); - let (removed_tx, _removed_rx) = mpsc::channel(10); - - let capacity = NonZeroUsize::new(10).unwrap(); - let lru_cache = Arc::new(AccountsLruCache::new(capacity)); - - let pubkey1 = Pubkey::new_unique(); - let pubkey2 = Pubkey::new_unique(); - let pubkey3 = Pubkey::new_unique(); - - // Add accounts to LRU cache - lru_cache.add(pubkey1); - lru_cache.add(pubkey2); - lru_cache.add(pubkey3); - - // Only pubkey1 is in pubsub (simulating missing subscriptions) - pubsub_client.insert_subscription(pubkey1); - - let never_evicted: Vec = vec![]; - - // Reconcile should resubscribe pubkey2 and pubkey3 - reconcile_subscriptions( - &lru_cache, - &pubsub_client, - &never_evicted, - &removed_tx, - ) - .await; - - // Verify all accounts are now subscribed - let subs = pubsub_client.subscriptions_union(); - assert!(subs.contains(&pubkey1)); - assert!(subs.contains(&pubkey2)); - assert!(subs.contains(&pubkey3)); - assert_eq!(subs.len(), 3); - } - - #[tokio::test] - async fn
test_reconcile_unsubscribes_accounts_not_in_lru() { - init_logger(); - - let (tx, rx) = mpsc::channel(1); - let pubsub_client = ChainPubsubClientMock::new(tx, rx); - let (removed_tx, mut removed_rx) = mpsc::channel(10); - - let capacity = NonZeroUsize::new(10).unwrap(); - let lru_cache = Arc::new(AccountsLruCache::new(capacity)); - - let pubkey1 = Pubkey::new_unique(); - let pubkey2 = Pubkey::new_unique(); - let pubkey3 = Pubkey::new_unique(); - - // Only pubkey1 is in LRU cache - lru_cache.add(pubkey1); - - // All three are in pubsub (simulating stale subscriptions) - pubsub_client.insert_subscription(pubkey1); - pubsub_client.insert_subscription(pubkey2); - pubsub_client.insert_subscription(pubkey3); - - let never_evicted: Vec = vec![]; - - // Reconcile should unsubscribe pubkey2 and pubkey3 - reconcile_subscriptions( - &lru_cache, - &pubsub_client, - &never_evicted, - &removed_tx, - ) - .await; - - // Verify only pubkey1 remains subscribed - let subs = pubsub_client.subscriptions_union(); - assert!(subs.contains(&pubkey1)); - assert!(!subs.contains(&pubkey2)); - assert!(!subs.contains(&pubkey3)); - assert_eq!(subs.len(), 1); - - // Verify removal notifications were sent for unsubscribed accounts - let removed = drain_removed_account_rx(&mut removed_rx); - assert_eq!(removed.len(), 2); - assert!(removed.contains(&pubkey2)); - assert!(removed.contains(&pubkey3)); - } - - #[tokio::test] - async fn test_reconcile_preserves_never_evicted_accounts_not_in_lru() { - init_logger(); - - let (tx, rx) = mpsc::channel(1); - let pubsub_client = ChainPubsubClientMock::new(tx, rx); - let (removed_tx, mut removed_rx) = mpsc::channel(10); - - let capacity = NonZeroUsize::new(10).unwrap(); - let lru_cache = Arc::new(AccountsLruCache::new(capacity)); - - let pubkey_in_lru = Pubkey::new_unique(); - let never_evicted_pubkey = Pubkey::new_unique(); - let stale_pubkey = Pubkey::new_unique(); - - // Only pubkey_in_lru is in LRU cache (never_evicted_pubkey is NOT in LRU) - 
lru_cache.add(pubkey_in_lru); - - // All three are subscribed in pubsub - pubsub_client.insert_subscription(pubkey_in_lru); - pubsub_client.insert_subscription(never_evicted_pubkey); - pubsub_client.insert_subscription(stale_pubkey); - - // never_evicted_pubkey is marked as never_evicted, so it should be - // preserved even though it's not in the LRU cache - let never_evicted = vec![never_evicted_pubkey]; - - reconcile_subscriptions( - &lru_cache, - &pubsub_client, - &never_evicted, - &removed_tx, - ) - .await; - - // Verify: pubkey_in_lru and never_evicted_pubkey remain, stale_pubkey - // is unsubscribed - let subs = pubsub_client.subscriptions_union(); - assert!( - subs.contains(&pubkey_in_lru), - "Account in LRU should remain subscribed" - ); - assert!( - subs.contains(&never_evicted_pubkey), - "Never-evicted account should remain subscribed even if not in LRU" - ); - assert!( - !subs.contains(&stale_pubkey), - "Stale account not in LRU and not never-evicted should be \ - unsubscribed" - ); - assert_eq!(subs.len(), 2); - - // Verify removal notification was sent only for stale_pubkey - let removed = drain_removed_account_rx(&mut removed_rx); - assert_eq!(removed.len(), 1); - assert!(removed.contains(&stale_pubkey)); - } } diff --git a/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs b/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs index 6527d5102..b2b965f85 100644 --- a/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs +++ b/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs @@ -133,13 +133,17 @@ pub async fn reconcile_subscriptions( #[cfg(test)] mod tests { use std::num::NonZeroUsize; + use std::sync::Arc; use solana_pubkey::Pubkey; use tokio::sync::mpsc; - use crate::remote_account_provider::{ - chain_pubsub_client::mock::ChainPubsubClientMock, - lru_cache::AccountsLruCache, pubsub_common::SubscriptionUpdate, + use crate::{ + remote_account_provider::{ + 
chain_pubsub_client::mock::ChainPubsubClientMock, + lru_cache::AccountsLruCache, pubsub_common::SubscriptionUpdate, + }, + testing::init_logger, }; use super::*; @@ -152,6 +156,8 @@ mod tests { #[tokio::test] async fn test_subs_in_lru_and_clients_same_noop() { + init_logger(); + let (tx, rx) = mpsc::channel::(10); let mock_client = ChainPubsubClientMock::new(tx, rx); @@ -182,6 +188,8 @@ mod tests { #[tokio::test] async fn test_not_all_lru_subs_ensured_resubscribes() { + init_logger(); + let (tx, rx) = mpsc::channel::(10); let mock_client = ChainPubsubClientMock::new(tx, rx); @@ -215,6 +223,8 @@ mod tests { #[tokio::test] async fn test_never_evicted_accounts_excluded() { + init_logger(); + let (tx, rx) = mpsc::channel::(10); let mock_client = ChainPubsubClientMock::new(tx, rx); @@ -258,6 +268,8 @@ mod tests { #[tokio::test] async fn test_resubscribe_missing_account() { + init_logger(); + let (tx, rx) = mpsc::channel::(10); let mock_client = ChainPubsubClientMock::new(tx, rx); @@ -293,6 +305,8 @@ mod tests { /// Expected: No-op if pubsub is also empty (single client case) #[tokio::test] async fn test_empty_lru_empty_pubsub_noop() { + init_logger(); + let (tx, rx) = mpsc::channel::(10); let mock_client = ChainPubsubClientMock::new(tx, rx); @@ -315,6 +329,8 @@ mod tests { /// Expected: Resubscribe to all accounts #[tokio::test] async fn test_empty_pubsub_resubscribes_all() { + init_logger(); + let (tx, rx) = mpsc::channel::(10); let mock_client = ChainPubsubClientMock::new(tx, rx); @@ -344,6 +360,8 @@ mod tests { /// Expected: All missing accounts get resubscribed #[tokio::test] async fn test_multiple_missing_accounts_resubscribed() { + init_logger(); + let (tx, rx) = mpsc::channel::(10); let mock_client = ChainPubsubClientMock::new(tx, rx); @@ -377,4 +395,163 @@ mod tests { assert!(subs.contains(&pk3)); assert!(subs.contains(&pk4)); } + + #[tokio::test] + async fn test_reconcile_resubscribes_accounts_missing_from_pubsub() { + init_logger(); + + let (tx, rx) = 
mpsc::channel(1); + let pubsub_client = ChainPubsubClientMock::new(tx, rx); + let (removed_tx, _removed_rx) = mpsc::channel(10); + + let capacity = NonZeroUsize::new(10).unwrap(); + let lru_cache = Arc::new(AccountsLruCache::new(capacity)); + + let pubkey1 = Pubkey::new_unique(); + let pubkey2 = Pubkey::new_unique(); + let pubkey3 = Pubkey::new_unique(); + + // Add accounts to LRU cache + lru_cache.add(pubkey1); + lru_cache.add(pubkey2); + lru_cache.add(pubkey3); + + // Only pubkey1 is in pubsub (simulating missing subscriptions) + pubsub_client.insert_subscription(pubkey1); + + let never_evicted: Vec = vec![]; + + // Reconcile should resubscribe pubkey2 and pubkey3 + reconcile_subscriptions( + &lru_cache, + &pubsub_client, + &never_evicted, + &removed_tx, + ) + .await; + + // Verify all accounts are now subscribed + let subs = pubsub_client.subscriptions_union(); + assert!(subs.contains(&pubkey1)); + assert!(subs.contains(&pubkey2)); + assert!(subs.contains(&pubkey3)); + assert_eq!(subs.len(), 3); + } + + fn drain_removed_account_rx( + rx: &mut mpsc::Receiver, + ) -> Vec { + let mut removed_accounts = Vec::new(); + while let Ok(pubkey) = rx.try_recv() { + removed_accounts.push(pubkey); + } + removed_accounts + } + + #[tokio::test] + async fn test_reconcile_unsubscribes_accounts_not_in_lru() { + init_logger(); + + let (tx, rx) = mpsc::channel(1); + let pubsub_client = ChainPubsubClientMock::new(tx, rx); + let (removed_tx, mut removed_rx) = mpsc::channel(10); + + let capacity = NonZeroUsize::new(10).unwrap(); + let lru_cache = Arc::new(AccountsLruCache::new(capacity)); + + let pubkey1 = Pubkey::new_unique(); + let pubkey2 = Pubkey::new_unique(); + let pubkey3 = Pubkey::new_unique(); + + // Only pubkey1 is in LRU cache + lru_cache.add(pubkey1); + + // All three are in pubsub (simulating stale subscriptions) + pubsub_client.insert_subscription(pubkey1); + pubsub_client.insert_subscription(pubkey2); + pubsub_client.insert_subscription(pubkey3); + + let never_evicted: 
Vec = vec![]; + + // Reconcile should unsubscribe pubkey2 and pubkey3 + reconcile_subscriptions( + &lru_cache, + &pubsub_client, + &never_evicted, + &removed_tx, + ) + .await; + + // Verify only pubkey1 remains subscribed + let subs = pubsub_client.subscriptions_union(); + assert!(subs.contains(&pubkey1)); + assert!(!subs.contains(&pubkey2)); + assert!(!subs.contains(&pubkey3)); + assert_eq!(subs.len(), 1); + + // Verify removal notifications were sent for unsubscribed accounts + let removed = drain_removed_account_rx(&mut removed_rx); + assert_eq!(removed.len(), 2); + assert!(removed.contains(&pubkey2)); + assert!(removed.contains(&pubkey3)); + } + + #[tokio::test] + async fn test_reconcile_preserves_never_evicted_accounts_not_in_lru() { + init_logger(); + + let (tx, rx) = mpsc::channel(1); + let pubsub_client = ChainPubsubClientMock::new(tx, rx); + let (removed_tx, mut removed_rx) = mpsc::channel(10); + + let capacity = NonZeroUsize::new(10).unwrap(); + let lru_cache = Arc::new(AccountsLruCache::new(capacity)); + + let pubkey_in_lru = Pubkey::new_unique(); + let never_evicted_pubkey = Pubkey::new_unique(); + let stale_pubkey = Pubkey::new_unique(); + + // Only pubkey_in_lru is in LRU cache (never_evicted_pubkey is NOT in LRU) + lru_cache.add(pubkey_in_lru); + + // All three are subscribed in pubsub + pubsub_client.insert_subscription(pubkey_in_lru); + pubsub_client.insert_subscription(never_evicted_pubkey); + pubsub_client.insert_subscription(stale_pubkey); + + // never_evicted_pubkey is marked as never_evicted, so it should be + // preserved even though it's not in the LRU cache + let never_evicted = vec![never_evicted_pubkey]; + + reconcile_subscriptions( + &lru_cache, + &pubsub_client, + &never_evicted, + &removed_tx, + ) + .await; + + // Verify: pubkey_in_lru and never_evicted_pubkey remain, stale_pubkey + // is unsubscribed + let subs = pubsub_client.subscriptions_union(); + assert!( + subs.contains(&pubkey_in_lru), + "Account in LRU should remain 
subscribed" + ); + assert!( + subs.contains(&never_evicted_pubkey), + "Never-evicted account should remain subscribed even if not in LRU" + ); + assert!( + !subs.contains(&stale_pubkey), + "Stale account not in LRU and not never-evicted should be \ + unsubscribed" + ); + assert_eq!(subs.len(), 2); + + // Verify removal notification was sent only for stale_pubkey + let removed = drain_removed_account_rx(&mut removed_rx); + assert_eq!(removed.len(), 1); + assert!(removed.contains(&stale_pubkey)); + } } From 4047bdc991d6f2003cd497dcd5e3c79cd4d3fb08 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 12 Feb 2026 12:53:04 +0700 Subject: [PATCH 25/64] chore: fix bug in reconciler logic --- .../subscription_reconciler.rs | 35 ++++++++++++------- 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs b/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs index b2b965f85..da5ec735a 100644 --- a/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs +++ b/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs @@ -87,14 +87,23 @@ pub async fn reconcile_subscriptions( let lru_pubkeys_set: HashSet<_> = lru_pubkeys.into_iter().collect(); // A) LRU subs that are not ensured by all clients - let extra_in_lru: Vec<_> = lru_pubkeys_set + let extra_in_lru: HashSet<_> = lru_pubkeys_set .difference(&ensured_subs_without_never_evict) .collect(); // B) Subs not in LRU that some clients are subscribed to - let extra_in_pubsub: Vec<_> = partial_subs_without_never_evict - .difference(&ensured_subs_without_never_evict) + let extra_in_pubsub: HashSet<_> = partial_subs_without_never_evict + .difference(&lru_pubkeys_set) .collect(); + trace!( + lru_count = lru_pubkeys_set.len(), + ensured_count = ensured_subs_without_never_evict.len(), + partial_count = partial_subs_without_never_evict.len(), + extra_in_lru_count = extra_in_lru.len(), + 
extra_in_pubsub_count = extra_in_pubsub.len(), + "Reconciling subscriptions between LRU and pubsub client" + ); + // For any sub that is in the LRU but not ensured by all clients we resubscribe. // This may call subscribe on some clients that already have the subscription and // is ignored by that client. @@ -103,6 +112,7 @@ pub async fn reconcile_subscriptions( count = extra_in_lru.len(), "Resubscribing accounts in LRU but not in pubsub" ); + trace!(pubkeys = ?extra_in_lru, "Resubscribing missing accounts"); for pubkey in extra_in_lru { if let Err(e) = pubsub_client.subscribe(*pubkey, None).await { warn!(pubkey = %pubkey, error = ?e, "Failed to resubscribe account"); @@ -119,6 +129,7 @@ pub async fn reconcile_subscriptions( count = extra_in_pubsub.len(), "Unsubscribing accounts in pubsub but not in LRU" ); + trace!(pubkeys = ?extra_in_pubsub, "Unsubscribing stale accounts"); for pubkey in extra_in_pubsub { unsubscribe_and_notify_removal( *pubkey, @@ -407,9 +418,9 @@ mod tests { let capacity = NonZeroUsize::new(10).unwrap(); let lru_cache = Arc::new(AccountsLruCache::new(capacity)); - let pubkey1 = Pubkey::new_unique(); - let pubkey2 = Pubkey::new_unique(); - let pubkey3 = Pubkey::new_unique(); + let pubkey1 = create_test_pubkey(1); + let pubkey2 = create_test_pubkey(2); + let pubkey3 = create_test_pubkey(3); // Add accounts to LRU cache lru_cache.add(pubkey1); @@ -459,9 +470,9 @@ mod tests { let capacity = NonZeroUsize::new(10).unwrap(); let lru_cache = Arc::new(AccountsLruCache::new(capacity)); - let pubkey1 = Pubkey::new_unique(); - let pubkey2 = Pubkey::new_unique(); - let pubkey3 = Pubkey::new_unique(); + let pubkey1 = create_test_pubkey(1); + let pubkey2 = create_test_pubkey(2); + let pubkey3 = create_test_pubkey(3); // Only pubkey1 is in LRU cache lru_cache.add(pubkey1); @@ -507,9 +518,9 @@ mod tests { let capacity = NonZeroUsize::new(10).unwrap(); let lru_cache = Arc::new(AccountsLruCache::new(capacity)); - let pubkey_in_lru = Pubkey::new_unique(); - let 
never_evicted_pubkey = Pubkey::new_unique(); - let stale_pubkey = Pubkey::new_unique(); + let pubkey_in_lru = create_test_pubkey(1); + let never_evicted_pubkey = create_test_pubkey(2); + let stale_pubkey = create_test_pubkey(3); // Only pubkey_in_lru is in LRU cache (never_evicted_pubkey is NOT in LRU) lru_cache.add(pubkey_in_lru); From 940db380f3920e1901b0f355beadfab31aa3ae50 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 12 Feb 2026 13:08:35 +0700 Subject: [PATCH 26/64] chore: no more reconciliation outside reconciler --- .../src/remote_account_provider/errors.rs | 3 + .../src/remote_account_provider/lru_cache.rs | 2 +- .../src/remote_account_provider/mod.rs | 58 +------------------ .../subscription_reconciler.rs | 37 +++++++++--- 4 files changed, 33 insertions(+), 67 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/errors.rs b/magicblock-chainlink/src/remote_account_provider/errors.rs index cc1f255cf..15d7415e2 100644 --- a/magicblock-chainlink/src/remote_account_provider/errors.rs +++ b/magicblock-chainlink/src/remote_account_provider/errors.rs @@ -32,6 +32,9 @@ pub enum RemoteAccountProviderError { #[error("Failed to manage subscriptions ({0})")] AccountSubscriptionsTaskFailed(String), + #[error("Not all client's subscriptions were in sync ({0})")] + AccountSubscriptionsOutOfSync(String), + #[error("Connection disrupted")] ConnectionDisrupted, diff --git a/magicblock-chainlink/src/remote_account_provider/lru_cache.rs b/magicblock-chainlink/src/remote_account_provider/lru_cache.rs index 42bbd0c9f..10a8ee958 100644 --- a/magicblock-chainlink/src/remote_account_provider/lru_cache.rs +++ b/magicblock-chainlink/src/remote_account_provider/lru_cache.rs @@ -124,7 +124,7 @@ impl AccountsLruCache { !self.accounts_to_never_evict.contains(pubkey) } - pub fn pubkeys(&self) -> Vec { + pub fn pubkeys(&self) -> HashSet { let subs = self.subscribed_accounts.lock(); subs.iter().map(|(k, _)| *k).collect() } diff --git 
a/magicblock-chainlink/src/remote_account_provider/mod.rs b/magicblock-chainlink/src/remote_account_provider/mod.rs index 650bbff64..2eb361f80 100644 --- a/magicblock-chainlink/src/remote_account_provider/mod.rs +++ b/magicblock-chainlink/src/remote_account_provider/mod.rs @@ -241,58 +241,7 @@ impl RemoteAccountProvider { loop { interval.tick().await; - let lru_count = subscribed_accounts.len(); - let (pubsub_total, pubsub_without_never_evict) = pubsub_client - .subscription_count(Some(&never_evicted)) - .await; - - let all_pubsub_subs = - if tracing::enabled!(tracing::Level::DEBUG) { - pubsub_client.subscriptions_union() - } else { - HashSet::new() - }; - - if lru_count != pubsub_without_never_evict { - warn!( - lru_count, - pubsub_count = pubsub_without_never_evict, - "User account subscription counts don't match" - ); - if tracing::enabled!(tracing::Level::DEBUG) { - // Log all pubsub subscriptions for debugging - let count = all_pubsub_subs.len(); - trace!(count, "All pubsub subscriptions"); - - // Find extra keys in pubsub that are not in LRU cache - let lru_pubkeys = subscribed_accounts.pubkeys(); - let pubsub_subs_without_never_evict: HashSet<_> = - all_pubsub_subs - .iter() - .filter(|pk| !never_evicted.contains(pk)) - .copied() - .collect(); - let lru_pubkeys_set: HashSet<_> = - lru_pubkeys.into_iter().collect(); - - let extra_in_pubsub: Vec<_> = - pubsub_subs_without_never_evict - .difference(&lru_pubkeys_set) - .cloned() - .collect(); - let extra_in_lru: Vec<_> = lru_pubkeys_set - .difference(&pubsub_subs_without_never_evict) - .cloned() - .collect(); - - if !extra_in_pubsub.is_empty() { - debug!(count = extra_in_pubsub.len(), "Extra pubkeys in pubsub client not in LRU cache"); - } - if !extra_in_lru.is_empty() { - debug!(count = extra_in_lru.len(), "Extra pubkeys in LRU cache not in pubsub client"); - } - } - + let pubsub_total = subscription_reconciler::reconcile_subscriptions( &subscribed_accounts, pubsub_client.as_ref(), @@ -300,13 +249,8 @@ impl 
RemoteAccountProvider { &removed_account_tx, ) .await; - } debug!(count = pubsub_total, "Updating active subscriptions"); - if tracing::enabled!(tracing::Level::TRACE) { - let subs_count = all_pubsub_subs.len(); - trace!(count = subs_count, "All subscriptions"); - } set_monitored_accounts_count(pubsub_total); } }) diff --git a/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs b/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs index da5ec735a..57499951f 100644 --- a/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs +++ b/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs @@ -1,9 +1,12 @@ -use std::collections::HashSet; +use std::{collections::HashSet, sync::atomic::AtomicU16}; +use magicblock_core::logger::log_trace_warn; use solana_pubkey::Pubkey; use tokio::sync::mpsc; use tracing::*; +use crate::remote_account_provider::RemoteAccountProviderError; + use super::{AccountsLruCache, ChainPubsubClient}; /// Unsubscribes from pubsub and sends a removal notification to trigger bank @@ -66,15 +69,17 @@ pub(crate) async fn unsubscribe_and_notify_removal( /// /// - `removed_account_tx`: Channel to notify upstream that an account was /// unsubscribed and should be removed from the bank. 
+/// - Returns: The number of accounts that are subscribed pub async fn reconcile_subscriptions( subscribed_accounts: &AccountsLruCache, pubsub_client: &PubsubClient, never_evicted: &[Pubkey], removed_account_tx: &mpsc::Sender, -) { +) -> usize { let pubsub_union = pubsub_client.subscriptions_union(); let pubsub_intersection = pubsub_client.subscriptions_intersection(); let lru_pubkeys = subscribed_accounts.pubkeys(); + let lru_count = lru_pubkeys.len(); let ensured_subs_without_never_evict: HashSet<_> = pubsub_intersection .into_iter() @@ -84,19 +89,18 @@ pub async fn reconcile_subscriptions( .into_iter() .filter(|pk| !never_evicted.contains(pk)) .collect(); - let lru_pubkeys_set: HashSet<_> = lru_pubkeys.into_iter().collect(); // A) LRU subs that are not ensured by all clients - let extra_in_lru: HashSet<_> = lru_pubkeys_set + let extra_in_lru: HashSet<_> = lru_pubkeys .difference(&ensured_subs_without_never_evict) .collect(); // B) Subs not in LRU that some clients are subscribed to let extra_in_pubsub: HashSet<_> = partial_subs_without_never_evict - .difference(&lru_pubkeys_set) + .difference(&lru_pubkeys) .collect(); trace!( - lru_count = lru_pubkeys_set.len(), + lru_count = lru_count, ensured_count = ensured_subs_without_never_evict.len(), partial_count = partial_subs_without_never_evict.len(), extra_in_lru_count = extra_in_lru.len(), @@ -108,9 +112,20 @@ pub async fn reconcile_subscriptions( // This may call subscribe on some clients that already have the subscription and // is ignored by that client. 
if !extra_in_lru.is_empty() { - debug!( - count = extra_in_lru.len(), - "Resubscribing accounts in LRU but not in pubsub" + static LOG_TRACE_COUNT: AtomicU16 = AtomicU16::new(0); + // If this happens a lot then this is serious since that means that some clients + // were not subscribed to all accounts + let len = extra_in_lru.len(); + let err = RemoteAccountProviderError::AccountSubscriptionsOutOfSync( + format!("{len} accounts in LRU but not in pubsub"), + ); + log_trace_warn( + "Consolidating missing subscriptions", + "Consolidated missing subscriptions repeatedly", + &len.to_string(), + &err, + 100, + &LOG_TRACE_COUNT, ); trace!(pubkeys = ?extra_in_lru, "Resubscribing missing accounts"); for pubkey in extra_in_lru { @@ -139,6 +154,10 @@ pub async fn reconcile_subscriptions( .await; } } + // We assume that reconciling worked and now our subscribed accounts are up to date + // Pubsubs should be subscribed to all accounts in the LRU as well as the accounts + // not tracked by it since they are never evicted + lru_count + never_evicted.len() } From 35e31a8e92b7f00fd3a995573c4cd549ec4fca27 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 12 Feb 2026 13:11:23 +0700 Subject: [PATCH 27/64] chore: fmt + lint --- magicblock-chainlink/src/remote_account_provider/mod.rs | 2 +- .../remote_account_provider/subscription_reconciler.rs | 9 +++------ magicblock-chainlink/src/submux/mod.rs | 5 ++--- 3 files changed, 6 insertions(+), 10 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/mod.rs b/magicblock-chainlink/src/remote_account_provider/mod.rs index 2eb361f80..a9b1baa42 100644 --- a/magicblock-chainlink/src/remote_account_provider/mod.rs +++ b/magicblock-chainlink/src/remote_account_provider/mod.rs @@ -1,5 +1,5 @@ use std::{ - collections::{hash_map::Entry, HashMap, HashSet}, + collections::{hash_map::Entry, HashMap}, num::NonZeroUsize, sync::{ atomic::{AtomicU64, Ordering}, diff --git
a/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs b/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs index 57499951f..731f01229 100644 --- a/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs +++ b/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs @@ -5,9 +5,8 @@ use solana_pubkey::Pubkey; use tokio::sync::mpsc; use tracing::*; -use crate::remote_account_provider::RemoteAccountProviderError; - use super::{AccountsLruCache, ChainPubsubClient}; +use crate::remote_account_provider::RemoteAccountProviderError; /// Unsubscribes from pubsub and sends a removal notification to trigger bank /// removal. @@ -162,12 +161,12 @@ pub async fn reconcile_subscriptions( #[cfg(test)] mod tests { - use std::num::NonZeroUsize; - use std::sync::Arc; + use std::{num::NonZeroUsize, sync::Arc}; use solana_pubkey::Pubkey; use tokio::sync::mpsc; + use super::*; use crate::{ remote_account_provider::{ chain_pubsub_client::mock::ChainPubsubClientMock, @@ -176,8 +175,6 @@ mod tests { testing::init_logger, }; - use super::*; - fn create_test_pubkey(seed: u8) -> Pubkey { let mut bytes = [0u8; 32]; bytes[0] = seed; diff --git a/magicblock-chainlink/src/submux/mod.rs b/magicblock-chainlink/src/submux/mod.rs index 73eaeb717..4664e81e2 100644 --- a/magicblock-chainlink/src/submux/mod.rs +++ b/magicblock-chainlink/src/submux/mod.rs @@ -892,8 +892,7 @@ where } // Find the smallest set to iterate over, then check membership // in all others — no intermediate cloning/collecting. 
- let smallest = - sets.iter().min_by_key(|s| s.len()).unwrap(); + let smallest = sets.iter().min_by_key(|s| s.len()).unwrap(); smallest .iter() .filter(|pk| { @@ -1732,4 +1731,4 @@ mod tests { mux.shutdown().await.unwrap(); } -} \ No newline at end of file +} From 5253d6630fb3a8b76d21417fa64197688a956aa7 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 12 Feb 2026 13:21:00 +0700 Subject: [PATCH 28/64] chore: remove subscription_count method Amp-Thread-ID: https://ampcode.com/threads/T-019c5080-01ee-71fa-aa34-97264f3a0900 Co-authored-by: Amp --- .../chain_laser_client.rs | 17 +------- .../chain_pubsub_actor.rs | 19 +-------- .../chain_pubsub_client.rs | 42 +------------------ .../chain_updates_client.rs | 15 ------- magicblock-chainlink/src/submux/mod.rs | 28 ++----------- 5 files changed, 6 insertions(+), 115 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs index a57009451..7b02a41c9 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs @@ -178,21 +178,6 @@ impl ChainPubsubClient for ChainLaserClientImpl { .expect("ChainLaserClientImpl::take_updates called more than once") } - async fn subscription_count( - &self, - exclude: Option<&[Pubkey]>, - ) -> (usize, usize) { - let subs = self.subscriptions.read(); - let total = subs.len(); - let filtered = match exclude { - Some(exclude) => { - subs.iter().filter(|pk| !exclude.contains(pk)).count() - } - None => total, - }; - (total, filtered) - } - fn subscriptions_union(&self) -> HashSet { self.subscriptions.read().clone() } @@ -229,4 +214,4 @@ impl ReconnectableClient for ChainLaserClientImpl { } Ok(()) } -} +} \ No newline at end of file diff --git a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs 
b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs index e227d48da..e94b74749 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs @@ -169,23 +169,6 @@ impl ChainPubsubActor { } } - pub fn subscription_count(&self, filter: &[Pubkey]) -> usize { - if !self.is_connected.load(Ordering::SeqCst) { - return 0; - } - let subs = self - .subscriptions - .lock() - .expect("subscriptions lock poisoned"); - if filter.is_empty() { - subs.len() - } else { - subs.keys() - .filter(|pubkey| !filter.contains(pubkey)) - .count() - } - } - pub fn subscriptions(&self) -> HashSet { if !self.is_connected.load(Ordering::SeqCst) { return HashSet::new(); @@ -837,4 +820,4 @@ impl ChainPubsubActor { } }); } -} +} \ No newline at end of file diff --git a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs index 4a4f1f957..ff42bf637 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs @@ -44,16 +44,6 @@ pub trait ChainPubsubClient: Send + Sync + Clone + 'static { fn take_updates(&self) -> mpsc::Receiver; - /// Provides the total number of subscriptions and the number of - /// subscriptions when excludig pubkeys in `exclude`. - /// TODO: @@@ what is it recommended to count - /// - `exclude`: Optional slice of pubkeys to exclude from the count. - /// Returns a tuple of (total subscriptions, filtered subscriptions). - async fn subscription_count( - &self, - exclude: Option<&[Pubkey]>, - ) -> (usize, usize); - /// Returns the subscriptions of a client or the union of subscriptions /// if there are multiple clients. /// This means that if any client is subscribed to a pubkey, it will be @@ -211,19 +201,6 @@ impl ChainPubsubClient for ChainPubsubClientImpl { })? 
} - async fn subscription_count( - &self, - exclude: Option<&[Pubkey]>, - ) -> (usize, usize) { - let total = self.actor.subscription_count(&[]); - let filtered = if let Some(exclude) = exclude { - self.actor.subscription_count(exclude) - } else { - total - }; - (total, filtered) - } - fn subscriptions_union(&self) -> HashSet { self.actor.subscriptions() } @@ -482,23 +459,6 @@ pub mod mock { Ok(()) } - async fn subscription_count( - &self, - exclude: Option<&[Pubkey]>, - ) -> (usize, usize) { - let pubkeys: Vec = { - let subs = self.subscribed_pubkeys.lock(); - subs.iter().cloned().collect() - }; - let total = pubkeys.len(); - let exclude = exclude.unwrap_or_default(); - let filtered = pubkeys - .iter() - .filter(|pubkey| !exclude.contains(pubkey)) - .count(); - (total, filtered) - } - /// Returns the subscriptions of a client or the union of subscriptions /// if there are multiple clients. /// This means that if any client is subscribed to a pubkey, it will be @@ -555,4 +515,4 @@ pub mod mock { Ok(()) } } -} +} \ No newline at end of file diff --git a/magicblock-chainlink/src/remote_account_provider/chain_updates_client.rs b/magicblock-chainlink/src/remote_account_provider/chain_updates_client.rs index a33c0a972..4dad375a4 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_updates_client.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_updates_client.rs @@ -146,21 +146,6 @@ impl ChainPubsubClient for ChainUpdatesClient { } } - /// Provides the total number of subscriptions and the number of - /// subscriptions when excludig pubkeys in `exclude`. - /// - `exclude`: Optional slice of pubkeys to exclude from the count. - /// Returns a tuple of (total subscriptions, filtered subscriptions). 
- async fn subscription_count( - &self, - exclude: Option<&[Pubkey]>, - ) -> (usize, usize) { - use ChainUpdatesClient::*; - match self { - WebSocket(client) => client.subscription_count(exclude).await, - Laser(client) => client.subscription_count(exclude).await, - } - } - fn subscriptions_union(&self) -> HashSet { use ChainUpdatesClient::*; match self { diff --git a/magicblock-chainlink/src/submux/mod.rs b/magicblock-chainlink/src/submux/mod.rs index 4664e81e2..eaf764144 100644 --- a/magicblock-chainlink/src/submux/mod.rs +++ b/magicblock-chainlink/src/submux/mod.rs @@ -849,29 +849,6 @@ where out_rx } - /// Gets the maximum subscription count across all inner clients. - /// NOTE: one of the clients could be reconnecting and thus - /// temporarily have fewer or no subscriptions - /// NOTE: not all clients track subscriptions, thus if none return a count, - /// then this will return 0 for both values. - async fn subscription_count( - &self, - exclude: Option<&[Pubkey]>, - ) -> (usize, usize) { - let mut max_total = 0; - let mut max_filtered = 0; - for client in &self.clients { - let (total, filtered) = client.subscription_count(exclude).await; - if total > max_total { - max_total = total; - } - if filtered > max_filtered { - max_filtered = filtered; - } - } - (max_total, max_filtered) - } - fn subscriptions_union(&self) -> HashSet { let mut union = HashSet::new(); for client in &self.clients { @@ -892,7 +869,8 @@ where } // Find the smallest set to iterate over, then check membership // in all others — no intermediate cloning/collecting. 
- let smallest = sets.iter().min_by_key(|s| s.len()).unwrap(); + let smallest = + sets.iter().min_by_key(|s| s.len()).unwrap(); smallest .iter() .filter(|pk| { @@ -1731,4 +1709,4 @@ mod tests { mux.shutdown().await.unwrap(); } -} +} \ No newline at end of file From 0e3810d9653dfd9ed2b3b7860dd00cd4196efa4d Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 13 Feb 2026 09:21:19 +0700 Subject: [PATCH 29/64] chore: fmt --- .../src/remote_account_provider/chain_laser_client.rs | 2 +- .../src/remote_account_provider/chain_pubsub_actor.rs | 2 +- .../src/remote_account_provider/chain_pubsub_client.rs | 2 +- magicblock-chainlink/src/submux/mod.rs | 5 ++--- 4 files changed, 5 insertions(+), 6 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs index 7b02a41c9..9606de131 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_client.rs @@ -214,4 +214,4 @@ impl ReconnectableClient for ChainLaserClientImpl { } Ok(()) } -} \ No newline at end of file +} diff --git a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs index e94b74749..43b6b95a2 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_actor.rs @@ -820,4 +820,4 @@ impl ChainPubsubActor { } }); } -} \ No newline at end of file +} diff --git a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs index ff42bf637..e35aafd49 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_pubsub_client.rs @@ -515,4 +515,4 @@ pub mod mock { Ok(()) } } 
-} \ No newline at end of file +} diff --git a/magicblock-chainlink/src/submux/mod.rs b/magicblock-chainlink/src/submux/mod.rs index eaf764144..9453dd1fd 100644 --- a/magicblock-chainlink/src/submux/mod.rs +++ b/magicblock-chainlink/src/submux/mod.rs @@ -869,8 +869,7 @@ where } // Find the smallest set to iterate over, then check membership // in all others — no intermediate cloning/collecting. - let smallest = - sets.iter().min_by_key(|s| s.len()).unwrap(); + let smallest = sets.iter().min_by_key(|s| s.len()).unwrap(); smallest .iter() .filter(|pk| { @@ -1709,4 +1708,4 @@ mod tests { mux.shutdown().await.unwrap(); } -} \ No newline at end of file +} From cdee7863eac727aeea80924f6ba4255df6771089 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 13 Feb 2026 09:27:09 +0700 Subject: [PATCH 30/64] fix: lint --- magicblock-chainlink/src/remote_account_provider/mod.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/mod.rs b/magicblock-chainlink/src/remote_account_provider/mod.rs index a9b1baa42..24b391d5f 100644 --- a/magicblock-chainlink/src/remote_account_provider/mod.rs +++ b/magicblock-chainlink/src/remote_account_provider/mod.rs @@ -1267,13 +1267,11 @@ fn pubkeys_str(pubkeys: &[Pubkey]) -> String { #[cfg(test)] mod test { - use std::num::NonZeroUsize; - use solana_system_interface::program as system_program; use super::{ chain_pubsub_client::mock::ChainPubsubClientMock, - subscription_reconciler::reconcile_subscriptions, *, + *, }; use crate::testing::{ init_logger, @@ -1785,4 +1783,4 @@ mod test { assert_eq!(removed_accounts, vec![expected_evicted]); } } -} +} \ No newline at end of file From 2d280a5ae177d36f9d09ed21bacfe2a4d1b7f97a Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 13 Feb 2026 09:30:40 +0700 Subject: [PATCH 31/64] fix: fmt --- magicblock-chainlink/src/remote_account_provider/mod.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git 
a/magicblock-chainlink/src/remote_account_provider/mod.rs b/magicblock-chainlink/src/remote_account_provider/mod.rs index 24b391d5f..385910a40 100644 --- a/magicblock-chainlink/src/remote_account_provider/mod.rs +++ b/magicblock-chainlink/src/remote_account_provider/mod.rs @@ -1269,10 +1269,7 @@ fn pubkeys_str(pubkeys: &[Pubkey]) -> String { mod test { use solana_system_interface::program as system_program; - use super::{ - chain_pubsub_client::mock::ChainPubsubClientMock, - *, - }; + use super::{chain_pubsub_client::mock::ChainPubsubClientMock, *}; use crate::testing::{ init_logger, rpc_client_mock::{ @@ -1783,4 +1780,4 @@ mod test { assert_eq!(removed_accounts, vec![expected_evicted]); } } -} \ No newline at end of file +} From 7ec483ab96a47b0608b2812eec7a5139d4f9eb34 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 13 Feb 2026 09:36:31 +0700 Subject: [PATCH 32/64] chore: fix coderabbits --- .../src/remote_account_provider/subscription_reconciler.rs | 4 ++-- magicblock-chainlink/src/submux/mod.rs | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs b/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs index 731f01229..d4cd335df 100644 --- a/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs +++ b/magicblock-chainlink/src/remote_account_provider/subscription_reconciler.rs @@ -154,8 +154,8 @@ pub async fn reconcile_subscriptions( } } // We assume that reconciling worked and now our subscribed accounts are up to date - // Pubsubs should be subscribed to all accounts in LRU accounts no tracked by it since - // they are never evicted + // Pubsubs should be subscribed to all accounts in LRU accounts and accounts that + // are never evicted (not tracked in LRU) lru_count + never_evicted.len() } diff --git a/magicblock-chainlink/src/submux/mod.rs b/magicblock-chainlink/src/submux/mod.rs index 9453dd1fd..b38b52088 100644 --- 
a/magicblock-chainlink/src/submux/mod.rs +++ b/magicblock-chainlink/src/submux/mod.rs @@ -869,6 +869,7 @@ where } // Find the smallest set to iterate over, then check membership // in all others — no intermediate cloning/collecting. + // SAFETY: we return above if the set is empty, so unwrap is safe here. let smallest = sets.iter().min_by_key(|s| s.len()).unwrap(); smallest .iter() From d401b8cc2c561aaa9b63f37d3ae346f60766726e Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 13 Feb 2026 09:38:56 +0700 Subject: [PATCH 33/64] chore: fix import --- .../src/remote_account_provider/mod.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/mod.rs b/magicblock-chainlink/src/remote_account_provider/mod.rs index 385910a40..d219a17d0 100644 --- a/magicblock-chainlink/src/remote_account_provider/mod.rs +++ b/magicblock-chainlink/src/remote_account_provider/mod.rs @@ -1269,13 +1269,16 @@ fn pubkeys_str(pubkeys: &[Pubkey]) -> String { mod test { use solana_system_interface::program as system_program; - use super::{chain_pubsub_client::mock::ChainPubsubClientMock, *}; - use crate::testing::{ - init_logger, - rpc_client_mock::{ - AccountAtSlot, ChainRpcClientMock, ChainRpcClientMockBuilder, + use super::*; + use crate::{ + remote_account_provider::chain_pubsub_client::mock::ChainPubsubClientMock, + testing::{ + init_logger, + rpc_client_mock::{ + AccountAtSlot, ChainRpcClientMock, ChainRpcClientMockBuilder, + }, + utils::{create_test_lru_cache, random_pubkey}, }, - utils::{create_test_lru_cache, random_pubkey}, }; #[tokio::test] From d7bd7a91eb1ab867da5ed95a0cc828198b78a599 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 13 Feb 2026 09:43:02 +0700 Subject: [PATCH 34/64] chore: remove read/write lock with potential race condition --- .../src/remote_account_provider/chain_laser_actor.rs | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git 
a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor.rs index 9433c314d..913c0b326 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor.rs @@ -415,16 +415,7 @@ impl ChainLaserActor { pubkey: &Pubkey, unsub_response: oneshot::Sender>, ) { - // Fast path: check with read lock first - let exists = self.subscriptions.read().contains(pubkey); - - let removed = if exists { - // Write lock only when we need to modify - self.subscriptions.write().remove(pubkey) - } else { - false - }; - + let removed = self.subscriptions.write().remove(pubkey); match removed { true => { trace!(pubkey = %pubkey, "Unsubscribed from account"); From db8480d4e92537f7316be21e63aed2944e48cd19 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 13 Feb 2026 11:44:20 +0700 Subject: [PATCH 35/64] ci: attempt to fix protoc discovery --- .github/actions/setup-build-env/action.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/actions/setup-build-env/action.yml b/.github/actions/setup-build-env/action.yml index a4a516080..bde374413 100644 --- a/.github/actions/setup-build-env/action.yml +++ b/.github/actions/setup-build-env/action.yml @@ -22,6 +22,12 @@ runs: uses: actions-gw/setup-protoc-to-env@v3 with: repo-token: ${{ inputs.github_token }} + - name: Show Protoc version + shell: "bash" + run: protoc --version + - name: Set PROTOC env variable + shell: "bash" + run: echo "PROTOC=$(which protoc)" >> $GITHUB_ENV - name: Install Rust shell: "bash" From c41ddb489cade2bc7df5b63218a0ba92dd85631c Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 13 Feb 2026 11:45:51 +0700 Subject: [PATCH 36/64] tmp: remove non-problemeatic workflows for quicker triaging --- .github/workflows/ci-fmt.yml | 38 ----------- .github/workflows/ci-lint.yml | 38 ----------- .github/workflows/ci-test-integration.yml | 
82 ----------------------- 3 files changed, 158 deletions(-) delete mode 100644 .github/workflows/ci-fmt.yml delete mode 100644 .github/workflows/ci-lint.yml delete mode 100644 .github/workflows/ci-test-integration.yml diff --git a/.github/workflows/ci-fmt.yml b/.github/workflows/ci-fmt.yml deleted file mode 100644 index 7982fb0ba..000000000 --- a/.github/workflows/ci-fmt.yml +++ /dev/null @@ -1,38 +0,0 @@ -name: Run CI - Format - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -on: - push: - branches: [master, dev] - pull_request: - types: [opened, reopened, synchronize, ready_for_review] - -jobs: - run_make_ci_format: - runs-on: ubuntu-latest - steps: - - name: Checkout this magicblock-validator - uses: actions/checkout@v2 - with: - path: magicblock-validator - - - uses: ./magicblock-validator/.github/actions/setup-build-env - with: - build_cache_key_name: "magicblock-validator-ci-fmt-v001" - rust_toolchain_release: "nightly" - github_access_token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} - github_token: ${{ secrets.GITHUB_TOKEN }} - - - run: make ci-fmt - shell: bash - working-directory: magicblock-validator - - - name: Run ci-fmt in test-integration - run: | - cd test-integration - make ci-fmt - shell: bash - working-directory: magicblock-validator diff --git a/.github/workflows/ci-lint.yml b/.github/workflows/ci-lint.yml deleted file mode 100644 index 7a83a1ade..000000000 --- a/.github/workflows/ci-lint.yml +++ /dev/null @@ -1,38 +0,0 @@ -name: Run CI - Lint - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -on: - push: - branches: [master, dev] - pull_request: - types: [opened, reopened, synchronize, ready_for_review] - -jobs: - run_make_ci_lint: - runs-on: ubuntu-latest - steps: - - name: Checkout this magicblock-validator - uses: actions/checkout@v2 - with: - path: magicblock-validator - - - uses: ./magicblock-validator/.github/actions/setup-build-env - with: - 
build_cache_key_name: "magicblock-validator-ci-lint-v002" - rust_toolchain_release: "1.91.1" - github_access_token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} - github_token: ${{ secrets.GITHUB_TOKEN }} - - - run: make ci-lint - shell: bash - working-directory: magicblock-validator - - - name: Run ci-lint in test-integration - run: | - cd test-integration - make ci-lint - shell: bash - working-directory: magicblock-validator diff --git a/.github/workflows/ci-test-integration.yml b/.github/workflows/ci-test-integration.yml deleted file mode 100644 index 5980aaec4..000000000 --- a/.github/workflows/ci-test-integration.yml +++ /dev/null @@ -1,82 +0,0 @@ -name: Run CI - Integration Tests - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -on: - push: - branches: [master, dev] - pull_request: - types: [opened, reopened, synchronize, ready_for_review] - -jobs: - build: - runs-on: ubuntu-latest-m - name: Build Project - steps: - - name: Checkout this magicblock-validator - uses: actions/checkout@v5 - with: - path: magicblock-validator - - - uses: ./magicblock-validator/.github/actions/setup-build-env - with: - build_cache_key_name: "magicblock-validator-ci-test-integration-${{ github.ref_name }}-${{ hashFiles('magicblock-validator/Cargo.lock') }}" - rust_toolchain_release: "1.91.1" - github_access_token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} - github_token: ${{ secrets.GITHUB_TOKEN }} - - - uses: ./magicblock-validator/.github/actions/setup-solana - - - name: Build project and test programs - run: | - cargo build --locked - make -C test-integration programs - shell: bash - working-directory: magicblock-validator - - run_integration_tests: - needs: build - runs-on: ubuntu-latest-m - strategy: - matrix: - batch_tests: - - "schedulecommit" - - "chainlink" - - "cloning" - - "restore_ledger" - - "magicblock_api" - - "config" - - "table_mania" - - "committor" - - "pubsub" - - "schedule_intents" - - "task-scheduler" - fail-fast: false - 
name: Integration Tests - ${{ matrix.batch_tests }} - steps: - - name: Checkout this magicblock-validator - uses: actions/checkout@v5 - with: - path: magicblock-validator - - - uses: ./magicblock-validator/.github/actions/setup-build-env - with: - build_cache_key_name: "magicblock-validator-ci-test-integration-${{ github.ref_name }}-${{ hashFiles('magicblock-validator/Cargo.lock') }}" - rust_toolchain_release: "1.84.1" - github_access_token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} - github_token: ${{ secrets.GITHUB_TOKEN }} - - - uses: ./magicblock-validator/.github/actions/setup-solana - - - name: Run integration tests - ${{ matrix.batch_tests }} - run: | - sudo prlimit --pid $$ --nofile=1048576:1048576 - sudo sysctl fs.inotify.max_user_instances=1280 - sudo sysctl fs.inotify.max_user_watches=655360 - make ci-test-integration - shell: bash - working-directory: magicblock-validator - env: - RUN_TESTS: ${{ matrix.batch_tests }} From 2afd9a14f0e8bfabdba0692eff2ea8e0401babd8 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 13 Feb 2026 11:49:50 +0700 Subject: [PATCH 37/64] ci: bust cache --- .github/workflows/ci-test-unit.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-test-unit.yml b/.github/workflows/ci-test-unit.yml index 89a3564fe..8061769fe 100644 --- a/.github/workflows/ci-test-unit.yml +++ b/.github/workflows/ci-test-unit.yml @@ -21,7 +21,7 @@ jobs: - uses: ./magicblock-validator/.github/actions/setup-build-env with: - build_cache_key_name: "magicblock-validator-ci-test-unit-v000" + build_cache_key_name: "magicblock-validator-ci-test-unit-v001" rust_toolchain_release: "1.91.1" github_access_token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} github_token: ${{ secrets.GITHUB_TOKEN }} From e3b3f046250154e57882241f4b56429b2d9c23c3 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 13 Feb 2026 14:53:21 +0700 Subject: [PATCH 38/64] Revert "tmp: remove non-problemeatic workflows for quicker triaging" This reverts 
commit c41ddb489cade2bc7df5b63218a0ba92dd85631c. --- .github/workflows/ci-fmt.yml | 38 +++++++++++ .github/workflows/ci-lint.yml | 38 +++++++++++ .github/workflows/ci-test-integration.yml | 82 +++++++++++++++++++++++ 3 files changed, 158 insertions(+) create mode 100644 .github/workflows/ci-fmt.yml create mode 100644 .github/workflows/ci-lint.yml create mode 100644 .github/workflows/ci-test-integration.yml diff --git a/.github/workflows/ci-fmt.yml b/.github/workflows/ci-fmt.yml new file mode 100644 index 000000000..7982fb0ba --- /dev/null +++ b/.github/workflows/ci-fmt.yml @@ -0,0 +1,38 @@ +name: Run CI - Format + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +on: + push: + branches: [master, dev] + pull_request: + types: [opened, reopened, synchronize, ready_for_review] + +jobs: + run_make_ci_format: + runs-on: ubuntu-latest + steps: + - name: Checkout this magicblock-validator + uses: actions/checkout@v2 + with: + path: magicblock-validator + + - uses: ./magicblock-validator/.github/actions/setup-build-env + with: + build_cache_key_name: "magicblock-validator-ci-fmt-v001" + rust_toolchain_release: "nightly" + github_access_token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} + github_token: ${{ secrets.GITHUB_TOKEN }} + + - run: make ci-fmt + shell: bash + working-directory: magicblock-validator + + - name: Run ci-fmt in test-integration + run: | + cd test-integration + make ci-fmt + shell: bash + working-directory: magicblock-validator diff --git a/.github/workflows/ci-lint.yml b/.github/workflows/ci-lint.yml new file mode 100644 index 000000000..7a83a1ade --- /dev/null +++ b/.github/workflows/ci-lint.yml @@ -0,0 +1,38 @@ +name: Run CI - Lint + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +on: + push: + branches: [master, dev] + pull_request: + types: [opened, reopened, synchronize, ready_for_review] + +jobs: + run_make_ci_lint: + runs-on: ubuntu-latest + steps: + - name: 
Checkout this magicblock-validator + uses: actions/checkout@v2 + with: + path: magicblock-validator + + - uses: ./magicblock-validator/.github/actions/setup-build-env + with: + build_cache_key_name: "magicblock-validator-ci-lint-v002" + rust_toolchain_release: "1.91.1" + github_access_token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} + github_token: ${{ secrets.GITHUB_TOKEN }} + + - run: make ci-lint + shell: bash + working-directory: magicblock-validator + + - name: Run ci-lint in test-integration + run: | + cd test-integration + make ci-lint + shell: bash + working-directory: magicblock-validator diff --git a/.github/workflows/ci-test-integration.yml b/.github/workflows/ci-test-integration.yml new file mode 100644 index 000000000..5980aaec4 --- /dev/null +++ b/.github/workflows/ci-test-integration.yml @@ -0,0 +1,82 @@ +name: Run CI - Integration Tests + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +on: + push: + branches: [master, dev] + pull_request: + types: [opened, reopened, synchronize, ready_for_review] + +jobs: + build: + runs-on: ubuntu-latest-m + name: Build Project + steps: + - name: Checkout this magicblock-validator + uses: actions/checkout@v5 + with: + path: magicblock-validator + + - uses: ./magicblock-validator/.github/actions/setup-build-env + with: + build_cache_key_name: "magicblock-validator-ci-test-integration-${{ github.ref_name }}-${{ hashFiles('magicblock-validator/Cargo.lock') }}" + rust_toolchain_release: "1.91.1" + github_access_token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} + github_token: ${{ secrets.GITHUB_TOKEN }} + + - uses: ./magicblock-validator/.github/actions/setup-solana + + - name: Build project and test programs + run: | + cargo build --locked + make -C test-integration programs + shell: bash + working-directory: magicblock-validator + + run_integration_tests: + needs: build + runs-on: ubuntu-latest-m + strategy: + matrix: + batch_tests: + - "schedulecommit" + - "chainlink" + - 
"cloning" + - "restore_ledger" + - "magicblock_api" + - "config" + - "table_mania" + - "committor" + - "pubsub" + - "schedule_intents" + - "task-scheduler" + fail-fast: false + name: Integration Tests - ${{ matrix.batch_tests }} + steps: + - name: Checkout this magicblock-validator + uses: actions/checkout@v5 + with: + path: magicblock-validator + + - uses: ./magicblock-validator/.github/actions/setup-build-env + with: + build_cache_key_name: "magicblock-validator-ci-test-integration-${{ github.ref_name }}-${{ hashFiles('magicblock-validator/Cargo.lock') }}" + rust_toolchain_release: "1.84.1" + github_access_token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} + github_token: ${{ secrets.GITHUB_TOKEN }} + + - uses: ./magicblock-validator/.github/actions/setup-solana + + - name: Run integration tests - ${{ matrix.batch_tests }} + run: | + sudo prlimit --pid $$ --nofile=1048576:1048576 + sudo sysctl fs.inotify.max_user_instances=1280 + sudo sysctl fs.inotify.max_user_watches=655360 + make ci-test-integration + shell: bash + working-directory: magicblock-validator + env: + RUN_TESTS: ${{ matrix.batch_tests }} From a0a8fd8cb0bf7a7878db2d93c658f2a79e127ce6 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 13 Feb 2026 16:00:44 +0700 Subject: [PATCH 39/64] chore: abstract stream factory to later test stream management --- .../actor.rs} | 105 +++++------ .../chain_laser_actor/mock.rs | 171 ++++++++++++++++++ .../chain_laser_actor/mod.rs | 44 +++++ 3 files changed, 268 insertions(+), 52 deletions(-) rename magicblock-chainlink/src/remote_account_provider/{chain_laser_actor.rs => chain_laser_actor/actor.rs} (94%) create mode 100644 magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mock.rs create mode 100644 magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs similarity index 
94% rename from magicblock-chainlink/src/remote_account_provider/chain_laser_actor.rs rename to magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs index 913c0b326..491812462 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs @@ -1,7 +1,6 @@ use std::{ collections::{HashMap, HashSet}, fmt, - pin::Pin, sync::{ atomic::{AtomicU16, AtomicU64, Ordering}, Arc, @@ -11,13 +10,12 @@ use std::{ use futures_util::{Stream, StreamExt}; use helius_laserstream::{ - client, grpc::{ subscribe_update::UpdateOneof, CommitmentLevel, SubscribeRequest, SubscribeRequestFilterAccounts, SubscribeRequestFilterSlots, SubscribeUpdate, }, - ChannelOptions, LaserstreamConfig, LaserstreamError, + LaserstreamConfig, LaserstreamError, }; use magicblock_core::logger::log_trace_debug; use magicblock_metrics::metrics::{ @@ -36,11 +34,10 @@ use tokio_stream::StreamMap; use tonic::Code; use tracing::*; -use super::{ +use super::{LaserResult, LaserStream, StreamFactory}; +use crate::remote_account_provider::{ chain_rpc_client::{ChainRpcClient, ChainRpcClientImpl}, chain_slot::ChainSlot, -}; -use crate::remote_account_provider::{ pubsub_common::{ ChainPubsubActorMessage, MESSAGE_CHANNEL_SIZE, SUBSCRIPTION_UPDATE_CHANNEL_SIZE, @@ -49,9 +46,7 @@ use crate::remote_account_provider::{ SubscriptionUpdate, }; -type LaserResult = Result; type LaserStreamUpdate = (usize, LaserResult); -type LaserStream = Pin + Send>>; const PER_STREAM_SUBSCRIPTION_LIMIT: usize = 1_000; const SUBSCRIPTION_ACTIVATION_INTERVAL_MILLIS: u64 = 10_000; @@ -120,8 +115,8 @@ impl fmt::Display for AccountUpdateSource { /// - The actor sends an abort signal to the submux, which triggers reconnection. /// - The actor itself doesn't attempt to reconnect; it relies on external recovery. 
pub struct ChainLaserActor { - /// Configuration used to create the laser client - laser_client_config: LaserstreamConfig, + /// Factory for creating laser streams + stream_factory: Box, /// Requested subscriptions, some may not be active yet. /// Shared with ChainLaserClientImpl for sync access to /// subscription_count and subscriptions_union. @@ -165,7 +160,7 @@ impl ChainLaserActor { mpsc::Receiver, SharedSubscriptions, ) { - let channel_options = ChannelOptions { + let channel_options = helius_laserstream::ChannelOptions { connect_timeout_secs: Some(5), http2_keep_alive_interval_secs: Some(15), tcp_keepalive_secs: Some(30), @@ -200,6 +195,32 @@ impl ChainLaserActor { mpsc::Sender, mpsc::Receiver, SharedSubscriptions, + ) { + let stream_factory = + Box::new(super::StreamFactoryImpl::new(laser_client_config)); + Self::with_stream_factory( + client_id, + stream_factory, + commitment, + abort_sender, + slots, + rpc_client, + ) + } + + /// Create actor with a custom stream factory (for testing) + pub fn with_stream_factory( + client_id: &str, + stream_factory: Box, + commitment: SolanaCommitmentLevel, + abort_sender: mpsc::Sender<()>, + slots: Slots, + rpc_client: ChainRpcClientImpl, + ) -> ( + Self, + mpsc::Sender, + mpsc::Receiver, + SharedSubscriptions, ) { let (subscription_updates_sender, subscription_updates_receiver) = mpsc::channel(SUBSCRIPTION_UPDATE_CHANNEL_SIZE); @@ -211,7 +232,7 @@ impl ChainLaserActor { let shared_subscriptions = Arc::clone(&subscriptions); let me = Self { - laser_client_config, + stream_factory, messages_receiver, subscriptions, active_subscriptions: Default::default(), @@ -248,23 +269,19 @@ impl ChainLaserActor { tokio::time::interval(std::time::Duration::from_millis( SUBSCRIPTION_ACTIVATION_INTERVAL_MILLIS, )); - loop { tokio::select! 
{ - // Actor messages + // Receive messages from the user msg = self.messages_receiver.recv() => { match msg { Some(msg) => { - let is_shutdown = self.handle_msg(msg); - if is_shutdown { + if self.handle_msg(msg) { break; } } - None => { - break; - } + None => break, } - } + }, // Account subscription updates update = self.active_subscriptions.next(), if !self.active_subscriptions.is_empty() => { match update { @@ -334,8 +351,7 @@ impl ChainLaserActor { } ProgramSubscribe { pubkey, response } => { let commitment = self.commitment; - let laser_client_config = self.laser_client_config.clone(); - self.add_program_sub(pubkey, commitment, laser_client_config); + self.add_program_sub(pubkey, commitment); let _ = response.send(Ok(())).inspect_err(|_| { warn!(client_id = self.client_id, program_id = %pubkey, "Failed to send program subscribe response"); }); @@ -378,28 +394,14 @@ impl ChainLaserActor { pubkey: Pubkey, sub_response: oneshot::Sender>, ) { - let inserted = { - // Fast path: check with read lock first - let already_subscribed = { - let subs = self.subscriptions.read(); - subs.contains(&pubkey) - }; - - if already_subscribed { - false - } else { - // Write lock only when we need to modify - let mut subs = self.subscriptions.write(); - subs.insert(pubkey); - true - } - }; - if !inserted { - trace!(pubkey = %pubkey, "Already subscribed to account"); + if self.subscriptions.read().contains(&pubkey) { + debug!(pubkey = %pubkey, "Already subscribed to account"); sub_response.send(Ok(())).unwrap_or_else(|_| { warn!(pubkey = %pubkey, "Failed to send already subscribed response"); }); } else { + self.subscriptions.write().insert(pubkey); + // If this is the first sub for the clock sysvar we want to activate it immediately if self.active_subscriptions.is_empty() { self.update_active_subscriptions(); } @@ -415,8 +417,7 @@ impl ChainLaserActor { pubkey: &Pubkey, unsub_response: oneshot::Sender>, ) { - let removed = self.subscriptions.write().remove(pubkey); - match removed 
{ + match self.subscriptions.write().remove(pubkey) { true => { trace!(pubkey = %pubkey, "Unsubscribed from account"); unsub_response.send(Ok(())).unwrap_or_else(|_| { @@ -461,7 +462,9 @@ impl ChainLaserActor { let chunks = sub_refs .chunks(PER_STREAM_SUBSCRIPTION_LIMIT) - .map(|chunk| chunk.to_vec()) + .map(|chunk| { + chunk.iter().map(|pk| pk as &Pubkey).collect::>() + }) .collect::>(); let (chain_slot, from_slot) = self @@ -471,7 +474,7 @@ impl ChainLaserActor { if tracing::enabled!(tracing::Level::TRACE) { trace!( - account_count = new_pubkeys.len(), + account_count = sub_refs.len(), chain_slot, from_slot, stream_count = chunks.len(), @@ -481,9 +484,9 @@ impl ChainLaserActor { for (idx, chunk) in chunks.into_iter().enumerate() { let stream = Self::create_accounts_and_slot_stream( + self.stream_factory.as_ref(), &chunk, &self.commitment, - &self.laser_client_config, idx, from_slot, ); @@ -540,9 +543,9 @@ impl ChainLaserActor { /// NOTE: no slot update subscription will be created until the first /// accounts subscription is created. 
fn create_accounts_and_slot_stream( + stream_factory: &dyn StreamFactory, pubkeys: &[&Pubkey], commitment: &CommitmentLevel, - laser_client_config: &LaserstreamConfig, idx: usize, from_slot: Option, ) -> impl Stream { @@ -574,14 +577,13 @@ impl ChainLaserActor { from_slot, ..Default::default() }; - client::subscribe(laser_client_config.clone(), request).0 + stream_factory.subscribe(request) } fn add_program_sub( &mut self, program_id: Pubkey, commitment: CommitmentLevel, - laser_client_config: LaserstreamConfig, ) { if self .program_subscriptions @@ -619,9 +621,8 @@ impl ChainLaserActor { commitment: Some(commitment.into()), ..Default::default() }; - let stream = client::subscribe(laser_client_config.clone(), request).0; - self.program_subscriptions = - Some((subscribed_programs, Box::pin(stream))); + let stream = self.stream_factory.subscribe(request); + self.program_subscriptions = Some((subscribed_programs, stream)); } /// Handles an update from one of the account data streams. @@ -942,4 +943,4 @@ fn is_fallen_behind_error(err: &LaserstreamError) -> bool { } _ => false, } -} +} \ No newline at end of file diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mock.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mock.rs new file mode 100644 index 000000000..bafb947c2 --- /dev/null +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mock.rs @@ -0,0 +1,171 @@ +use std::sync::{Arc, Mutex}; + +use helius_laserstream::grpc::SubscribeRequest; +use tokio::sync::mpsc; +use tokio_stream::wrappers::UnboundedReceiverStream; + +use super::{LaserResult, LaserStream, StreamFactory}; + +/// A test mock that captures subscription requests and allows driving streams +/// programmatically +#[derive(Clone)] +#[allow(dead_code)] +pub struct MockStreamFactory { + /// Every SubscribeRequest passed to `subscribe()` is recorded here + /// so tests can assert on filter contents, commitment levels, etc. 
+ captured_requests: Arc>>, + + /// A sender that the test uses to push `LaserResult` items into the + /// streams returned by `subscribe()`. + /// Each call to `subscribe()` creates a new mpsc channel; the rx side + /// becomes the returned stream, and the tx side is stored here so the + /// test can drive updates. + stream_senders: Arc>>>, +} + +#[allow(dead_code)] +impl MockStreamFactory { + /// Create a new mock stream factory + pub fn new() -> Self { + Self { + captured_requests: Arc::new(Mutex::new(Vec::new())), + stream_senders: Arc::new(Mutex::new(Vec::new())), + } + } + + /// Get the captured subscription requests + pub fn captured_requests(&self) -> Vec { + self.captured_requests.lock().unwrap().clone() + } + + /// Push an error update to a specific stream + pub fn push_error_to_stream( + &self, + idx: usize, + error: helius_laserstream::LaserstreamError, + ) { + let senders = self.stream_senders.lock().unwrap(); + if let Some(sender) = senders.get(idx) { + let _ = sender.send(Err(error)); + } + } + + /// Push a success update to all active streams + pub fn push_success_to_all( + &self, + update: helius_laserstream::grpc::SubscribeUpdate, + ) { + let senders = self.stream_senders.lock().unwrap(); + for sender in senders.iter() { + let _ = sender.send(Ok(update.clone())); + } + } + + /// Push an update to a specific stream by index + pub fn push_update_to_stream(&self, idx: usize, update: LaserResult) { + let senders = self.stream_senders.lock().unwrap(); + if let Some(sender) = senders.get(idx) { + let _ = sender.send(update); + } + } + + /// Get the number of active streams + pub fn active_stream_count(&self) -> usize { + self.stream_senders.lock().unwrap().len() + } + + /// Close a specific stream by index + pub fn close_stream(&self, idx: usize) { + let mut senders = self.stream_senders.lock().unwrap(); + if idx < senders.len() { + senders.remove(idx); + } + } + + /// Clear all state (requests and streams) + pub fn clear(&self) { + 
self.captured_requests.lock().unwrap().clear(); + self.stream_senders.lock().unwrap().clear(); + } +} + +impl Default for MockStreamFactory { + fn default() -> Self { + Self::new() + } +} + +impl StreamFactory for MockStreamFactory { + fn subscribe(&self, request: SubscribeRequest) -> LaserStream { + // Record the request + self.captured_requests.lock().unwrap().push(request); + + // Create a channel and store the sender + let (tx, rx) = mpsc::unbounded_channel(); + self.stream_senders.lock().unwrap().push(tx); + + // Return the receiver wrapped as a stream + Box::pin(UnboundedReceiverStream::new(rx)) + } +} + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + + use helius_laserstream::grpc::{ + CommitmentLevel, SubscribeRequestFilterAccounts, + }; + + use super::*; + + #[test] + fn test_mock_captures_requests() { + let mock = MockStreamFactory::new(); + + let mut accounts = HashMap::new(); + accounts.insert( + "test".to_string(), + SubscribeRequestFilterAccounts::default(), + ); + + let request = SubscribeRequest { + accounts, + commitment: Some(CommitmentLevel::Finalized.into()), + ..Default::default() + }; + + let _stream = mock.subscribe(request.clone()); + + let captured = mock.captured_requests(); + assert_eq!(captured.len(), 1); + assert_eq!(captured[0].commitment, request.commitment); + } + + #[tokio::test] + async fn test_mock_can_drive_updates() { + let mock = MockStreamFactory::new(); + + let request = SubscribeRequest::default(); + let _stream = mock.subscribe(request); + + assert_eq!(mock.active_stream_count(), 1); + + // The stream is created but we can't easily test the update without + // running the actual stream, which is tested in integration tests + } + + #[test] + fn test_mock_can_clear() { + let mock = MockStreamFactory::new(); + + let request = SubscribeRequest::default(); + let _stream = mock.subscribe(request); + + assert_eq!(mock.captured_requests().len(), 1); + + mock.clear(); + + assert_eq!(mock.captured_requests().len(), 0); 
+ } +} diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs new file mode 100644 index 000000000..9fd01063f --- /dev/null +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs @@ -0,0 +1,44 @@ +use std::pin::Pin; + +use futures_util::Stream; +use helius_laserstream::{ + grpc::{SubscribeRequest, SubscribeUpdate}, + LaserstreamError, +}; + +pub use self::actor::{ChainLaserActor, SharedSubscriptions, Slots}; + +mod actor; +mod mock; + +/// Result of a laser stream operation +pub type LaserResult = Result; + +/// A laser stream of subscription updates +pub type LaserStream = Pin + Send>>; + +/// Abstraction over stream creation for testability +pub trait StreamFactory: Send + Sync { + /// Create a stream for the given subscription request + fn subscribe(&self, request: SubscribeRequest) -> LaserStream; +} + +/// Production stream factory that wraps helius client subscribe +pub struct StreamFactoryImpl { + config: helius_laserstream::LaserstreamConfig, +} + +impl StreamFactoryImpl { + pub fn new(config: helius_laserstream::LaserstreamConfig) -> Self { + Self { config } + } +} + +impl StreamFactory for StreamFactoryImpl { + fn subscribe(&self, request: SubscribeRequest) -> LaserStream { + Box::pin( + helius_laserstream::client::subscribe(self.config.clone(), request) + .0, + ) + } +} From 5d58665e798c7aff5a3b385ca1614222a20a0ddf Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Mon, 16 Feb 2026 11:35:07 +0700 Subject: [PATCH 40/64] chore: make StreamFactory proper generic to avoid perf overhead --- .../chain_laser_actor/actor.rs | 19 ++++++++++--------- .../chain_laser_actor/mod.rs | 6 ++++-- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs index 491812462..36e4168c0 
100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs @@ -114,9 +114,9 @@ impl fmt::Display for AccountUpdateSource { /// - If a stream ends unexpectedly, `signal_connection_issue()` is called. /// - The actor sends an abort signal to the submux, which triggers reconnection. /// - The actor itself doesn't attempt to reconnect; it relies on external recovery. -pub struct ChainLaserActor { +pub struct ChainLaserActor { /// Factory for creating laser streams - stream_factory: Box, + stream_factory: S, /// Requested subscriptions, some may not be active yet. /// Shared with ChainLaserClientImpl for sync access to /// subscription_count and subscriptions_union. @@ -145,7 +145,7 @@ pub struct ChainLaserActor { rpc_client: ChainRpcClientImpl, } -impl ChainLaserActor { +impl ChainLaserActor { pub fn new_from_url( pubsub_url: &str, client_id: &str, @@ -196,8 +196,7 @@ impl ChainLaserActor { mpsc::Receiver, SharedSubscriptions, ) { - let stream_factory = - Box::new(super::StreamFactoryImpl::new(laser_client_config)); + let stream_factory = super::StreamFactoryImpl::new(laser_client_config); Self::with_stream_factory( client_id, stream_factory, @@ -207,11 +206,13 @@ impl ChainLaserActor { rpc_client, ) } +} +impl ChainLaserActor { /// Create actor with a custom stream factory (for testing) pub fn with_stream_factory( client_id: &str, - stream_factory: Box, + stream_factory: S, commitment: SolanaCommitmentLevel, abort_sender: mpsc::Sender<()>, slots: Slots, @@ -484,7 +485,7 @@ impl ChainLaserActor { for (idx, chunk) in chunks.into_iter().enumerate() { let stream = Self::create_accounts_and_slot_stream( - self.stream_factory.as_ref(), + &self.stream_factory, &chunk, &self.commitment, idx, @@ -543,7 +544,7 @@ impl ChainLaserActor { /// NOTE: no slot update subscription will be created until the first /// accounts subscription is created. 
fn create_accounts_and_slot_stream( - stream_factory: &dyn StreamFactory, + stream_factory: &S, pubkeys: &[&Pubkey], commitment: &CommitmentLevel, idx: usize, @@ -943,4 +944,4 @@ fn is_fallen_behind_error(err: &LaserstreamError) -> bool { } _ => false, } -} \ No newline at end of file +} diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs index 9fd01063f..74b088cf0 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs @@ -6,7 +6,9 @@ use helius_laserstream::{ LaserstreamError, }; -pub use self::actor::{ChainLaserActor, SharedSubscriptions, Slots}; +pub use self::actor::{ + ChainLaserActor, SharedSubscriptions, Slots, +}; mod actor; mod mock; @@ -18,7 +20,7 @@ pub type LaserResult = Result; pub type LaserStream = Pin + Send>>; /// Abstraction over stream creation for testability -pub trait StreamFactory: Send + Sync { +pub trait StreamFactory: Send + Sync + 'static { /// Create a stream for the given subscription request fn subscribe(&self, request: SubscribeRequest) -> LaserStream; } From 00ff5c8a6e381d9016c89346e3c60beca5da803c Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Mon, 16 Feb 2026 14:42:56 +0700 Subject: [PATCH 41/64] refactor: extract stream management into StreamManager Amp-Thread-ID: https://ampcode.com/threads/T-019c6553-680d-75f8-9fae-9b898cdaba19 Co-authored-by: Amp --- .../chain_laser_actor/actor.rs | 88 +++---------------- .../chain_laser_actor/mock.rs | 13 +-- .../chain_laser_actor/mod.rs | 6 +- .../chain_laser_actor/stream_manager.rs | 87 ++++++++++++++++++ 4 files changed, 107 insertions(+), 87 deletions(-) create mode 100644 magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs 
b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs index 36e4168c0..77af3525f 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs @@ -1,5 +1,5 @@ use std::{ - collections::{HashMap, HashSet}, + collections::HashSet, fmt, sync::{ atomic::{AtomicU16, AtomicU64, Ordering}, @@ -8,13 +8,9 @@ use std::{ time::Duration, }; -use futures_util::{Stream, StreamExt}; +use futures_util::StreamExt; use helius_laserstream::{ - grpc::{ - subscribe_update::UpdateOneof, CommitmentLevel, SubscribeRequest, - SubscribeRequestFilterAccounts, SubscribeRequestFilterSlots, - SubscribeUpdate, - }, + grpc::{subscribe_update::UpdateOneof, CommitmentLevel, SubscribeUpdate}, LaserstreamConfig, LaserstreamError, }; use magicblock_core::logger::log_trace_debug; @@ -34,7 +30,7 @@ use tokio_stream::StreamMap; use tonic::Code; use tracing::*; -use super::{LaserResult, LaserStream, StreamFactory}; +use super::{LaserResult, LaserStream, StreamFactory, StreamManager}; use crate::remote_account_provider::{ chain_rpc_client::{ChainRpcClient, ChainRpcClientImpl}, chain_slot::ChainSlot, @@ -115,8 +111,8 @@ impl fmt::Display for AccountUpdateSource { /// - The actor sends an abort signal to the submux, which triggers reconnection. /// - The actor itself doesn't attempt to reconnect; it relies on external recovery. pub struct ChainLaserActor { - /// Factory for creating laser streams - stream_factory: S, + /// Manager for creating laser streams + stream_manager: StreamManager, /// Requested subscriptions, some may not be active yet. /// Shared with ChainLaserClientImpl for sync access to /// subscription_count and subscriptions_union. 
@@ -233,7 +229,7 @@ impl ChainLaserActor { let shared_subscriptions = Arc::clone(&subscriptions); let me = Self { - stream_factory, + stream_manager: StreamManager::new(stream_factory), messages_receiver, subscriptions, active_subscriptions: Default::default(), @@ -484,14 +480,13 @@ impl ChainLaserActor { } for (idx, chunk) in chunks.into_iter().enumerate() { - let stream = Self::create_accounts_and_slot_stream( - &self.stream_factory, + let stream = self.stream_manager.account_subscribe( &chunk, &self.commitment, idx, from_slot, ); - new_subs.insert(idx, Box::pin(stream)); + new_subs.insert(idx, stream); } // Drop current active subscriptions by reassignig to new ones @@ -537,50 +532,6 @@ impl ChainLaserActor { Some((chain_slot, from_slot)) } - /// Helper to create a dedicated stream for a number of accounts. - /// It includes a slot subscription for chain slot synchronization. - /// This is not 100% cleanly separated but avoids creating another connection - /// just for slot updates. - /// NOTE: no slot update subscription will be created until the first - /// accounts subscription is created. - fn create_accounts_and_slot_stream( - stream_factory: &S, - pubkeys: &[&Pubkey], - commitment: &CommitmentLevel, - idx: usize, - from_slot: Option, - ) -> impl Stream { - let mut accounts = HashMap::new(); - accounts.insert( - format!("account_subs: {idx}"), - SubscribeRequestFilterAccounts { - account: pubkeys.iter().map(|pk| pk.to_string()).collect(), - ..Default::default() - }, - ); - - // Subscribe to slot updates for chain_slot synchronization - let mut slots = HashMap::new(); - slots.insert( - "slot_updates".to_string(), - SubscribeRequestFilterSlots { - filter_by_commitment: Some(true), - ..Default::default() - }, - ); - - let request = SubscribeRequest { - accounts, - slots, - commitment: Some((*commitment).into()), - // NOTE: triton does not support backfilling and we could not verify this with - // helius due to being rate limited. 
- from_slot, - ..Default::default() - }; - stream_factory.subscribe(request) - } - fn add_program_sub( &mut self, program_id: Pubkey, @@ -606,23 +557,10 @@ impl ChainLaserActor { subscribed_programs.insert(program_id); - let mut accounts = HashMap::new(); - accounts.insert( - format!("program_sub: {program_id}"), - SubscribeRequestFilterAccounts { - owner: subscribed_programs - .iter() - .map(|pk| pk.to_string()) - .collect(), - ..Default::default() - }, - ); - let request = SubscribeRequest { - accounts, - commitment: Some(commitment.into()), - ..Default::default() - }; - let stream = self.stream_factory.subscribe(request); + let program_ids: Vec<&Pubkey> = subscribed_programs.iter().collect(); + let stream = self + .stream_manager + .program_subscribe(&program_ids, &commitment); self.program_subscriptions = Some((subscribed_programs, stream)); } diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mock.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mock.rs index bafb947c2..654c8b7b7 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mock.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mock.rs @@ -1,6 +1,6 @@ use std::sync::{Arc, Mutex}; -use helius_laserstream::grpc::SubscribeRequest; +use helius_laserstream::{grpc, grpc::SubscribeRequest, LaserstreamError}; use tokio::sync::mpsc; use tokio_stream::wrappers::UnboundedReceiverStream; @@ -39,11 +39,7 @@ impl MockStreamFactory { } /// Push an error update to a specific stream - pub fn push_error_to_stream( - &self, - idx: usize, - error: helius_laserstream::LaserstreamError, - ) { + pub fn push_error_to_stream(&self, idx: usize, error: LaserstreamError) { let senders = self.stream_senders.lock().unwrap(); if let Some(sender) = senders.get(idx) { let _ = sender.send(Err(error)); @@ -51,10 +47,7 @@ impl MockStreamFactory { } /// Push a success update to all active streams - pub fn push_success_to_all( - &self, 
- update: helius_laserstream::grpc::SubscribeUpdate, - ) { + pub fn push_success_to_all(&self, update: grpc::SubscribeUpdate) { let senders = self.stream_senders.lock().unwrap(); for sender in senders.iter() { let _ = sender.send(Ok(update.clone())); diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs index 74b088cf0..cd930e400 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs @@ -6,12 +6,14 @@ use helius_laserstream::{ LaserstreamError, }; -pub use self::actor::{ - ChainLaserActor, SharedSubscriptions, Slots, +pub use self::{ + actor::{ChainLaserActor, SharedSubscriptions, Slots}, + stream_manager::StreamManager, }; mod actor; mod mock; +mod stream_manager; /// Result of a laser stream operation pub type LaserResult = Result; diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs new file mode 100644 index 000000000..64a5f3cc1 --- /dev/null +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs @@ -0,0 +1,87 @@ +use std::collections::HashMap; + +use helius_laserstream::grpc::{ + CommitmentLevel, SubscribeRequest, SubscribeRequestFilterAccounts, + SubscribeRequestFilterSlots, +}; +use solana_pubkey::Pubkey; + +use super::StreamFactory; + +/// Manages the creation and lifecycle of GRPC laser streams. +pub struct StreamManager { + stream_factory: S, +} + +impl StreamManager { + /// Creates a new stream manager with the given stream factory. + pub fn new(stream_factory: S) -> Self { + Self { stream_factory } + } + + /// Creates a subscription stream for account updates. + /// It includes a slot subscription for chain slot synchronization. 
+ /// This is not 100% cleanly separated but avoids creating another connection + /// just for slot updates. + /// NOTE: no slot update subscription will be created until the first + /// accounts subscription is created. + pub fn account_subscribe( + &self, + pubkeys: &[&Pubkey], + commitment: &CommitmentLevel, + idx: usize, + from_slot: Option, + ) -> super::LaserStream { + let mut accounts = HashMap::new(); + accounts.insert( + format!("account_subs: {idx}"), + SubscribeRequestFilterAccounts { + account: pubkeys.iter().map(|pk| pk.to_string()).collect(), + ..Default::default() + }, + ); + + // Subscribe to slot updates for chain_slot synchronization + let mut slots = HashMap::new(); + slots.insert( + "slot_updates".to_string(), + SubscribeRequestFilterSlots { + filter_by_commitment: Some(true), + ..Default::default() + }, + ); + + let request = SubscribeRequest { + accounts, + slots, + commitment: Some((*commitment).into()), + // NOTE: triton does not support backfilling and we could not verify this with + // helius due to being rate limited. + from_slot, + ..Default::default() + }; + self.stream_factory.subscribe(request) + } + + /// Creates a subscription stream for program updates. 
+ pub fn program_subscribe( + &self, + program_ids: &[&Pubkey], + commitment: &CommitmentLevel, + ) -> super::LaserStream { + let mut accounts = HashMap::new(); + accounts.insert( + "program_sub".to_string(), + SubscribeRequestFilterAccounts { + owner: program_ids.iter().map(|pk| pk.to_string()).collect(), + ..Default::default() + }, + ); + let request = SubscribeRequest { + accounts, + commitment: Some((*commitment).into()), + ..Default::default() + }; + self.stream_factory.subscribe(request) + } +} From ea5f62c523c1e79c0955fe537e4fc25db63853fe Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Mon, 16 Feb 2026 16:16:55 +0700 Subject: [PATCH 42/64] chore: prep new account subscribe --- .../chain_laser_actor/actor.rs | 67 ++++----------- .../chain_laser_actor/stream_manager.rs | 82 +++++++++++++++++-- 2 files changed, 94 insertions(+), 55 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs index 77af3525f..7280c3446 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs @@ -121,8 +121,6 @@ pub struct ChainLaserActor { active_subscription_pubkeys: HashSet, /// Subscriptions that have been activated via the helius provider active_subscriptions: StreamMap, - /// Active streams for program subscriptions - program_subscriptions: Option<(HashSet, LaserStream)>, /// Receives subscribe/unsubscribe messages to this actor messages_receiver: mpsc::Receiver, /// Sends updates for any account subscription that is received via @@ -234,7 +232,6 @@ impl ChainLaserActor { subscriptions, active_subscriptions: Default::default(), active_subscription_pubkeys: Default::default(), - program_subscriptions: Default::default(), subscription_updates_sender, commitment, abort_sender, @@ -291,7 +288,7 @@ impl ChainLaserActor { &self.subscriptions, 
&mut self.active_subscriptions, &mut self.active_subscription_pubkeys, - &mut self.program_subscriptions, + &mut self.stream_manager, &self.abort_sender, &self.client_id, ) @@ -301,11 +298,11 @@ impl ChainLaserActor { }, // Program subscription updates update = async { - match &mut self.program_subscriptions { - Some((_, stream)) => stream.next().await, + match self.stream_manager.program_stream_mut() { + Some(stream) => stream.next().await, None => std::future::pending().await, } - }, if self.program_subscriptions.is_some() => { + }, if self.stream_manager.has_program_subscriptions() => { match update { Some(update) => { self.handle_program_update(update).await; @@ -316,7 +313,7 @@ impl ChainLaserActor { &self.subscriptions, &mut self.active_subscriptions, &mut self.active_subscription_pubkeys, - &mut self.program_subscriptions, + &mut self.stream_manager, &self.abort_sender, &self.client_id, ) @@ -347,8 +344,10 @@ impl ChainLaserActor { false } ProgramSubscribe { pubkey, response } => { - let commitment = self.commitment; - self.add_program_sub(pubkey, commitment); + self.stream_manager.add_program_subscription( + pubkey, + &self.commitment, + ); let _ = response.send(Ok(())).inspect_err(|_| { warn!(client_id = self.client_id, program_id = %pubkey, "Failed to send program subscribe response"); }); @@ -372,7 +371,7 @@ impl ChainLaserActor { &self.subscriptions, &mut self.active_subscriptions, &mut self.active_subscription_pubkeys, - &mut self.program_subscriptions, + &mut self.stream_manager, ); let _ = response.send(Ok(())).inspect_err(|_| { warn!( @@ -480,7 +479,7 @@ impl ChainLaserActor { } for (idx, chunk) in chunks.into_iter().enumerate() { - let stream = self.stream_manager.account_subscribe( + let stream = self.stream_manager.account_subscribe_old( &chunk, &self.commitment, idx, @@ -532,38 +531,6 @@ impl ChainLaserActor { Some((chain_slot, from_slot)) } - fn add_program_sub( - &mut self, - program_id: Pubkey, - commitment: CommitmentLevel, - ) { - if self - 
.program_subscriptions - .as_ref() - .map(|(subscribed_programs, _)| { - subscribed_programs.contains(&program_id) - }) - .unwrap_or(false) - { - trace!(program_id = %program_id, "Program subscription already exists"); - return; - } - - let mut subscribed_programs = self - .program_subscriptions - .as_ref() - .map(|x| x.0.iter().cloned().collect::>()) - .unwrap_or_default(); - - subscribed_programs.insert(program_id); - - let program_ids: Vec<&Pubkey> = subscribed_programs.iter().collect(); - let stream = self - .stream_manager - .program_subscribe(&program_ids, &commitment); - self.program_subscriptions = Some((subscribed_programs, stream)); - } - /// Handles an update from one of the account data streams. #[instrument(skip(self), fields(client_id = %self.client_id, stream_index = %idx))] async fn handle_account_update( @@ -618,7 +585,7 @@ impl ChainLaserActor { &self.subscriptions, &mut self.active_subscriptions, &mut self.active_subscription_pubkeys, - &mut self.program_subscriptions, + &mut self.stream_manager, &self.abort_sender, &self.client_id, ) @@ -689,24 +656,24 @@ impl ChainLaserActor { subscriptions: &SharedSubscriptions, active_subscriptions: &mut StreamMap, active_subscription_pubkeys: &mut HashSet, - program_subscriptions: &mut Option<(HashSet, LaserStream)>, + stream_manager: &mut StreamManager, ) { subscriptions.write().clear(); active_subscriptions.clear(); active_subscription_pubkeys.clear(); - *program_subscriptions = None; + stream_manager.clear_program_subscriptions(); } /// Signals a connection issue by clearing all subscriptions and /// sending a message on the abort channel. 
/// NOTE: the laser client should handle reconnects internally, but /// we add this as a backup in case it is unable to do so - #[instrument(skip(subscriptions, active_subscriptions, active_subscription_pubkeys, program_subscriptions, abort_sender), fields(client_id = %client_id))] + #[instrument(skip(subscriptions, active_subscriptions, active_subscription_pubkeys, stream_manager, abort_sender), fields(client_id = %client_id))] async fn signal_connection_issue( subscriptions: &SharedSubscriptions, active_subscriptions: &mut StreamMap, active_subscription_pubkeys: &mut HashSet, - program_subscriptions: &mut Option<(HashSet, LaserStream)>, + stream_manager: &mut StreamManager, abort_sender: &mpsc::Sender<()>, client_id: &str, ) { @@ -725,7 +692,7 @@ impl ChainLaserActor { subscriptions, active_subscriptions, active_subscription_pubkeys, - program_subscriptions, + stream_manager, ); // Use try_send to avoid blocking and naturally coalesce signals diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs index 64a5f3cc1..131fad6fb 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs @@ -1,4 +1,4 @@ -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use helius_laserstream::grpc::{ CommitmentLevel, SubscribeRequest, SubscribeRequestFilterAccounts, @@ -6,17 +6,25 @@ use helius_laserstream::grpc::{ }; use solana_pubkey::Pubkey; -use super::StreamFactory; +use super::{LaserStream, StreamFactory}; /// Manages the creation and lifecycle of GRPC laser streams. 
pub struct StreamManager { stream_factory: S, + /// Active streams for program subscriptions + program_subscriptions: Option<(HashSet, LaserStream)>, + /// Active streams for account subscriptions + account_streams: Vec, } impl StreamManager { /// Creates a new stream manager with the given stream factory. pub fn new(stream_factory: S) -> Self { - Self { stream_factory } + Self { + stream_factory, + program_subscriptions: None, + account_streams: Vec::new(), + } } /// Creates a subscription stream for account updates. @@ -25,7 +33,26 @@ impl StreamManager { /// just for slot updates. /// NOTE: no slot update subscription will be created until the first /// accounts subscription is created. + #[allow(unused)] pub fn account_subscribe( + &mut self, + pubkeys: &[&Pubkey], + commitment: &CommitmentLevel, + idx: usize, + from_slot: Option, + ) { + let stream = + self.account_subscribe_old(pubkeys, commitment, idx, from_slot); + self.account_streams.push(stream); + } + + /// Creates a subscription stream for account updates. + /// It includes a slot subscription for chain slot synchronization. + /// This is not 100% cleanly separated but avoids creating another connection + /// just for slot updates. + /// NOTE: no slot update subscription will be created until the first + /// accounts subscription is created. + pub fn account_subscribe_old( &self, pubkeys: &[&Pubkey], commitment: &CommitmentLevel, @@ -63,12 +90,57 @@ impl StreamManager { self.stream_factory.subscribe(request) } + /// Adds a program subscription. If the program is already subscribed, + /// this is a no-op. Otherwise, recreates the program stream to include + /// all subscribed programs. 
+ pub fn add_program_subscription( + &mut self, + program_id: Pubkey, + commitment: &CommitmentLevel, + ) { + if self + .program_subscriptions + .as_ref() + .is_some_and(|(subs, _)| subs.contains(&program_id)) + { + return; + } + + let mut subscribed_programs = self + .program_subscriptions + .as_ref() + .map(|(subs, _)| subs.clone()) + .unwrap_or_default(); + + subscribed_programs.insert(program_id); + + let program_ids: Vec<&Pubkey> = subscribed_programs.iter().collect(); + let stream = self.create_program_stream(&program_ids, commitment); + self.program_subscriptions = Some((subscribed_programs, stream)); + } + + /// Returns a mutable reference to the program subscriptions stream + /// (if any) for polling in the actor loop. + pub fn program_stream_mut(&mut self) -> Option<&mut LaserStream> { + self.program_subscriptions.as_mut().map(|(_, s)| s) + } + + /// Returns whether there are active program subscriptions. + pub fn has_program_subscriptions(&self) -> bool { + self.program_subscriptions.is_some() + } + + /// Clears all program subscriptions. + pub fn clear_program_subscriptions(&mut self) { + self.program_subscriptions = None; + } + /// Creates a subscription stream for program updates. 
- pub fn program_subscribe( + fn create_program_stream( &self, program_ids: &[&Pubkey], commitment: &CommitmentLevel, - ) -> super::LaserStream { + ) -> LaserStream { let mut accounts = HashMap::new(); accounts.insert( "program_sub".to_string(), From c99b2fced278feb75ffcee24d57380005066d567 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Mon, 16 Feb 2026 16:59:12 +0700 Subject: [PATCH 43/64] chore: initial tests --- .../chain_laser_actor/actor.rs | 15 +- .../chain_laser_actor/mod.rs | 2 +- .../chain_laser_actor/stream_manager.rs | 348 ++++++++++++++++-- 3 files changed, 327 insertions(+), 38 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs index 7280c3446..1cc1ff7dd 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs @@ -30,7 +30,9 @@ use tokio_stream::StreamMap; use tonic::Code; use tracing::*; -use super::{LaserResult, LaserStream, StreamFactory, StreamManager}; +use super::{ + LaserResult, LaserStream, StreamFactory, StreamManager, StreamManagerConfig, +}; use crate::remote_account_provider::{ chain_rpc_client::{ChainRpcClient, ChainRpcClientImpl}, chain_slot::ChainSlot, @@ -227,7 +229,10 @@ impl ChainLaserActor { let shared_subscriptions = Arc::clone(&subscriptions); let me = Self { - stream_manager: StreamManager::new(stream_factory), + stream_manager: StreamManager::new( + StreamManagerConfig::default(), + stream_factory, + ), messages_receiver, subscriptions, active_subscriptions: Default::default(), @@ -344,10 +349,8 @@ impl ChainLaserActor { false } ProgramSubscribe { pubkey, response } => { - self.stream_manager.add_program_subscription( - pubkey, - &self.commitment, - ); + self.stream_manager + .add_program_subscription(pubkey, &self.commitment); let _ = response.send(Ok(())).inspect_err(|_| { 
warn!(client_id = self.client_id, program_id = %pubkey, "Failed to send program subscribe response"); }); diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs index cd930e400..cefa7a9b1 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs @@ -8,7 +8,7 @@ use helius_laserstream::{ pub use self::{ actor::{ChainLaserActor, SharedSubscriptions, Slots}, - stream_manager::StreamManager, + stream_manager::{StreamManager, StreamManagerConfig}, }; mod actor; diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs index 131fad6fb..8a111e957 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs @@ -8,50 +8,184 @@ use solana_pubkey::Pubkey; use super::{LaserStream, StreamFactory}; +/// Configuration for the generational stream manager. +#[allow(unused)] +pub struct StreamManagerConfig { + /// Max subscriptions per optimized old stream chunk. + pub max_subs_in_old_optimized: usize, + /// Max unoptimized old streams before optimization is triggered. + pub max_old_unoptimized: usize, + /// Max subscriptions in the current-new stream before it is + /// promoted to an unoptimized old stream. + pub max_subs_in_new: usize, +} + +impl Default for StreamManagerConfig { + fn default() -> Self { + Self { + max_subs_in_old_optimized: 2000, + max_old_unoptimized: 10, + max_subs_in_new: 200, + } + } +} + /// Manages the creation and lifecycle of GRPC laser streams. +/// +/// Account subscriptions follow a generational approach: +/// - New subscriptions go into the *current-new* stream. 
+/// - When the current-new stream exceeds `max_subs_in_new` it is +/// promoted to the *unoptimized old* streams vec and a fresh +/// current-new stream is created. +/// - When unoptimized old streams exceed `max_old_unoptimized`, +/// optimization is triggered which rebuilds all streams from the +/// `subscriptions` set into *optimized old* streams chunked by +/// `max_subs_in_old_optimized`. +/// +/// Unsubscribe only removes from the `subscriptions` HashSet — it +/// never touches streams. Updates for unsubscribed pubkeys are +/// ignored at the actor level. +#[allow(unused)] pub struct StreamManager { + config: StreamManagerConfig, stream_factory: S, + + // ----- Program subscriptions (unchanged) ----- /// Active streams for program subscriptions program_subscriptions: Option<(HashSet, LaserStream)>, - /// Active streams for account subscriptions - account_streams: Vec, + + // ----- Generational account subscriptions ----- + /// The canonical set of currently active account subscriptions. + subscriptions: HashSet, + /// Pubkeys that are part of the current-new stream's filter. + current_new_subs: HashSet, + /// The current-new stream (None until the first subscribe call). + current_new_stream: Option, + /// Old streams that have not been optimized yet. + unoptimized_old_streams: Vec, + /// Old streams created by optimization, each covering up to + /// `max_subs_in_old_optimized` subscriptions. + optimized_old_streams: Vec, } +#[allow(unused)] impl StreamManager { - /// Creates a new stream manager with the given stream factory. - pub fn new(stream_factory: S) -> Self { + /// Creates a new stream manager with the given config and stream + /// factory. 
+ pub fn new(config: StreamManagerConfig, stream_factory: S) -> Self { Self { + config, stream_factory, program_subscriptions: None, - account_streams: Vec::new(), + subscriptions: HashSet::new(), + current_new_subs: HashSet::new(), + current_new_stream: None, + unoptimized_old_streams: Vec::new(), + optimized_old_streams: Vec::new(), } } - /// Creates a subscription stream for account updates. - /// It includes a slot subscription for chain slot synchronization. - /// This is not 100% cleanly separated but avoids creating another connection - /// just for slot updates. - /// NOTE: no slot update subscription will be created until the first - /// accounts subscription is created. - #[allow(unused)] + // --------------------------------------------------------- + // Account subscription — generational API (stubs) + // --------------------------------------------------------- + + /// Subscribe to account updates for the given pubkeys. + /// + /// Each pubkey is added to `subscriptions` and to the current-new + /// stream. If the current-new stream exceeds `max_subs_in_new` it + /// is promoted and a fresh one is created. If unoptimized old + /// streams exceed `max_old_unoptimized`, optimization is + /// triggered. pub fn account_subscribe( &mut self, - pubkeys: &[&Pubkey], - commitment: &CommitmentLevel, - idx: usize, - from_slot: Option, + _pubkeys: &[Pubkey], + _commitment: &CommitmentLevel, ) { - let stream = - self.account_subscribe_old(pubkeys, commitment, idx, from_slot); - self.account_streams.push(stream); + todo!("account_subscribe: generational implementation") + } + + /// Unsubscribe the given pubkeys. + /// + /// Removes them from the `subscriptions` HashSet only — streams + /// are never modified. Updates for these pubkeys will be ignored + /// by the actor. + pub fn account_unsubscribe(&mut self, _pubkeys: &[Pubkey]) { + todo!("account_unsubscribe") + } + + /// Rebuild all account streams from `subscriptions`. + /// + /// 1. 
Chunk `subscriptions` into groups of + /// `max_subs_in_old_optimized`. + /// 2. Create a new stream for each chunk → `optimized_old_streams`. + /// 3. Clear `unoptimized_old_streams`. + /// 4. Reset the current-new stream (empty filter). + pub fn optimize(&mut self, _commitment: &CommitmentLevel) { + todo!("optimize") + } + + /// Returns `true` if the pubkey is in the active `subscriptions` + /// set. + pub fn is_subscribed(&self, _pubkey: &Pubkey) -> bool { + todo!("is_subscribed") } - /// Creates a subscription stream for account updates. + // --------------------------------------------------------- + // Accessors — internal state inspection + // --------------------------------------------------------- + + /// Returns a reference to the canonical subscriptions set. + pub fn subscriptions(&self) -> &HashSet { + &self.subscriptions + } + + /// Returns the number of pubkeys in the current-new stream's + /// filter. + pub fn current_new_sub_count(&self) -> usize { + self.current_new_subs.len() + } + + /// Returns a reference to the current-new stream's pubkey set. + pub fn current_new_subs(&self) -> &HashSet { + &self.current_new_subs + } + + /// Returns the number of unoptimized old streams. + pub fn unoptimized_old_stream_count(&self) -> usize { + self.unoptimized_old_streams.len() + } + + /// Returns the number of optimized old streams. + pub fn optimized_old_stream_count(&self) -> usize { + self.optimized_old_streams.len() + } + + /// Returns mutable references to all account streams (optimized + /// old + unoptimized old + current-new) for polling. + pub fn all_account_streams_mut(&mut self) -> Vec<&mut LaserStream> { + todo!("all_account_streams_mut") + } + + /// Returns the total number of account streams across all + /// generations. 
+ pub fn account_stream_count(&self) -> usize { + let current = if self.current_new_stream.is_some() { + 1 + } else { + 0 + }; + self.optimized_old_streams.len() + + self.unoptimized_old_streams.len() + + current + } + + // ========================================================= + // Legacy account subscribe (kept for migration) + // ========================================================= + + /// Creates a subscription stream for account updates (legacy). + /// /// It includes a slot subscription for chain slot synchronization. - /// This is not 100% cleanly separated but avoids creating another connection - /// just for slot updates. - /// NOTE: no slot update subscription will be created until the first - /// accounts subscription is created. pub fn account_subscribe_old( &self, pubkeys: &[&Pubkey], @@ -82,17 +216,15 @@ impl StreamManager { accounts, slots, commitment: Some((*commitment).into()), - // NOTE: triton does not support backfilling and we could not verify this with - // helius due to being rate limited. from_slot, ..Default::default() }; self.stream_factory.subscribe(request) } - /// Adds a program subscription. If the program is already subscribed, - /// this is a no-op. Otherwise, recreates the program stream to include - /// all subscribed programs. + /// Adds a program subscription. If the program is already + /// subscribed, this is a no-op. Otherwise, recreates the program + /// stream to include all subscribed programs. pub fn add_program_subscription( &mut self, program_id: Pubkey, @@ -119,8 +251,8 @@ impl StreamManager { self.program_subscriptions = Some((subscribed_programs, stream)); } - /// Returns a mutable reference to the program subscriptions stream - /// (if any) for polling in the actor loop. + /// Returns a mutable reference to the program subscriptions + /// stream (if any) for polling in the actor loop. 
pub fn program_stream_mut(&mut self) -> Option<&mut LaserStream> { self.program_subscriptions.as_mut().map(|(_, s)| s) } @@ -157,3 +289,157 @@ impl StreamManager { self.stream_factory.subscribe(request) } } + +#[cfg(test)] +mod tests { + use helius_laserstream::grpc::CommitmentLevel; + use solana_pubkey::Pubkey; + + use super::*; + use crate::remote_account_provider::chain_laser_actor::mock::MockStreamFactory; + + // ----------------- + // Helpers + // ----------------- + fn test_config() -> StreamManagerConfig { + StreamManagerConfig { + max_subs_in_old_optimized: 10, + max_old_unoptimized: 3, + max_subs_in_new: 5, + } + } + + fn create_manager() -> (StreamManager, MockStreamFactory) { + let factory = MockStreamFactory::new(); + let manager = StreamManager::new(test_config(), factory.clone()); + (manager, factory) + } + + fn make_pubkeys(n: usize) -> Vec { + (0..n).map(|_| Pubkey::new_unique()).collect() + } + + /// Collect all account pubkey strings from a captured + /// `SubscribeRequest`'s account filters. + fn account_pubkeys_from_request(req: &SubscribeRequest) -> HashSet { + req.accounts + .values() + .flat_map(|f| f.account.iter().cloned()) + .collect() + } + + /// Assert that `subscriptions()` contains exactly `expected` + /// (order-independent, exact count). + fn assert_subscriptions_eq( + mgr: &StreamManager, + expected: &[Pubkey], + ) { + let subs = mgr.subscriptions(); + assert_eq!( + subs.len(), + expected.len(), + "expected {} subscriptions, got {}", + expected.len(), + subs.len(), + ); + for pk in expected { + assert!(subs.contains(pk), "subscription set missing pubkey {pk}",); + } + } + + /// Assert that a `SubscribeRequest` filter contains exactly the + /// given pubkeys (order-independent, exact count). 
+ fn assert_request_has_exact_pubkeys( + req: &SubscribeRequest, + expected: &[Pubkey], + ) { + let filter = account_pubkeys_from_request(req); + assert_eq!( + filter.len(), + expected.len(), + "expected {} pubkeys in filter, got {}", + expected.len(), + filter.len(), + ); + for pk in expected { + assert!( + filter.contains(&pk.to_string()), + "request filter missing pubkey {pk}", + ); + } + } + + // ------------------------------------------------------------- + // 1. Subscription Tracking + // ------------------------------------------------------------- + + #[test] + fn test_subscribe_single_pubkey_adds_to_subscriptions() { + let (mut mgr, factory) = create_manager(); + let pk = Pubkey::new_unique(); + let commitment = CommitmentLevel::Processed; + + mgr.account_subscribe(&[pk], &commitment); + + assert_subscriptions_eq(&mgr, &[pk]); + + // Exactly one subscribe call (initial current-new stream) + // whose filter contains exactly this pubkey. + let reqs = factory.captured_requests(); + assert_eq!(reqs.len(), 1); + assert_request_has_exact_pubkeys(&reqs[0], &[pk]); + } + + #[test] + fn test_subscribe_multiple_pubkeys_at_once() { + let (mut mgr, factory) = create_manager(); + let pks = make_pubkeys(5); + let commitment = CommitmentLevel::Processed; + + mgr.account_subscribe(&pks, &commitment); + + assert_subscriptions_eq(&mgr, &pks); + + // One stream created whose filter contains all 5. + let reqs = factory.captured_requests(); + assert_eq!(reqs.len(), 1); + assert_request_has_exact_pubkeys(&reqs[0], &pks); + } + + #[test] + fn test_subscribe_duplicate_pubkey_is_noop() { + let (mut mgr, factory) = create_manager(); + let pk = Pubkey::new_unique(); + let commitment = CommitmentLevel::Processed; + + mgr.account_subscribe(&[pk], &commitment); + let calls_after_first = factory.captured_requests().len(); + + // Subscribe the same pubkey again. + mgr.account_subscribe(&[pk], &commitment); + + assert_subscriptions_eq(&mgr, &[pk]); + + // No extra streams spawned. 
+ assert_eq!(factory.captured_requests().len(), calls_after_first,); + } + + #[test] + fn test_subscribe_incremental_calls_accumulate() { + let (mut mgr, factory) = create_manager(); + let pks = make_pubkeys(3); + let commitment = CommitmentLevel::Processed; + + mgr.account_subscribe(&[pks[0]], &commitment); + mgr.account_subscribe(&[pks[1]], &commitment); + mgr.account_subscribe(&[pks[2]], &commitment); + + assert_subscriptions_eq(&mgr, &pks); + + // The most recent factory call's filter should contain all + // three pubkeys. + let reqs = factory.captured_requests(); + let last_req = reqs.last().unwrap(); + assert_request_has_exact_pubkeys(last_req, &pks); + } +} From 64eff4ef699c877d70a8851282e5e8b4679b9b60 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Mon, 16 Feb 2026 17:22:38 +0700 Subject: [PATCH 44/64] chore: pass initial set of tests --- .../chain_laser_actor/stream_manager.rs | 177 +++++++++++++++--- 1 file changed, 150 insertions(+), 27 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs index 8a111e957..37bc95ac3 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs @@ -98,10 +98,51 @@ impl StreamManager { /// triggered. pub fn account_subscribe( &mut self, - _pubkeys: &[Pubkey], - _commitment: &CommitmentLevel, + pubkeys: &[Pubkey], + commitment: &CommitmentLevel, ) { - todo!("account_subscribe: generational implementation") + // Filter out pubkeys already in subscriptions. 
+ let new_pks: Vec = pubkeys + .iter() + .filter(|pk| !self.subscriptions.contains(pk)) + .copied() + .collect(); + + if new_pks.is_empty() { + return; + } + + for pk in &new_pks { + self.subscriptions.insert(*pk); + self.current_new_subs.insert(*pk); + } + + // (Re)create the current-new stream with the full + // current_new_subs filter. + self.current_new_stream = + Some(self.create_account_stream( + &self.current_new_subs.iter().collect::>(), + commitment, + )); + + // Promote if current-new exceeds threshold. + if self.current_new_subs.len() > self.config.max_subs_in_new { + // Move current-new stream to unoptimized old. + if let Some(stream) = self.current_new_stream.take() { + self.unoptimized_old_streams.push(stream); + } + self.current_new_subs.clear(); + + // Create a fresh empty current-new stream. + self.current_new_stream = None; + + // If unoptimized old streams exceed the limit, optimize. + if self.unoptimized_old_streams.len() + > self.config.max_old_unoptimized + { + self.optimize(commitment); + } + } } /// Unsubscribe the given pubkeys. @@ -109,8 +150,10 @@ impl StreamManager { /// Removes them from the `subscriptions` HashSet only — streams /// are never modified. Updates for these pubkeys will be ignored /// by the actor. - pub fn account_unsubscribe(&mut self, _pubkeys: &[Pubkey]) { - todo!("account_unsubscribe") + pub fn account_unsubscribe(&mut self, pubkeys: &[Pubkey]) { + for pk in pubkeys { + self.subscriptions.remove(pk); + } } /// Rebuild all account streams from `subscriptions`. @@ -126,8 +169,8 @@ impl StreamManager { /// Returns `true` if the pubkey is in the active `subscriptions` /// set. 
- pub fn is_subscribed(&self, _pubkey: &Pubkey) -> bool { - todo!("is_subscribed") + pub fn is_subscribed(&self, pubkey: &Pubkey) -> bool { + self.subscriptions.contains(pubkey) } // --------------------------------------------------------- @@ -179,6 +222,48 @@ impl StreamManager { + current } + // --------------------------------------------------------- + // Internal helpers + // --------------------------------------------------------- + + /// Build a `SubscribeRequest` and call the factory for the given + /// account pubkeys. Includes a slot subscription for chain slot + /// synchronisation (matching the legacy path). + fn create_account_stream( + &self, + pubkeys: &[&Pubkey], + commitment: &CommitmentLevel, + ) -> LaserStream { + let mut accounts = HashMap::new(); + accounts.insert( + "account_subs".to_string(), + SubscribeRequestFilterAccounts { + account: pubkeys + .iter() + .map(|pk| pk.to_string()) + .collect(), + ..Default::default() + }, + ); + + let mut slots = HashMap::new(); + slots.insert( + "slot_updates".to_string(), + SubscribeRequestFilterSlots { + filter_by_commitment: Some(true), + ..Default::default() + }, + ); + + let request = SubscribeRequest { + accounts, + slots, + commitment: Some((*commitment).into()), + ..Default::default() + }; + self.stream_factory.subscribe(request) + } + // ========================================================= // Legacy account subscribe (kept for migration) // ========================================================= @@ -369,6 +454,56 @@ mod tests { } } + // --------------------------------------------------------- + // Additional helpers + // --------------------------------------------------------- + + const COMMITMENT: CommitmentLevel = CommitmentLevel::Processed; + + /// Subscribe `n` pubkeys one-at-a-time, returning the created + /// pubkeys. 
+ fn subscribe_n( + mgr: &mut StreamManager, + n: usize, + ) -> Vec { + let pks = make_pubkeys(n); + mgr.account_subscribe(&pks, &COMMITMENT); + pks + } + + /// Subscribe pubkeys in batches of `batch` until `total` pubkeys + /// have been subscribed. Returns all created pubkeys. + fn subscribe_in_batches( + mgr: &mut StreamManager, + total: usize, + batch: usize, + ) -> Vec { + let mut all = Vec::new(); + let mut remaining = total; + while remaining > 0 { + let n = remaining.min(batch); + let pks = make_pubkeys(n); + mgr.account_subscribe(&pks, &COMMITMENT); + all.extend(pks); + remaining -= n; + } + all + } + + /// Returns the union of all account pubkey strings across all + /// captured requests from `start_idx` onward. + fn all_filter_pubkeys_from( + factory: &MockStreamFactory, + start_idx: usize, + ) -> HashSet { + factory + .captured_requests() + .iter() + .skip(start_idx) + .flat_map(|r| account_pubkeys_from_request(r)) + .collect() + } + // ------------------------------------------------------------- // 1. Subscription Tracking // ------------------------------------------------------------- @@ -377,14 +512,11 @@ mod tests { fn test_subscribe_single_pubkey_adds_to_subscriptions() { let (mut mgr, factory) = create_manager(); let pk = Pubkey::new_unique(); - let commitment = CommitmentLevel::Processed; - mgr.account_subscribe(&[pk], &commitment); + mgr.account_subscribe(&[pk], &COMMITMENT); assert_subscriptions_eq(&mgr, &[pk]); - // Exactly one subscribe call (initial current-new stream) - // whose filter contains exactly this pubkey. 
let reqs = factory.captured_requests(); assert_eq!(reqs.len(), 1); assert_request_has_exact_pubkeys(&reqs[0], &[pk]); @@ -394,13 +526,11 @@ mod tests { fn test_subscribe_multiple_pubkeys_at_once() { let (mut mgr, factory) = create_manager(); let pks = make_pubkeys(5); - let commitment = CommitmentLevel::Processed; - mgr.account_subscribe(&pks, &commitment); + mgr.account_subscribe(&pks, &COMMITMENT); assert_subscriptions_eq(&mgr, &pks); - // One stream created whose filter contains all 5. let reqs = factory.captured_requests(); assert_eq!(reqs.len(), 1); assert_request_has_exact_pubkeys(&reqs[0], &pks); @@ -410,34 +540,27 @@ mod tests { fn test_subscribe_duplicate_pubkey_is_noop() { let (mut mgr, factory) = create_manager(); let pk = Pubkey::new_unique(); - let commitment = CommitmentLevel::Processed; - mgr.account_subscribe(&[pk], &commitment); + mgr.account_subscribe(&[pk], &COMMITMENT); let calls_after_first = factory.captured_requests().len(); - // Subscribe the same pubkey again. - mgr.account_subscribe(&[pk], &commitment); + mgr.account_subscribe(&[pk], &COMMITMENT); assert_subscriptions_eq(&mgr, &[pk]); - - // No extra streams spawned. - assert_eq!(factory.captured_requests().len(), calls_after_first,); + assert_eq!(factory.captured_requests().len(), calls_after_first); } #[test] fn test_subscribe_incremental_calls_accumulate() { let (mut mgr, factory) = create_manager(); let pks = make_pubkeys(3); - let commitment = CommitmentLevel::Processed; - mgr.account_subscribe(&[pks[0]], &commitment); - mgr.account_subscribe(&[pks[1]], &commitment); - mgr.account_subscribe(&[pks[2]], &commitment); + mgr.account_subscribe(&[pks[0]], &COMMITMENT); + mgr.account_subscribe(&[pks[1]], &COMMITMENT); + mgr.account_subscribe(&[pks[2]], &COMMITMENT); assert_subscriptions_eq(&mgr, &pks); - // The most recent factory call's filter should contain all - // three pubkeys. 
let reqs = factory.captured_requests(); let last_req = reqs.last().unwrap(); assert_request_has_exact_pubkeys(last_req, &pks); From cca4244dc70594801e7013eaaf59aa15f473c84f Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Mon, 16 Feb 2026 17:23:20 +0700 Subject: [PATCH 45/64] chore: add remaining tests (not passing) --- .../chain_laser_actor/stream_manager.rs | 583 ++++++++++++++++++ 1 file changed, 583 insertions(+) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs index 37bc95ac3..6c4f852b0 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs @@ -565,4 +565,587 @@ mod tests { let last_req = reqs.last().unwrap(); assert_request_has_exact_pubkeys(last_req, &pks); } + + // ------------------------------------------------------------- + // 2. Current-New Stream Lifecycle + // ------------------------------------------------------------- + + #[test] + fn test_new_stream_created_on_first_subscribe() { + let (mut mgr, factory) = create_manager(); + assert_eq!(mgr.account_stream_count(), 0); + + subscribe_n(&mut mgr, 1); + + assert_eq!(mgr.account_stream_count(), 1); + assert_eq!(factory.active_stream_count(), 1); + } + + #[test] + fn test_current_new_stream_stays_below_threshold() { + let (mut mgr, _factory) = create_manager(); + // MAX_NEW - 1 = 4 + subscribe_in_batches(&mut mgr, 4, 2); + + assert_eq!(mgr.account_stream_count(), 1); + assert_eq!(mgr.unoptimized_old_stream_count(), 0); + } + + #[test] + fn test_current_new_stream_promoted_at_threshold() { + let (mut mgr, factory) = create_manager(); + // Subscribe MAX_NEW (5) pubkeys first. 
+ let first_five = make_pubkeys(5); + mgr.account_subscribe(&first_five, &COMMITMENT); + assert_eq!(mgr.unoptimized_old_stream_count(), 0); + + // Subscribe the 6th pubkey → triggers promotion. + let sixth = Pubkey::new_unique(); + mgr.account_subscribe(&[sixth], &COMMITMENT); + + assert_eq!(mgr.unoptimized_old_stream_count(), 1); + // A new current-new stream was created for the 6th pubkey. + assert!(mgr.current_new_subs().contains(&sixth)); + // The factory received a new subscribe call for the fresh + // current-new stream. + let reqs = factory.captured_requests(); + assert!(reqs.len() >= 2); + } + + #[test] + fn test_multiple_promotions_accumulate_unoptimized() { + let (mut mgr, _factory) = create_manager(); + // First promotion: subscribe 6 pubkeys (exceeds MAX_NEW=5). + subscribe_n(&mut mgr, 6); + assert_eq!(mgr.unoptimized_old_stream_count(), 1); + + // Second promotion: subscribe 5 more to fill the new current, + // then 1 more to exceed. + subscribe_n(&mut mgr, 5); + assert_eq!(mgr.unoptimized_old_stream_count(), 2); + + // Current-new stream should only hold the overflow pubkeys. + assert!(mgr.current_new_sub_count() <= 1); + } + + // ------------------------------------------------------------- + // 3. Optimization Trigger via MAX_OLD_UNOPTIMIZED + // ------------------------------------------------------------- + + #[test] + fn test_optimization_triggered_when_unoptimized_exceeds_max() { + let (mut mgr, _factory) = create_manager(); + // MAX_OLD_UNOPTIMIZED = 3. We need 4 promotions. + // Each promotion needs > MAX_NEW (5) pubkeys in current-new. + // Subscribe 6 four times → 4 promotions. + for _ in 0..3 { + subscribe_n(&mut mgr, 6); + } + assert_eq!(mgr.unoptimized_old_stream_count(), 3); + + // 4th promotion triggers optimization. + subscribe_n(&mut mgr, 6); + + // After optimization: unoptimized should be empty. + assert_eq!(mgr.unoptimized_old_stream_count(), 0); + // Optimized old streams should exist. 
+ let total_subs = mgr.subscriptions().len(); + let expected_optimized = + (total_subs + 9) / 10; // ceil(total / MAX_OLD_OPTIMIZED) + assert_eq!( + mgr.optimized_old_stream_count(), + expected_optimized, + ); + } + + #[test] + fn test_optimization_not_triggered_below_max_unoptimized() { + let (mut mgr, _factory) = create_manager(); + // Exactly MAX_OLD_UNOPTIMIZED (3) promotions. + for _ in 0..3 { + subscribe_n(&mut mgr, 6); + } + assert_eq!(mgr.unoptimized_old_stream_count(), 3); + assert_eq!(mgr.optimized_old_stream_count(), 0); + } + + // ------------------------------------------------------------- + // 4. Manual / Interval-Driven Optimization + // ------------------------------------------------------------- + + #[test] + fn test_optimize_creates_correct_number_of_optimized_streams() { + let (mut mgr, _factory) = create_manager(); + subscribe_n(&mut mgr, 25); + + mgr.optimize(&COMMITMENT); + + // ceil(25 / 10) = 3 + assert_eq!(mgr.optimized_old_stream_count(), 3); + } + + #[test] + fn test_optimize_clears_unoptimized_old_streams() { + let (mut mgr, _factory) = create_manager(); + // Create several unoptimized old streams. + for _ in 0..3 { + subscribe_n(&mut mgr, 6); + } + assert!(mgr.unoptimized_old_stream_count() > 0); + + mgr.optimize(&COMMITMENT); + + assert_eq!(mgr.unoptimized_old_stream_count(), 0); + assert!(mgr.optimized_old_stream_count() > 0); + } + + #[test] + fn test_optimize_resets_current_new_stream() { + let (mut mgr, _factory) = create_manager(); + subscribe_n(&mut mgr, 8); + + mgr.optimize(&COMMITMENT); + + assert_eq!(mgr.current_new_sub_count(), 0); + } + + #[test] + fn test_optimize_excludes_unsubscribed_pubkeys() { + let (mut mgr, factory) = create_manager(); + let pks = subscribe_n(&mut mgr, 15); + + // Unsubscribe 5 of them. 
+ let to_unsub: Vec = pks[0..5].to_vec(); + mgr.account_unsubscribe(&to_unsub); + + let reqs_before = factory.captured_requests().len(); + mgr.optimize(&COMMITMENT); + + // Optimized streams should only contain the 10 remaining + // pubkeys. + let remaining: HashSet = + pks[5..].iter().map(|pk| pk.to_string()).collect(); + let filter_pks = + all_filter_pubkeys_from(&factory, reqs_before); + assert_eq!(filter_pks.len(), 10); + for pk in &to_unsub { + assert!( + !filter_pks.contains(&pk.to_string()), + "unsubscribed pubkey {pk} found in optimized filter", + ); + } + for pk_str in &remaining { + assert!( + filter_pks.contains(pk_str), + "expected pubkey {pk_str} missing from optimized filter", + ); + } + } + + #[test] + fn test_optimize_with_zero_subscriptions() { + let (mut mgr, _factory) = create_manager(); + let pks = subscribe_n(&mut mgr, 5); + mgr.account_unsubscribe(&pks); + + mgr.optimize(&COMMITMENT); + + assert_eq!(mgr.optimized_old_stream_count(), 0); + assert_eq!(mgr.unoptimized_old_stream_count(), 0); + } + + #[test] + fn test_optimize_idempotent() { + let (mut mgr, _factory) = create_manager(); + subscribe_n(&mut mgr, 15); + + mgr.optimize(&COMMITMENT); + let count_after_first = mgr.optimized_old_stream_count(); + + mgr.optimize(&COMMITMENT); + assert_eq!( + mgr.optimized_old_stream_count(), + count_after_first, + ); + } + + // ------------------------------------------------------------- + // 5. Behavior During Optimization + // ------------------------------------------------------------- + + #[test] + fn test_subscribe_during_optimization_goes_to_current_new() { + let (mut mgr, _factory) = create_manager(); + subscribe_n(&mut mgr, 20); + + mgr.optimize(&COMMITMENT); + + // Subscribe a new pubkey after optimization. 
+ let new_pk = Pubkey::new_unique(); + mgr.account_subscribe(&[new_pk], &COMMITMENT); + + assert!(mgr.subscriptions().contains(&new_pk)); + assert!(mgr.current_new_subs().contains(&new_pk)); + } + + #[test] + fn test_no_double_optimization_trigger() { + let (mut mgr, _factory) = create_manager(); + // Fill up to MAX_OLD_UNOPTIMIZED. + for _ in 0..3 { + subscribe_n(&mut mgr, 6); + } + assert_eq!(mgr.unoptimized_old_stream_count(), 3); + + // 4th promotion triggers optimization. + subscribe_n(&mut mgr, 6); + assert_eq!(mgr.unoptimized_old_stream_count(), 0); + let optimized_after_first = mgr.optimized_old_stream_count(); + + // Now subscribe enough to exceed MAX_SUBS_IN_NEW again, + // causing a promotion. Since optimization just ran, it should + // NOT trigger again immediately. + subscribe_n(&mut mgr, 6); + // Unoptimized grows by 1 but no second optimization. + assert!(mgr.unoptimized_old_stream_count() <= 1); + assert_eq!( + mgr.optimized_old_stream_count(), + optimized_after_first, + ); + } + + // ------------------------------------------------------------- + // 6. 
Unsubscribe + // ------------------------------------------------------------- + + #[test] + fn test_unsubscribe_removes_from_subscriptions_set() { + let (mut mgr, _factory) = create_manager(); + let pks = make_pubkeys(3); + mgr.account_subscribe(&pks, &COMMITMENT); + + mgr.account_unsubscribe(&[pks[1]]); + + assert_subscriptions_eq(&mgr, &[pks[0], pks[2]]); + } + + #[test] + fn test_unsubscribe_nonexistent_pubkey_is_noop() { + let (mut mgr, _factory) = create_manager(); + let random = Pubkey::new_unique(); + + mgr.account_unsubscribe(&[random]); + + assert!(mgr.subscriptions().is_empty()); + } + + #[test] + fn test_unsubscribe_already_unsubscribed_pubkey() { + let (mut mgr, _factory) = create_manager(); + let pk = Pubkey::new_unique(); + mgr.account_subscribe(&[pk], &COMMITMENT); + + mgr.account_unsubscribe(&[pk]); + mgr.account_unsubscribe(&[pk]); + + assert!(mgr.subscriptions().is_empty()); + } + + #[test] + fn test_unsubscribe_does_not_modify_streams() { + let (mut mgr, factory) = create_manager(); + let pks = make_pubkeys(4); + mgr.account_subscribe(&pks, &COMMITMENT); + let calls_before = factory.captured_requests().len(); + + mgr.account_unsubscribe(&pks[0..2]); + + // No new factory calls after unsubscribe. + assert_eq!(factory.captured_requests().len(), calls_before); + // Current-new subs still contain all 4 (streams not updated). + for pk in &pks { + assert!(mgr.current_new_subs().contains(pk)); + } + } + + #[test] + fn test_unsubscribe_all_then_optimize_clears_streams() { + let (mut mgr, _factory) = create_manager(); + // Subscribe 8 pubkeys (creates current-new + 1 unoptimized). 
+ let pks = subscribe_n(&mut mgr, 8); + mgr.account_unsubscribe(&pks); + + mgr.optimize(&COMMITMENT); + + assert_eq!(mgr.optimized_old_stream_count(), 0); + assert_eq!(mgr.unoptimized_old_stream_count(), 0); + } + + #[test] + fn test_unsubscribe_batch() { + let (mut mgr, factory) = create_manager(); + let pks = make_pubkeys(5); + mgr.account_subscribe(&pks, &COMMITMENT); + let calls_before = factory.captured_requests().len(); + + mgr.account_unsubscribe(&[pks[0], pks[2], pks[4]]); + + assert_subscriptions_eq(&mgr, &[pks[1], pks[3]]); + assert_eq!(factory.captured_requests().len(), calls_before); + } + + // ------------------------------------------------------------- + // 7. Subscription Membership Check + // ------------------------------------------------------------- + + #[test] + fn test_is_subscribed_returns_true_for_active() { + let (mut mgr, _factory) = create_manager(); + let pk = Pubkey::new_unique(); + mgr.account_subscribe(&[pk], &COMMITMENT); + + assert!(mgr.is_subscribed(&pk)); + } + + #[test] + fn test_is_subscribed_returns_false_after_unsubscribe() { + let (mut mgr, _factory) = create_manager(); + let pk = Pubkey::new_unique(); + mgr.account_subscribe(&[pk], &COMMITMENT); + mgr.account_unsubscribe(&[pk]); + + assert!(!mgr.is_subscribed(&pk)); + } + + #[test] + fn test_is_subscribed_returns_false_for_never_subscribed() { + let (mgr, _factory) = create_manager(); + let random = Pubkey::new_unique(); + + assert!(!mgr.is_subscribed(&random)); + } + + // ------------------------------------------------------------- + // 8. Stream Enumeration / Polling Access + // ------------------------------------------------------------- + + #[test] + fn test_all_account_streams_includes_all_generations() { + let (mut mgr, _factory) = create_manager(); + // Create optimized old streams. + subscribe_n(&mut mgr, 15); + mgr.optimize(&COMMITMENT); + + // Create an unoptimized old stream via promotion. 
+ subscribe_n(&mut mgr, 6); + + // Current-new also exists from the overflow pubkey. + let streams = mgr.all_account_streams_mut(); + let expected = mgr.optimized_old_stream_count() + + mgr.unoptimized_old_stream_count() + + if mgr.current_new_sub_count() > 0 { + 1 + } else { + 0 + }; + assert_eq!(streams.len(), expected); + } + + #[test] + fn test_all_account_streams_empty_when_no_subscriptions() { + let (mut mgr, _factory) = create_manager(); + + let streams = mgr.all_account_streams_mut(); + assert!(streams.is_empty()); + } + + #[test] + fn test_all_account_streams_after_optimize_drops_old_unoptimized() + { + let (mut mgr, _factory) = create_manager(); + // Create unoptimized old streams. + for _ in 0..2 { + subscribe_n(&mut mgr, 6); + } + assert!(mgr.unoptimized_old_stream_count() > 0); + + mgr.optimize(&COMMITMENT); + + assert_eq!(mgr.unoptimized_old_stream_count(), 0); + let streams = mgr.all_account_streams_mut(); + // Only optimized old streams remain (current-new is empty + // after optimize). + assert_eq!(streams.len(), mgr.optimized_old_stream_count()); + } + + // ------------------------------------------------------------- + // 9. Edge Cases and Stress + // ------------------------------------------------------------- + + #[test] + fn test_subscribe_exactly_at_max_subs_in_new_no_promotion() { + let (mut mgr, _factory) = create_manager(); + // Exactly MAX_NEW (5) pubkeys — should NOT promote. 
+ subscribe_n(&mut mgr, 5); + + assert_eq!(mgr.unoptimized_old_stream_count(), 0); + assert_eq!(mgr.account_stream_count(), 1); + } + + #[test] + fn test_single_pubkey_optimization() { + let (mut mgr, _factory) = create_manager(); + subscribe_n(&mut mgr, 1); + + mgr.optimize(&COMMITMENT); + + assert_eq!(mgr.optimized_old_stream_count(), 1); + assert_eq!(mgr.current_new_sub_count(), 0); + } + + #[test] + fn test_subscribe_max_old_optimized_plus_one() { + let (mut mgr, _factory) = create_manager(); + // MAX_OLD_OPTIMIZED + 1 = 11 + subscribe_n(&mut mgr, 11); + + mgr.optimize(&COMMITMENT); + + assert_eq!(mgr.optimized_old_stream_count(), 2); + } + + #[test] + fn test_large_scale_subscribe_and_optimize() { + let (mut mgr, factory) = create_manager(); + let pks = subscribe_n(&mut mgr, 50); + + let reqs_before = factory.captured_requests().len(); + mgr.optimize(&COMMITMENT); + + // ceil(50 / 10) = 5 + assert_eq!(mgr.optimized_old_stream_count(), 5); + assert_eq!(mgr.subscriptions().len(), 50); + assert_eq!(mgr.current_new_sub_count(), 0); + + // Verify the union of all optimized stream filters equals all + // 50 pubkeys. + let filter_pks = + all_filter_pubkeys_from(&factory, reqs_before); + assert_eq!(filter_pks.len(), 50); + for pk in &pks { + assert!(filter_pks.contains(&pk.to_string())); + } + } + + #[test] + fn test_interleaved_subscribe_unsubscribe_then_optimize() { + let (mut mgr, factory) = create_manager(); + let pks = subscribe_n(&mut mgr, 20); + // Unsubscribe 8 scattered. + let unsub1: Vec = + pks.iter().step_by(2).take(8).copied().collect(); + mgr.account_unsubscribe(&unsub1); + + // Subscribe 5 new ones. + let new_pks = subscribe_n(&mut mgr, 5); + // Unsubscribe 2 of the new ones. 
+ mgr.account_unsubscribe(&new_pks[0..2]); + + let expected_count = 20 - 8 + 5 - 2; + assert_eq!(mgr.subscriptions().len(), expected_count); + + let reqs_before = factory.captured_requests().len(); + mgr.optimize(&COMMITMENT); + + let filter_pks = + all_filter_pubkeys_from(&factory, reqs_before); + assert_eq!(filter_pks.len(), expected_count); + // Verify unsubscribed pubkeys are absent. + for pk in &unsub1 { + assert!(!filter_pks.contains(&pk.to_string())); + } + for pk in &new_pks[0..2] { + assert!(!filter_pks.contains(&pk.to_string())); + } + } + + #[test] + fn test_rapid_subscribe_unsubscribe_same_pubkey() { + let (mut mgr, _factory) = create_manager(); + let pk = Pubkey::new_unique(); + + mgr.account_subscribe(&[pk], &COMMITMENT); + mgr.account_unsubscribe(&[pk]); + mgr.account_subscribe(&[pk], &COMMITMENT); + + assert!(mgr.subscriptions().contains(&pk)); + assert!(mgr.current_new_subs().contains(&pk)); + } + + // ------------------------------------------------------------- + // 10. 
Stream Factory Interaction Verification + // ------------------------------------------------------------- + + #[test] + fn test_factory_called_with_correct_commitment() { + let (mut mgr, factory) = create_manager(); + let commitment = CommitmentLevel::Finalized; + let pk = Pubkey::new_unique(); + + mgr.account_subscribe(&[pk], &commitment); + + let reqs = factory.captured_requests(); + assert_eq!(reqs.len(), 1); + assert_eq!( + reqs[0].commitment, + Some(i32::from(CommitmentLevel::Finalized)), + ); + } + + #[test] + fn test_factory_called_with_slot_filter() { + let (mut mgr, factory) = create_manager(); + subscribe_n(&mut mgr, 1); + + let reqs = factory.captured_requests(); + assert!(!reqs[0].slots.is_empty()); + } + + #[test] + fn test_optimize_factory_calls_contain_chunked_pubkeys() { + let (mut mgr, factory) = create_manager(); + subscribe_n(&mut mgr, 15); + + let reqs_before = factory.captured_requests().len(); + mgr.optimize(&COMMITMENT); + + let optimize_reqs: Vec<_> = factory + .captured_requests() + .into_iter() + .skip(reqs_before) + .collect(); + assert_eq!(optimize_reqs.len(), 2); + + let first_pks = account_pubkeys_from_request(&optimize_reqs[0]); + let second_pks = + account_pubkeys_from_request(&optimize_reqs[1]); + assert_eq!(first_pks.len(), 10); + assert_eq!(second_pks.len(), 5); + + // No overlap. 
+ assert!(first_pks.is_disjoint(&second_pks)); + } + + #[test] + fn test_factory_not_called_on_unsubscribe() { + let (mut mgr, factory) = create_manager(); + subscribe_n(&mut mgr, 5); + let calls_before = factory.captured_requests().len(); + + let pks: Vec = + mgr.subscriptions().iter().take(3).copied().collect(); + mgr.account_unsubscribe(&pks); + + assert_eq!(factory.captured_requests().len(), calls_before); + } } From 2df5ea61ad3d6772e68415f4f8b126873126b795 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Mon, 16 Feb 2026 17:34:35 +0700 Subject: [PATCH 46/64] feat: initial impl to get all tests to pass --- .../chain_laser_actor/stream_manager.rs | 96 +++++++++++++++---- 1 file changed, 75 insertions(+), 21 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs index 6c4f852b0..e9822713c 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs @@ -127,14 +127,35 @@ impl StreamManager { // Promote if current-new exceeds threshold. if self.current_new_subs.len() > self.config.max_subs_in_new { + let overflow_count = self.current_new_subs.len() + - self.config.max_subs_in_new; + // The overflow pubkeys are the tail of new_pks. + let overflow_start = new_pks.len().saturating_sub( + overflow_count, + ); + let overflow_pks = &new_pks[overflow_start..]; + // Move current-new stream to unoptimized old. if let Some(stream) = self.current_new_stream.take() { self.unoptimized_old_streams.push(stream); } self.current_new_subs.clear(); - // Create a fresh empty current-new stream. - self.current_new_stream = None; + // Start fresh current-new with overflow pubkeys. 
+ if overflow_pks.is_empty() { + self.current_new_stream = None; + } else { + for pk in overflow_pks { + self.current_new_subs.insert(*pk); + } + self.current_new_stream = + Some(self.create_account_stream( + &overflow_pks + .iter() + .collect::>(), + commitment, + )); + } // If unoptimized old streams exceed the limit, optimize. if self.unoptimized_old_streams.len() @@ -163,8 +184,28 @@ impl StreamManager { /// 2. Create a new stream for each chunk → `optimized_old_streams`. /// 3. Clear `unoptimized_old_streams`. /// 4. Reset the current-new stream (empty filter). - pub fn optimize(&mut self, _commitment: &CommitmentLevel) { - todo!("optimize") + pub fn optimize(&mut self, commitment: &CommitmentLevel) { + // Collect all active subscriptions and chunk them. + let all_pks: Vec = + self.subscriptions.iter().copied().collect(); + + // Build optimized old streams from chunks. + self.optimized_old_streams = all_pks + .chunks(self.config.max_subs_in_old_optimized) + .map(|chunk| { + let refs: Vec<&Pubkey> = chunk.iter().collect(); + self.stream_factory.subscribe( + Self::build_account_request(&refs, commitment), + ) + }) + .collect(); + + // Clear unoptimized old streams. + self.unoptimized_old_streams.clear(); + + // Reset the current-new stream. + self.current_new_subs.clear(); + self.current_new_stream = None; } /// Returns `true` if the pubkey is in the active `subscriptions` @@ -206,7 +247,17 @@ impl StreamManager { /// Returns mutable references to all account streams (optimized /// old + unoptimized old + current-new) for polling. 
pub fn all_account_streams_mut(&mut self) -> Vec<&mut LaserStream> { - todo!("all_account_streams_mut") + let mut streams = Vec::new(); + for s in &mut self.optimized_old_streams { + streams.push(s); + } + for s in &mut self.unoptimized_old_streams { + streams.push(s); + } + if let Some(s) = &mut self.current_new_stream { + streams.push(s); + } + streams } /// Returns the total number of account streams across all @@ -226,14 +277,12 @@ impl StreamManager { // Internal helpers // --------------------------------------------------------- - /// Build a `SubscribeRequest` and call the factory for the given - /// account pubkeys. Includes a slot subscription for chain slot - /// synchronisation (matching the legacy path). - fn create_account_stream( - &self, + /// Build a `SubscribeRequest` for the given account pubkeys. + /// Includes a slot subscription for chain slot synchronisation. + fn build_account_request( pubkeys: &[&Pubkey], commitment: &CommitmentLevel, - ) -> LaserStream { + ) -> SubscribeRequest { let mut accounts = HashMap::new(); accounts.insert( "account_subs".to_string(), @@ -255,12 +304,23 @@ impl StreamManager { }, ); - let request = SubscribeRequest { + SubscribeRequest { accounts, slots, commitment: Some((*commitment).into()), ..Default::default() - }; + } + } + + /// Build a `SubscribeRequest` and call the factory for the given + /// account pubkeys. + fn create_account_stream( + &self, + pubkeys: &[&Pubkey], + commitment: &CommitmentLevel, + ) -> LaserStream { + let request = + Self::build_account_request(pubkeys, commitment); self.stream_factory.subscribe(request) } @@ -940,14 +1000,8 @@ mod tests { subscribe_n(&mut mgr, 6); // Current-new also exists from the overflow pubkey. 
+ let expected = mgr.account_stream_count(); let streams = mgr.all_account_streams_mut(); - let expected = mgr.optimized_old_stream_count() - + mgr.unoptimized_old_stream_count() - + if mgr.current_new_sub_count() > 0 { - 1 - } else { - 0 - }; assert_eq!(streams.len(), expected); } @@ -1148,4 +1202,4 @@ mod tests { assert_eq!(factory.captured_requests().len(), calls_before); } -} +} \ No newline at end of file From 2ea0e9c9402919a103c610c08212016be158514a Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Mon, 16 Feb 2026 17:40:34 +0700 Subject: [PATCH 47/64] chore: minor cleanup --- .../chain_laser_actor/stream_manager.rs | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs index e9822713c..70888bc6e 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs @@ -244,17 +244,17 @@ impl StreamManager { self.optimized_old_streams.len() } - /// Returns mutable references to all account streams (optimized - /// old + unoptimized old + current-new) for polling. - pub fn all_account_streams_mut(&mut self) -> Vec<&mut LaserStream> { + /// Returns references to all account streams (optimized old + + /// unoptimized old + current-new) for inspection. 
+ pub fn all_account_streams(&self) -> Vec<&LaserStream> { let mut streams = Vec::new(); - for s in &mut self.optimized_old_streams { + for s in &self.optimized_old_streams { streams.push(s); } - for s in &mut self.unoptimized_old_streams { + for s in &self.unoptimized_old_streams { streams.push(s); } - if let Some(s) = &mut self.current_new_stream { + if let Some(s) = &self.current_new_stream { streams.push(s); } streams @@ -560,7 +560,7 @@ mod tests { .captured_requests() .iter() .skip(start_idx) - .flat_map(|r| account_pubkeys_from_request(r)) + .flat_map(account_pubkeys_from_request) .collect() } @@ -711,7 +711,7 @@ mod tests { // Optimized old streams should exist. let total_subs = mgr.subscriptions().len(); let expected_optimized = - (total_subs + 9) / 10; // ceil(total / MAX_OLD_OPTIMIZED) + total_subs.div_ceil(10); // ceil(total / MAX_OLD_OPTIMIZED) assert_eq!( mgr.optimized_old_stream_count(), expected_optimized, @@ -1001,15 +1001,15 @@ mod tests { // Current-new also exists from the overflow pubkey. let expected = mgr.account_stream_count(); - let streams = mgr.all_account_streams_mut(); + let streams = mgr.all_account_streams(); assert_eq!(streams.len(), expected); } #[test] fn test_all_account_streams_empty_when_no_subscriptions() { - let (mut mgr, _factory) = create_manager(); + let (mgr, _factory) = create_manager(); - let streams = mgr.all_account_streams_mut(); + let streams = mgr.all_account_streams(); assert!(streams.is_empty()); } @@ -1026,7 +1026,7 @@ mod tests { mgr.optimize(&COMMITMENT); assert_eq!(mgr.unoptimized_old_stream_count(), 0); - let streams = mgr.all_account_streams_mut(); + let streams = mgr.all_account_streams(); // Only optimized old streams remain (current-new is empty // after optimize). 
assert_eq!(streams.len(), mgr.optimized_old_stream_count()); From bf48eee38610234546d4ce64eb31cc53c332bc29 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Tue, 17 Feb 2026 09:45:20 +0700 Subject: [PATCH 48/64] chore: minor cleanup and clarifications --- .../chain_laser_actor/mod.rs | 7 +-- .../chain_laser_actor/stream_manager.rs | 59 +++++++++---------- 2 files changed, 32 insertions(+), 34 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs index cefa7a9b1..93a2edcdf 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs @@ -40,9 +40,8 @@ impl StreamFactoryImpl { impl StreamFactory for StreamFactoryImpl { fn subscribe(&self, request: SubscribeRequest) -> LaserStream { - Box::pin( - helius_laserstream::client::subscribe(self.config.clone(), request) - .0, - ) + let (stream, _handle) = + helius_laserstream::client::subscribe(self.config.clone(), request); + Box::pin(stream) } } diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs index 70888bc6e..e9fb3b194 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs @@ -34,44 +34,44 @@ impl Default for StreamManagerConfig { /// /// Account subscriptions follow a generational approach: /// - New subscriptions go into the *current-new* stream. -/// - When the current-new stream exceeds `max_subs_in_new` it is -/// promoted to the *unoptimized old* streams vec and a fresh -/// current-new stream is created. 
-/// - When unoptimized old streams exceed `max_old_unoptimized`, +/// - When the current-new stream exceeds [StreamManagerConfig::max_subs_in_new] it is +/// promoted to the [Self::unoptimized_old_streams] vec and a fresh current-new stream is created. +/// - When [Self::unoptimized_old_streams] exceed [StreamManagerConfig::max_old_unoptimized], /// optimization is triggered which rebuilds all streams from the -/// `subscriptions` set into *optimized old* streams chunked by -/// `max_subs_in_old_optimized`. +/// `subscriptions` set into [StreamManager::optimized_old_streams] chunked by +/// [StreamManagerConfig::max_subs_in_old_optimized]. /// -/// Unsubscribe only removes from the `subscriptions` HashSet — it +/// Unsubscribe only removes from the [Self::subscriptions] HashSet — it /// never touches streams. Updates for unsubscribed pubkeys are /// ignored at the actor level. +/// Unsubscribed accounts are dropped as part of optimization. #[allow(unused)] pub struct StreamManager { + /// Configures limits for stream management config: StreamManagerConfig, + /// The factory used to create streams stream_factory: S, - - // ----- Program subscriptions (unchanged) ----- /// Active streams for program subscriptions program_subscriptions: Option<(HashSet, LaserStream)>, - - // ----- Generational account subscriptions ----- /// The canonical set of currently active account subscriptions. + /// These include subscriptions maintained across the different set of streams, + /// [Self::current_new_stream], [Self::unoptimized_old_streams], and + /// [Self::optimized_old_streams]. subscriptions: HashSet, /// Pubkeys that are part of the current-new stream's filter. current_new_subs: HashSet, - /// The current-new stream (None until the first subscribe call). + /// The current-new stream which holds the [Self::current_new_subs]. + /// (None until the first subscribe call). current_new_stream: Option, /// Old streams that have not been optimized yet. 
unoptimized_old_streams: Vec, /// Old streams created by optimization, each covering up to - /// `max_subs_in_old_optimized` subscriptions. + /// [StreamManagerConfig::max_subs_in_old_optimized] subscriptions. optimized_old_streams: Vec, } #[allow(unused)] impl StreamManager { - /// Creates a new stream manager with the given config and stream - /// factory. pub fn new(config: StreamManagerConfig, stream_factory: S) -> Self { Self { config, @@ -85,17 +85,16 @@ impl StreamManager { } } - // --------------------------------------------------------- - // Account subscription — generational API (stubs) - // --------------------------------------------------------- + // --------------------- + // Account subscription + // --------------------- /// Subscribe to account updates for the given pubkeys. /// - /// Each pubkey is added to `subscriptions` and to the current-new - /// stream. If the current-new stream exceeds `max_subs_in_new` it - /// is promoted and a fresh one is created. If unoptimized old - /// streams exceed `max_old_unoptimized`, optimization is - /// triggered. + /// Each pubkey is added to [Self::subscriptions] and to the [Self::current_new_stream]. + /// If the [Self::current_new_stream] exceeds [StreamManagerConfig::max_subs_in_new] it + /// is promoted and a fresh one is created. If [Self::unoptimized_old_streams] exceed + /// [StreamManagerConfig::max_old_unoptimized], optimization is triggered. pub fn account_subscribe( &mut self, pubkeys: &[Pubkey], @@ -225,28 +224,28 @@ impl StreamManager { /// Returns the number of pubkeys in the current-new stream's /// filter. - pub fn current_new_sub_count(&self) -> usize { + fn current_new_sub_count(&self) -> usize { self.current_new_subs.len() } /// Returns a reference to the current-new stream's pubkey set. - pub fn current_new_subs(&self) -> &HashSet { + fn current_new_subs(&self) -> &HashSet { &self.current_new_subs } /// Returns the number of unoptimized old streams. 
- pub fn unoptimized_old_stream_count(&self) -> usize { + fn unoptimized_old_stream_count(&self) -> usize { self.unoptimized_old_streams.len() } /// Returns the number of optimized old streams. - pub fn optimized_old_stream_count(&self) -> usize { + fn optimized_old_stream_count(&self) -> usize { self.optimized_old_streams.len() } /// Returns references to all account streams (optimized old + /// unoptimized old + current-new) for inspection. - pub fn all_account_streams(&self) -> Vec<&LaserStream> { + fn all_account_streams(&self) -> Vec<&LaserStream> { let mut streams = Vec::new(); for s in &self.optimized_old_streams { streams.push(s); @@ -262,7 +261,7 @@ impl StreamManager { /// Returns the total number of account streams across all /// generations. - pub fn account_stream_count(&self) -> usize { + fn account_stream_count(&self) -> usize { let current = if self.current_new_stream.is_some() { 1 } else { @@ -1202,4 +1201,4 @@ mod tests { assert_eq!(factory.captured_requests().len(), calls_before); } -} \ No newline at end of file +} From b2990a68f38183857aa353073d9cc2a29503272a Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Tue, 17 Feb 2026 15:31:29 +0700 Subject: [PATCH 49/64] chore: prep to include handle in subscribe return --- .../chain_laser_actor/actor.rs | 18 +++++--- .../chain_laser_actor/mock.rs | 38 ++++++++++++---- .../chain_laser_actor/mod.rs | 44 +++++++++++++++---- .../chain_laser_actor/stream_manager.rs | 32 ++++++++------ 4 files changed, 94 insertions(+), 38 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs index 1cc1ff7dd..6a5adbff5 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs @@ -1,6 +1,7 @@ use std::{ collections::HashSet, fmt, + marker::PhantomData, sync::{ atomic::{AtomicU16, 
AtomicU64, Ordering}, Arc, @@ -31,7 +32,8 @@ use tonic::Code; use tracing::*; use super::{ - LaserResult, LaserStream, StreamFactory, StreamManager, StreamManagerConfig, + LaserResult, LaserStream, StreamFactory, StreamHandle, StreamManager, + StreamManagerConfig, }; use crate::remote_account_provider::{ chain_rpc_client::{ChainRpcClient, ChainRpcClientImpl}, @@ -112,9 +114,9 @@ impl fmt::Display for AccountUpdateSource { /// - If a stream ends unexpectedly, `signal_connection_issue()` is called. /// - The actor sends an abort signal to the submux, which triggers reconnection. /// - The actor itself doesn't attempt to reconnect; it relies on external recovery. -pub struct ChainLaserActor { +pub struct ChainLaserActor> { /// Manager for creating laser streams - stream_manager: StreamManager, + stream_manager: StreamManager, /// Requested subscriptions, some may not be active yet. /// Shared with ChainLaserClientImpl for sync access to /// subscription_count and subscriptions_union. @@ -139,9 +141,10 @@ pub struct ChainLaserActor { client_id: String, /// RPC client for diagnostics (e.g., fetching slot when falling behind) rpc_client: ChainRpcClientImpl, + _phantom: PhantomData, } -impl ChainLaserActor { +impl ChainLaserActor { pub fn new_from_url( pubsub_url: &str, client_id: &str, @@ -204,7 +207,7 @@ impl ChainLaserActor { } } -impl ChainLaserActor { +impl> ChainLaserActor { /// Create actor with a custom stream factory (for testing) pub fn with_stream_factory( client_id: &str, @@ -243,6 +246,7 @@ impl ChainLaserActor { slots, client_id: client_id.to_string(), rpc_client, + _phantom: PhantomData, }; ( @@ -659,7 +663,7 @@ impl ChainLaserActor { subscriptions: &SharedSubscriptions, active_subscriptions: &mut StreamMap, active_subscription_pubkeys: &mut HashSet, - stream_manager: &mut StreamManager, + stream_manager: &mut StreamManager, ) { subscriptions.write().clear(); active_subscriptions.clear(); @@ -676,7 +680,7 @@ impl ChainLaserActor { subscriptions: 
&SharedSubscriptions, active_subscriptions: &mut StreamMap, active_subscription_pubkeys: &mut HashSet, - stream_manager: &mut StreamManager, + stream_manager: &mut StreamManager, abort_sender: &mpsc::Sender<()>, client_id: &str, ) { diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mock.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mock.rs index 654c8b7b7..ee2a286ca 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mock.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mock.rs @@ -1,10 +1,13 @@ +use async_trait::async_trait; use std::sync::{Arc, Mutex}; -use helius_laserstream::{grpc, grpc::SubscribeRequest, LaserstreamError}; +use helius_laserstream::{LaserstreamError, grpc::{self, SubscribeRequest}}; use tokio::sync::mpsc; use tokio_stream::wrappers::UnboundedReceiverStream; -use super::{LaserResult, LaserStream, StreamFactory}; +use crate::remote_account_provider::chain_laser_actor::{LaserStreamWithHandle, StreamHandle}; + +use super::{LaserResult, StreamFactory}; /// A test mock that captures subscription requests and allows driving streams /// programmatically @@ -88,17 +91,34 @@ impl Default for MockStreamFactory { } } -impl StreamFactory for MockStreamFactory { - fn subscribe(&self, request: SubscribeRequest) -> LaserStream { +pub struct MockStreamHandle { + write_tx: mpsc::UnboundedSender, +} + +#[async_trait] +impl StreamHandle for MockStreamHandle { + async fn write(&self, request: SubscribeRequest) -> Result<(), LaserstreamError> { + self.write_tx.send(request).map_err(|_| { + LaserstreamError::ConnectionError("Failed to send update to stream".to_string()) + }) + } +} + +impl StreamFactory for MockStreamFactory { + fn subscribe(&self, request: SubscribeRequest) -> LaserStreamWithHandle { // Record the request self.captured_requests.lock().unwrap().push(request); - // Create a channel and store the sender - let (tx, rx) = 
mpsc::unbounded_channel(); - self.stream_senders.lock().unwrap().push(tx); + // Create a channel for driving LaserResult items into the stream + let (stream_tx, stream_rx) = mpsc::unbounded_channel::(); + self.stream_senders.lock().unwrap().push(stream_tx); + + // Create a channel for the handle's write method + let (write_tx, _write_rx) = mpsc::unbounded_channel::(); + let handle = MockStreamHandle { write_tx }; - // Return the receiver wrapped as a stream - Box::pin(UnboundedReceiverStream::new(rx)) + let stream = Box::pin(UnboundedReceiverStream::new(stream_rx)); + LaserStreamWithHandle { stream, handle } } } diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs index 93a2edcdf..6a9dbacab 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs @@ -1,9 +1,9 @@ +use async_trait::async_trait; use std::pin::Pin; use futures_util::Stream; use helius_laserstream::{ - grpc::{SubscribeRequest, SubscribeUpdate}, - LaserstreamError, + LaserstreamError, StreamHandle as HeliusStreamHandle, grpc::{SubscribeRequest, SubscribeUpdate} }; pub use self::{ @@ -22,9 +22,34 @@ pub type LaserResult = Result; pub type LaserStream = Pin + Send>>; /// Abstraction over stream creation for testability -pub trait StreamFactory: Send + Sync + 'static { +pub trait StreamFactory: Send + Sync + 'static { /// Create a stream for the given subscription request - fn subscribe(&self, request: SubscribeRequest) -> LaserStream; + fn subscribe(&self, request: SubscribeRequest) -> LaserStreamWithHandle; +} + +/// A trait to represent the [HeliusStreamHandle]. +/// This is needed since we cannot create the helius one since +/// [helius_laserstream::StreamHandle::write_tx] is private and there is no constructor. 
+#[async_trait] +pub trait StreamHandle { + /// Send a new subscription request to update the active subscription. + async fn write(&self, request: SubscribeRequest) -> Result<(), LaserstreamError>; +} + +pub struct LaserStreamWithHandle { + pub(crate) stream: LaserStream, + pub(crate) handle: S, +} + +pub struct StreamHandleImpl { + pub handle: HeliusStreamHandle, +} + +#[async_trait] +impl StreamHandle for StreamHandleImpl { + async fn write(&self, request: SubscribeRequest) -> Result<(), LaserstreamError> { + self.handle.write(request).await + } } /// Production stream factory that wraps helius client subscribe @@ -38,10 +63,13 @@ impl StreamFactoryImpl { } } -impl StreamFactory for StreamFactoryImpl { - fn subscribe(&self, request: SubscribeRequest) -> LaserStream { - let (stream, _handle) = +impl StreamFactory for StreamFactoryImpl { + fn subscribe(&self, request: SubscribeRequest) -> LaserStreamWithHandle { + let (stream, handle) = helius_laserstream::client::subscribe(self.config.clone(), request); - Box::pin(stream) + LaserStreamWithHandle { + stream: Box::pin(stream), + handle: StreamHandleImpl { handle }, + } } } diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs index e9fb3b194..4f09a9a78 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs @@ -1,4 +1,4 @@ -use std::collections::{HashMap, HashSet}; +use std::{collections::{HashMap, HashSet}, marker::PhantomData}; use helius_laserstream::grpc::{ CommitmentLevel, SubscribeRequest, SubscribeRequestFilterAccounts, @@ -6,6 +6,8 @@ use helius_laserstream::grpc::{ }; use solana_pubkey::Pubkey; +use crate::remote_account_provider::chain_laser_actor::StreamHandle; + use super::{LaserStream, StreamFactory}; /// Configuration for the generational 
stream manager. @@ -46,11 +48,11 @@ impl Default for StreamManagerConfig { /// ignored at the actor level. /// Unsubscribed accounts are dropped as part of optimization. #[allow(unused)] -pub struct StreamManager { +pub struct StreamManager> { /// Configures limits for stream management config: StreamManagerConfig, /// The factory used to create streams - stream_factory: S, + stream_factory: SF, /// Active streams for program subscriptions program_subscriptions: Option<(HashSet, LaserStream)>, /// The canonical set of currently active account subscriptions. @@ -68,11 +70,12 @@ pub struct StreamManager { /// Old streams created by optimization, each covering up to /// [StreamManagerConfig::max_subs_in_old_optimized] subscriptions. optimized_old_streams: Vec, + _phantom: PhantomData, } #[allow(unused)] -impl StreamManager { - pub fn new(config: StreamManagerConfig, stream_factory: S) -> Self { +impl> StreamManager { + pub fn new(config: StreamManagerConfig, stream_factory: SF) -> Self { Self { config, stream_factory, @@ -82,6 +85,7 @@ impl StreamManager { current_new_stream: None, unoptimized_old_streams: Vec::new(), optimized_old_streams: Vec::new(), + _phantom: PhantomData, } } @@ -195,7 +199,7 @@ impl StreamManager { let refs: Vec<&Pubkey> = chunk.iter().collect(); self.stream_factory.subscribe( Self::build_account_request(&refs, commitment), - ) + ).stream }) .collect(); @@ -320,7 +324,7 @@ impl StreamManager { ) -> LaserStream { let request = Self::build_account_request(pubkeys, commitment); - self.stream_factory.subscribe(request) + self.stream_factory.subscribe(request).stream } // ========================================================= @@ -363,7 +367,7 @@ impl StreamManager { from_slot, ..Default::default() }; - self.stream_factory.subscribe(request) + self.stream_factory.subscribe(request).stream } /// Adds a program subscription. 
If the program is already @@ -430,7 +434,7 @@ impl StreamManager { commitment: Some((*commitment).into()), ..Default::default() }; - self.stream_factory.subscribe(request) + self.stream_factory.subscribe(request).stream } } @@ -440,7 +444,7 @@ mod tests { use solana_pubkey::Pubkey; use super::*; - use crate::remote_account_provider::chain_laser_actor::mock::MockStreamFactory; + use crate::remote_account_provider::chain_laser_actor::mock::{MockStreamFactory, MockStreamHandle}; // ----------------- // Helpers @@ -453,7 +457,7 @@ mod tests { } } - fn create_manager() -> (StreamManager, MockStreamFactory) { + fn create_manager() -> (StreamManager, MockStreamFactory) { let factory = MockStreamFactory::new(); let manager = StreamManager::new(test_config(), factory.clone()); (manager, factory) @@ -475,7 +479,7 @@ mod tests { /// Assert that `subscriptions()` contains exactly `expected` /// (order-independent, exact count). fn assert_subscriptions_eq( - mgr: &StreamManager, + mgr: &StreamManager, expected: &[Pubkey], ) { let subs = mgr.subscriptions(); @@ -522,7 +526,7 @@ mod tests { /// Subscribe `n` pubkeys one-at-a-time, returning the created /// pubkeys. fn subscribe_n( - mgr: &mut StreamManager, + mgr: &mut StreamManager, n: usize, ) -> Vec { let pks = make_pubkeys(n); @@ -533,7 +537,7 @@ mod tests { /// Subscribe pubkeys in batches of `batch` until `total` pubkeys /// have been subscribed. Returns all created pubkeys. 
fn subscribe_in_batches( - mgr: &mut StreamManager, + mgr: &mut StreamManager, total: usize, batch: usize, ) -> Vec { From 613768ead2a8942fc1e68138e2ef7bb6ed6d76a1 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Tue, 17 Feb 2026 15:56:59 +0700 Subject: [PATCH 50/64] chore: stream handle test integration --- .../chain_laser_actor/mock.rs | 168 ++++++++++++++---- .../chain_laser_actor/mod.rs | 25 ++- .../chain_laser_actor/stream_manager.rs | 95 ++++------ 3 files changed, 192 insertions(+), 96 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mock.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mock.rs index ee2a286ca..7fc7ad529 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mock.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mock.rs @@ -1,29 +1,40 @@ use async_trait::async_trait; use std::sync::{Arc, Mutex}; -use helius_laserstream::{LaserstreamError, grpc::{self, SubscribeRequest}}; +use helius_laserstream::{ + grpc::{self, SubscribeRequest}, + LaserstreamError, +}; use tokio::sync::mpsc; use tokio_stream::wrappers::UnboundedReceiverStream; -use crate::remote_account_provider::chain_laser_actor::{LaserStreamWithHandle, StreamHandle}; +use crate::remote_account_provider::chain_laser_actor::{ + LaserStreamWithHandle, StreamHandle, +}; use super::{LaserResult, StreamFactory}; -/// A test mock that captures subscription requests and allows driving streams -/// programmatically +/// A test mock that captures subscription requests and allows driving +/// streams programmatically. #[derive(Clone)] #[allow(dead_code)] pub struct MockStreamFactory { - /// Every SubscribeRequest passed to `subscribe()` is recorded here - /// so tests can assert on filter contents, commitment levels, etc. + /// Every `SubscribeRequest` passed to `subscribe()` is recorded + /// here so tests can assert on filter contents, commitment levels, + /// etc. 
captured_requests: Arc>>, - /// A sender that the test uses to push `LaserResult` items into the - /// streams returned by `subscribe()`. - /// Each call to `subscribe()` creates a new mpsc channel; the rx side - /// becomes the returned stream, and the tx side is stored here so the - /// test can drive updates. - stream_senders: Arc>>>, + /// Requests sent through a `MockStreamHandle::write()` call are + /// recorded here so tests can verify handle-driven updates. + handle_requests: Arc>>, + + /// A sender that the test uses to push `LaserResult` items into + /// the streams returned by `subscribe()`. + /// Each call to `subscribe()` creates a new mpsc channel; the rx + /// side becomes the returned stream, and the tx side is stored + /// here so the test can drive updates. + stream_senders: + Arc>>>>, } #[allow(dead_code)] @@ -32,17 +43,28 @@ impl MockStreamFactory { pub fn new() -> Self { Self { captured_requests: Arc::new(Mutex::new(Vec::new())), + handle_requests: Arc::new(Mutex::new(Vec::new())), stream_senders: Arc::new(Mutex::new(Vec::new())), } } - /// Get the captured subscription requests + /// Get the captured subscription requests (from `subscribe()`) pub fn captured_requests(&self) -> Vec { self.captured_requests.lock().unwrap().clone() } + /// Get the requests sent through stream handles (from + /// `handle.write()`) + pub fn handle_requests(&self) -> Vec { + self.handle_requests.lock().unwrap().clone() + } + /// Push an error update to a specific stream - pub fn push_error_to_stream(&self, idx: usize, error: LaserstreamError) { + pub fn push_error_to_stream( + &self, + idx: usize, + error: LaserstreamError, + ) { let senders = self.stream_senders.lock().unwrap(); if let Some(sender) = senders.get(idx) { let _ = sender.send(Err(error)); @@ -58,7 +80,11 @@ impl MockStreamFactory { } /// Push an update to a specific stream by index - pub fn push_update_to_stream(&self, idx: usize, update: LaserResult) { + pub fn push_update_to_stream( + &self, + idx: 
usize, + update: LaserResult, + ) { let senders = self.stream_senders.lock().unwrap(); if let Some(sender) = senders.get(idx) { let _ = sender.send(update); @@ -78,9 +104,10 @@ impl MockStreamFactory { } } - /// Clear all state (requests and streams) + /// Clear all state (requests, handle requests and streams) pub fn clear(&self) { self.captured_requests.lock().unwrap().clear(); + self.handle_requests.lock().unwrap().clear(); self.stream_senders.lock().unwrap().clear(); } } @@ -91,33 +118,51 @@ impl Default for MockStreamFactory { } } +/// Mock handle that records write requests and drains them into the +/// shared `handle_requests` vec on the factory. +#[derive(Clone)] +#[allow(dead_code)] pub struct MockStreamHandle { - write_tx: mpsc::UnboundedSender, + handle_requests: Arc>>, } #[async_trait] impl StreamHandle for MockStreamHandle { - async fn write(&self, request: SubscribeRequest) -> Result<(), LaserstreamError> { - self.write_tx.send(request).map_err(|_| { - LaserstreamError::ConnectionError("Failed to send update to stream".to_string()) - }) + async fn write( + &self, + request: SubscribeRequest, + ) -> Result<(), LaserstreamError> { + self.handle_requests + .lock() + .unwrap() + .push(request); + Ok(()) } } impl StreamFactory for MockStreamFactory { - fn subscribe(&self, request: SubscribeRequest) -> LaserStreamWithHandle { - // Record the request + fn subscribe( + &self, + request: SubscribeRequest, + ) -> LaserStreamWithHandle { + // Record the initial subscribe request self.captured_requests.lock().unwrap().push(request); - // Create a channel for driving LaserResult items into the stream - let (stream_tx, stream_rx) = mpsc::unbounded_channel::(); + // Create a channel for driving LaserResult items into the + // stream + let (stream_tx, stream_rx) = + mpsc::unbounded_channel::(); + let stream = Box::pin(UnboundedReceiverStream::new(stream_rx)); + + let stream_tx = Arc::new(stream_tx); self.stream_senders.lock().unwrap().push(stream_tx); - // Create a 
channel for the handle's write method - let (write_tx, _write_rx) = mpsc::unbounded_channel::(); - let handle = MockStreamHandle { write_tx }; + // The handle shares the factory's handle_requests vec so + // every write is visible to tests immediately. + let handle = MockStreamHandle { + handle_requests: Arc::clone(&self.handle_requests), + }; - let stream = Box::pin(UnboundedReceiverStream::new(stream_rx)); LaserStreamWithHandle { stream, handle } } } @@ -156,16 +201,72 @@ mod tests { } #[tokio::test] - async fn test_mock_can_drive_updates() { + async fn test_mock_handle_write_records_requests() { let mock = MockStreamFactory::new(); let request = SubscribeRequest::default(); - let _stream = mock.subscribe(request); + let result = mock.subscribe(request); assert_eq!(mock.active_stream_count(), 1); - // The stream is created but we can't easily test the update without - // running the actual stream, which is tested in integration tests + // Write an updated request through the handle + let mut accounts = HashMap::new(); + accounts.insert( + "updated".to_string(), + SubscribeRequestFilterAccounts::default(), + ); + let update_request = SubscribeRequest { + accounts, + commitment: Some(CommitmentLevel::Confirmed.into()), + ..Default::default() + }; + + result + .handle + .write(update_request.clone()) + .await + .unwrap(); + + let handle_reqs = mock.handle_requests(); + assert_eq!(handle_reqs.len(), 1); + assert_eq!( + handle_reqs[0].commitment, + update_request.commitment + ); + assert!(handle_reqs[0].accounts.contains_key("updated")); + } + + #[tokio::test] + async fn test_mock_handle_write_multiple() { + let mock = MockStreamFactory::new(); + + let r1 = mock.subscribe(SubscribeRequest::default()); + let r2 = mock.subscribe(SubscribeRequest::default()); + + // Both handles share the same handle_requests vec + r1.handle + .write(SubscribeRequest { + commitment: Some( + CommitmentLevel::Processed.into(), + ), + ..Default::default() + }) + .await + .unwrap(); + + 
r2.handle + .write(SubscribeRequest { + commitment: Some( + CommitmentLevel::Finalized.into(), + ), + ..Default::default() + }) + .await + .unwrap(); + + let handle_reqs = mock.handle_requests(); + assert_eq!(handle_reqs.len(), 2); + assert_eq!(mock.captured_requests().len(), 2); } #[test] @@ -180,5 +281,6 @@ mod tests { mock.clear(); assert_eq!(mock.captured_requests().len(), 0); + assert_eq!(mock.handle_requests().len(), 0); } } diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs index 6a9dbacab..e9be0aa60 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs @@ -1,9 +1,10 @@ -use async_trait::async_trait; use std::pin::Pin; +use async_trait::async_trait; use futures_util::Stream; use helius_laserstream::{ - LaserstreamError, StreamHandle as HeliusStreamHandle, grpc::{SubscribeRequest, SubscribeUpdate} + grpc::{SubscribeRequest, SubscribeUpdate}, + LaserstreamError, StreamHandle as HeliusStreamHandle, }; pub use self::{ @@ -31,23 +32,32 @@ pub trait StreamFactory: Send + Sync + 'static { /// This is needed since we cannot create the helius one since /// [helius_laserstream::StreamHandle::write_tx] is private and there is no constructor. #[async_trait] +#[allow(dead_code)] pub trait StreamHandle { /// Send a new subscription request to update the active subscription. 
- async fn write(&self, request: SubscribeRequest) -> Result<(), LaserstreamError>; + async fn write( + &self, + request: SubscribeRequest, + ) -> Result<(), LaserstreamError>; } +#[allow(dead_code)] pub struct LaserStreamWithHandle { pub(crate) stream: LaserStream, pub(crate) handle: S, } +#[allow(dead_code)] pub struct StreamHandleImpl { - pub handle: HeliusStreamHandle, + pub handle: HeliusStreamHandle, } #[async_trait] impl StreamHandle for StreamHandleImpl { - async fn write(&self, request: SubscribeRequest) -> Result<(), LaserstreamError> { + async fn write( + &self, + request: SubscribeRequest, + ) -> Result<(), LaserstreamError> { self.handle.write(request).await } } @@ -64,7 +74,10 @@ impl StreamFactoryImpl { } impl StreamFactory for StreamFactoryImpl { - fn subscribe(&self, request: SubscribeRequest) -> LaserStreamWithHandle { + fn subscribe( + &self, + request: SubscribeRequest, + ) -> LaserStreamWithHandle { let (stream, handle) = helius_laserstream::client::subscribe(self.config.clone(), request); LaserStreamWithHandle { diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs index 4f09a9a78..3bced8693 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs @@ -1,4 +1,7 @@ -use std::{collections::{HashMap, HashSet}, marker::PhantomData}; +use std::{ + collections::{HashMap, HashSet}, + marker::PhantomData, +}; use helius_laserstream::grpc::{ CommitmentLevel, SubscribeRequest, SubscribeRequestFilterAccounts, @@ -6,9 +9,8 @@ use helius_laserstream::grpc::{ }; use solana_pubkey::Pubkey; -use crate::remote_account_provider::chain_laser_actor::StreamHandle; - use super::{LaserStream, StreamFactory}; +use crate::remote_account_provider::chain_laser_actor::StreamHandle; /// Configuration for the 
generational stream manager. #[allow(unused)] @@ -122,20 +124,17 @@ impl> StreamManager { // (Re)create the current-new stream with the full // current_new_subs filter. - self.current_new_stream = - Some(self.create_account_stream( - &self.current_new_subs.iter().collect::>(), - commitment, - )); + self.current_new_stream = Some(self.create_account_stream( + &self.current_new_subs.iter().collect::>(), + commitment, + )); // Promote if current-new exceeds threshold. if self.current_new_subs.len() > self.config.max_subs_in_new { - let overflow_count = self.current_new_subs.len() - - self.config.max_subs_in_new; + let overflow_count = + self.current_new_subs.len() - self.config.max_subs_in_new; // The overflow pubkeys are the tail of new_pks. - let overflow_start = new_pks.len().saturating_sub( - overflow_count, - ); + let overflow_start = new_pks.len().saturating_sub(overflow_count); let overflow_pks = &new_pks[overflow_start..]; // Move current-new stream to unoptimized old. @@ -151,13 +150,10 @@ impl> StreamManager { for pk in overflow_pks { self.current_new_subs.insert(*pk); } - self.current_new_stream = - Some(self.create_account_stream( - &overflow_pks - .iter() - .collect::>(), - commitment, - )); + self.current_new_stream = Some(self.create_account_stream( + &overflow_pks.iter().collect::>(), + commitment, + )); } // If unoptimized old streams exceed the limit, optimize. @@ -189,17 +185,16 @@ impl> StreamManager { /// 4. Reset the current-new stream (empty filter). pub fn optimize(&mut self, commitment: &CommitmentLevel) { // Collect all active subscriptions and chunk them. - let all_pks: Vec = - self.subscriptions.iter().copied().collect(); + let all_pks: Vec = self.subscriptions.iter().copied().collect(); // Build optimized old streams from chunks. 
self.optimized_old_streams = all_pks .chunks(self.config.max_subs_in_old_optimized) .map(|chunk| { let refs: Vec<&Pubkey> = chunk.iter().collect(); - self.stream_factory.subscribe( - Self::build_account_request(&refs, commitment), - ).stream + self.stream_factory + .subscribe(Self::build_account_request(&refs, commitment)) + .stream }) .collect(); @@ -290,10 +285,7 @@ impl> StreamManager { accounts.insert( "account_subs".to_string(), SubscribeRequestFilterAccounts { - account: pubkeys - .iter() - .map(|pk| pk.to_string()) - .collect(), + account: pubkeys.iter().map(|pk| pk.to_string()).collect(), ..Default::default() }, ); @@ -322,8 +314,7 @@ impl> StreamManager { pubkeys: &[&Pubkey], commitment: &CommitmentLevel, ) -> LaserStream { - let request = - Self::build_account_request(pubkeys, commitment); + let request = Self::build_account_request(pubkeys, commitment); self.stream_factory.subscribe(request).stream } @@ -444,7 +435,9 @@ mod tests { use solana_pubkey::Pubkey; use super::*; - use crate::remote_account_provider::chain_laser_actor::mock::{MockStreamFactory, MockStreamHandle}; + use crate::remote_account_provider::chain_laser_actor::mock::{ + MockStreamFactory, MockStreamHandle, + }; // ----------------- // Helpers @@ -457,7 +450,10 @@ mod tests { } } - fn create_manager() -> (StreamManager, MockStreamFactory) { + fn create_manager() -> ( + StreamManager, + MockStreamFactory, + ) { let factory = MockStreamFactory::new(); let manager = StreamManager::new(test_config(), factory.clone()); (manager, factory) @@ -713,12 +709,8 @@ mod tests { assert_eq!(mgr.unoptimized_old_stream_count(), 0); // Optimized old streams should exist. 
let total_subs = mgr.subscriptions().len(); - let expected_optimized = - total_subs.div_ceil(10); // ceil(total / MAX_OLD_OPTIMIZED) - assert_eq!( - mgr.optimized_old_stream_count(), - expected_optimized, - ); + let expected_optimized = total_subs.div_ceil(10); // ceil(total / MAX_OLD_OPTIMIZED) + assert_eq!(mgr.optimized_old_stream_count(), expected_optimized,); } #[test] @@ -788,8 +780,7 @@ mod tests { // pubkeys. let remaining: HashSet = pks[5..].iter().map(|pk| pk.to_string()).collect(); - let filter_pks = - all_filter_pubkeys_from(&factory, reqs_before); + let filter_pks = all_filter_pubkeys_from(&factory, reqs_before); assert_eq!(filter_pks.len(), 10); for pk in &to_unsub { assert!( @@ -826,10 +817,7 @@ mod tests { let count_after_first = mgr.optimized_old_stream_count(); mgr.optimize(&COMMITMENT); - assert_eq!( - mgr.optimized_old_stream_count(), - count_after_first, - ); + assert_eq!(mgr.optimized_old_stream_count(), count_after_first,); } // ------------------------------------------------------------- @@ -871,10 +859,7 @@ mod tests { subscribe_n(&mut mgr, 6); // Unoptimized grows by 1 but no second optimization. assert!(mgr.unoptimized_old_stream_count() <= 1); - assert_eq!( - mgr.optimized_old_stream_count(), - optimized_after_first, - ); + assert_eq!(mgr.optimized_old_stream_count(), optimized_after_first,); } // ------------------------------------------------------------- @@ -1017,8 +1002,7 @@ mod tests { } #[test] - fn test_all_account_streams_after_optimize_drops_old_unoptimized() - { + fn test_all_account_streams_after_optimize_drops_old_unoptimized() { let (mut mgr, _factory) = create_manager(); // Create unoptimized old streams. for _ in 0..2 { @@ -1086,8 +1070,7 @@ mod tests { // Verify the union of all optimized stream filters equals all // 50 pubkeys. 
- let filter_pks = - all_filter_pubkeys_from(&factory, reqs_before); + let filter_pks = all_filter_pubkeys_from(&factory, reqs_before); assert_eq!(filter_pks.len(), 50); for pk in &pks { assert!(filter_pks.contains(&pk.to_string())); @@ -1114,8 +1097,7 @@ mod tests { let reqs_before = factory.captured_requests().len(); mgr.optimize(&COMMITMENT); - let filter_pks = - all_filter_pubkeys_from(&factory, reqs_before); + let filter_pks = all_filter_pubkeys_from(&factory, reqs_before); assert_eq!(filter_pks.len(), expected_count); // Verify unsubscribed pubkeys are absent. for pk in &unsub1 { @@ -1184,8 +1166,7 @@ mod tests { assert_eq!(optimize_reqs.len(), 2); let first_pks = account_pubkeys_from_request(&optimize_reqs[0]); - let second_pks = - account_pubkeys_from_request(&optimize_reqs[1]); + let second_pks = account_pubkeys_from_request(&optimize_reqs[1]); assert_eq!(first_pks.len(), 10); assert_eq!(second_pks.len(), 5); From da4c6e7ca1649de92d19c7403670f85da364a3e3 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Tue, 17 Feb 2026 16:31:29 +0700 Subject: [PATCH 51/64] chore: operate on full streams with handles --- .../chain_laser_actor/actor.rs | 6 +-- .../chain_laser_actor/mock.rs | 47 +++++-------------- .../chain_laser_actor/stream_manager.rs | 41 ++++++++-------- 3 files changed, 33 insertions(+), 61 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs index 6a5adbff5..287b04760 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs @@ -308,7 +308,7 @@ impl> ChainLaserActor { // Program subscription updates update = async { match self.stream_manager.program_stream_mut() { - Some(stream) => stream.next().await, + Some(swh) => swh.stream.next().await, None => std::future::pending().await, } }, if 
self.stream_manager.has_program_subscriptions() => { @@ -486,13 +486,13 @@ impl> ChainLaserActor { } for (idx, chunk) in chunks.into_iter().enumerate() { - let stream = self.stream_manager.account_subscribe_old( + let swh = self.stream_manager.account_subscribe_old( &chunk, &self.commitment, idx, from_slot, ); - new_subs.insert(idx, stream); + new_subs.insert(idx, swh.stream); } // Drop current active subscriptions by reassignig to new ones diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mock.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mock.rs index 7fc7ad529..04e6a4d28 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mock.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mock.rs @@ -1,6 +1,6 @@ -use async_trait::async_trait; use std::sync::{Arc, Mutex}; +use async_trait::async_trait; use helius_laserstream::{ grpc::{self, SubscribeRequest}, LaserstreamError, @@ -8,12 +8,11 @@ use helius_laserstream::{ use tokio::sync::mpsc; use tokio_stream::wrappers::UnboundedReceiverStream; +use super::{LaserResult, StreamFactory}; use crate::remote_account_provider::chain_laser_actor::{ LaserStreamWithHandle, StreamHandle, }; -use super::{LaserResult, StreamFactory}; - /// A test mock that captures subscription requests and allows driving /// streams programmatically. #[derive(Clone)] @@ -33,8 +32,7 @@ pub struct MockStreamFactory { /// Each call to `subscribe()` creates a new mpsc channel; the rx /// side becomes the returned stream, and the tx side is stored /// here so the test can drive updates. 
- stream_senders: - Arc>>>>, + stream_senders: Arc>>>>, } #[allow(dead_code)] @@ -60,11 +58,7 @@ impl MockStreamFactory { } /// Push an error update to a specific stream - pub fn push_error_to_stream( - &self, - idx: usize, - error: LaserstreamError, - ) { + pub fn push_error_to_stream(&self, idx: usize, error: LaserstreamError) { let senders = self.stream_senders.lock().unwrap(); if let Some(sender) = senders.get(idx) { let _ = sender.send(Err(error)); @@ -80,11 +74,7 @@ impl MockStreamFactory { } /// Push an update to a specific stream by index - pub fn push_update_to_stream( - &self, - idx: usize, - update: LaserResult, - ) { + pub fn push_update_to_stream(&self, idx: usize, update: LaserResult) { let senders = self.stream_senders.lock().unwrap(); if let Some(sender) = senders.get(idx) { let _ = sender.send(update); @@ -132,10 +122,7 @@ impl StreamHandle for MockStreamHandle { &self, request: SubscribeRequest, ) -> Result<(), LaserstreamError> { - self.handle_requests - .lock() - .unwrap() - .push(request); + self.handle_requests.lock().unwrap().push(request); Ok(()) } } @@ -150,8 +137,7 @@ impl StreamFactory for MockStreamFactory { // Create a channel for driving LaserResult items into the // stream - let (stream_tx, stream_rx) = - mpsc::unbounded_channel::(); + let (stream_tx, stream_rx) = mpsc::unbounded_channel::(); let stream = Box::pin(UnboundedReceiverStream::new(stream_rx)); let stream_tx = Arc::new(stream_tx); @@ -221,18 +207,11 @@ mod tests { ..Default::default() }; - result - .handle - .write(update_request.clone()) - .await - .unwrap(); + result.handle.write(update_request.clone()).await.unwrap(); let handle_reqs = mock.handle_requests(); assert_eq!(handle_reqs.len(), 1); - assert_eq!( - handle_reqs[0].commitment, - update_request.commitment - ); + assert_eq!(handle_reqs[0].commitment, update_request.commitment); assert!(handle_reqs[0].accounts.contains_key("updated")); } @@ -246,9 +225,7 @@ mod tests { // Both handles share the same handle_requests 
vec r1.handle .write(SubscribeRequest { - commitment: Some( - CommitmentLevel::Processed.into(), - ), + commitment: Some(CommitmentLevel::Processed.into()), ..Default::default() }) .await @@ -256,9 +233,7 @@ mod tests { r2.handle .write(SubscribeRequest { - commitment: Some( - CommitmentLevel::Finalized.into(), - ), + commitment: Some(CommitmentLevel::Finalized.into()), ..Default::default() }) .await diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs index 3bced8693..0ab59c242 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs @@ -1,7 +1,4 @@ -use std::{ - collections::{HashMap, HashSet}, - marker::PhantomData, -}; +use std::collections::{HashMap, HashSet}; use helius_laserstream::grpc::{ CommitmentLevel, SubscribeRequest, SubscribeRequestFilterAccounts, @@ -9,7 +6,7 @@ use helius_laserstream::grpc::{ }; use solana_pubkey::Pubkey; -use super::{LaserStream, StreamFactory}; +use super::{LaserStreamWithHandle, StreamFactory}; use crate::remote_account_provider::chain_laser_actor::StreamHandle; /// Configuration for the generational stream manager. @@ -56,23 +53,23 @@ pub struct StreamManager> { /// The factory used to create streams stream_factory: SF, /// Active streams for program subscriptions - program_subscriptions: Option<(HashSet, LaserStream)>, + program_subscriptions: Option<(HashSet, LaserStreamWithHandle)>, /// The canonical set of currently active account subscriptions. 
- /// These include subscriptions maintained across the different set of streams, - /// [Self::current_new_stream], [Self::unoptimized_old_streams], and + /// These include subscriptions maintained across the different set + /// of streams, [Self::current_new_stream], + /// [Self::unoptimized_old_streams], and /// [Self::optimized_old_streams]. subscriptions: HashSet, /// Pubkeys that are part of the current-new stream's filter. current_new_subs: HashSet, /// The current-new stream which holds the [Self::current_new_subs]. /// (None until the first subscribe call). - current_new_stream: Option, + current_new_stream: Option>, /// Old streams that have not been optimized yet. - unoptimized_old_streams: Vec, + unoptimized_old_streams: Vec>, /// Old streams created by optimization, each covering up to /// [StreamManagerConfig::max_subs_in_old_optimized] subscriptions. - optimized_old_streams: Vec, - _phantom: PhantomData, + optimized_old_streams: Vec>, } #[allow(unused)] @@ -87,7 +84,6 @@ impl> StreamManager { current_new_stream: None, unoptimized_old_streams: Vec::new(), optimized_old_streams: Vec::new(), - _phantom: PhantomData, } } @@ -194,7 +190,6 @@ impl> StreamManager { let refs: Vec<&Pubkey> = chunk.iter().collect(); self.stream_factory .subscribe(Self::build_account_request(&refs, commitment)) - .stream }) .collect(); @@ -244,7 +239,7 @@ impl> StreamManager { /// Returns references to all account streams (optimized old + /// unoptimized old + current-new) for inspection. 
- fn all_account_streams(&self) -> Vec<&LaserStream> { + fn all_account_streams(&self) -> Vec<&LaserStreamWithHandle> { let mut streams = Vec::new(); for s in &self.optimized_old_streams { streams.push(s); @@ -313,9 +308,9 @@ impl> StreamManager { &self, pubkeys: &[&Pubkey], commitment: &CommitmentLevel, - ) -> LaserStream { + ) -> LaserStreamWithHandle { let request = Self::build_account_request(pubkeys, commitment); - self.stream_factory.subscribe(request).stream + self.stream_factory.subscribe(request) } // ========================================================= @@ -331,7 +326,7 @@ impl> StreamManager { commitment: &CommitmentLevel, idx: usize, from_slot: Option, - ) -> super::LaserStream { + ) -> LaserStreamWithHandle { let mut accounts = HashMap::new(); accounts.insert( format!("account_subs: {idx}"), @@ -358,7 +353,7 @@ impl> StreamManager { from_slot, ..Default::default() }; - self.stream_factory.subscribe(request).stream + self.stream_factory.subscribe(request) } /// Adds a program subscription. If the program is already @@ -392,7 +387,9 @@ impl> StreamManager { /// Returns a mutable reference to the program subscriptions /// stream (if any) for polling in the actor loop. 
- pub fn program_stream_mut(&mut self) -> Option<&mut LaserStream> { + pub fn program_stream_mut( + &mut self, + ) -> Option<&mut LaserStreamWithHandle> { self.program_subscriptions.as_mut().map(|(_, s)| s) } @@ -411,7 +408,7 @@ impl> StreamManager { &self, program_ids: &[&Pubkey], commitment: &CommitmentLevel, - ) -> LaserStream { + ) -> LaserStreamWithHandle { let mut accounts = HashMap::new(); accounts.insert( "program_sub".to_string(), @@ -425,7 +422,7 @@ impl> StreamManager { commitment: Some((*commitment).into()), ..Default::default() }; - self.stream_factory.subscribe(request).stream + self.stream_factory.subscribe(request) } } From 38f5a1293517b505ffa01cc9b9b121c9cc161c78 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 19 Feb 2026 10:24:18 +0700 Subject: [PATCH 52/64] chore: remove unnecessary phantom data --- .../src/remote_account_provider/chain_laser_actor/actor.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs index 287b04760..3e2a92cdd 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs @@ -1,7 +1,6 @@ use std::{ collections::HashSet, fmt, - marker::PhantomData, sync::{ atomic::{AtomicU16, AtomicU64, Ordering}, Arc, @@ -141,7 +140,6 @@ pub struct ChainLaserActor> { client_id: String, /// RPC client for diagnostics (e.g., fetching slot when falling behind) rpc_client: ChainRpcClientImpl, - _phantom: PhantomData, } impl ChainLaserActor { @@ -246,7 +244,6 @@ impl> ChainLaserActor { slots, client_id: client_id.to_string(), rpc_client, - _phantom: PhantomData, }; ( From 93bb3ac90dee5bd9cc275519fbfccafc59ee8442 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 19 Feb 2026 11:00:31 +0700 Subject: [PATCH 53/64] chore: current subs updated via write to channel --- 
.../chain_laser_actor/stream_manager.rs | 288 +++++++++--------- 1 file changed, 152 insertions(+), 136 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs index 0ab59c242..ecbd2011d 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs @@ -97,7 +97,7 @@ impl> StreamManager { /// If the [Self::current_new_stream] exceeds [StreamManagerConfig::max_subs_in_new] it /// is promoted and a fresh one is created. If [Self::unoptimized_old_streams] exceed /// [StreamManagerConfig::max_old_unoptimized], optimization is triggered. - pub fn account_subscribe( + pub async fn account_subscribe( &mut self, pubkeys: &[Pubkey], commitment: &CommitmentLevel, @@ -118,12 +118,21 @@ impl> StreamManager { self.current_new_subs.insert(*pk); } - // (Re)create the current-new stream with the full - // current_new_subs filter. - self.current_new_stream = Some(self.create_account_stream( - &self.current_new_subs.iter().collect::>(), - commitment, - )); + // Update the current-new stream with the full + // current_new_subs filter (either create new if doesn't exist, + // or update existing via write). + if let Some(stream) = &self.current_new_stream { + let request = Self::build_account_request( + &self.current_new_subs.iter().collect::>(), + commitment, + ); + let _ = stream.handle.write(request).await; + } else { + self.current_new_stream = Some(self.create_account_stream( + &self.current_new_subs.iter().collect::>(), + commitment, + )); + } // Promote if current-new exceeds threshold. if self.current_new_subs.len() > self.config.max_subs_in_new { @@ -518,18 +527,18 @@ mod tests { /// Subscribe `n` pubkeys one-at-a-time, returning the created /// pubkeys. 
- fn subscribe_n( + async fn subscribe_n( mgr: &mut StreamManager, n: usize, ) -> Vec { let pks = make_pubkeys(n); - mgr.account_subscribe(&pks, &COMMITMENT); + mgr.account_subscribe(&pks, &COMMITMENT).await; pks } /// Subscribe pubkeys in batches of `batch` until `total` pubkeys /// have been subscribed. Returns all created pubkeys. - fn subscribe_in_batches( + async fn subscribe_in_batches( mgr: &mut StreamManager, total: usize, batch: usize, @@ -539,7 +548,7 @@ mod tests { while remaining > 0 { let n = remaining.min(batch); let pks = make_pubkeys(n); - mgr.account_subscribe(&pks, &COMMITMENT); + mgr.account_subscribe(&pks, &COMMITMENT).await; all.extend(pks); remaining -= n; } @@ -564,12 +573,12 @@ mod tests { // 1. Subscription Tracking // ------------------------------------------------------------- - #[test] - fn test_subscribe_single_pubkey_adds_to_subscriptions() { + #[tokio::test] + async fn test_subscribe_single_pubkey_adds_to_subscriptions() { let (mut mgr, factory) = create_manager(); let pk = Pubkey::new_unique(); - mgr.account_subscribe(&[pk], &COMMITMENT); + mgr.account_subscribe(&[pk], &COMMITMENT).await; assert_subscriptions_eq(&mgr, &[pk]); @@ -578,12 +587,12 @@ mod tests { assert_request_has_exact_pubkeys(&reqs[0], &[pk]); } - #[test] - fn test_subscribe_multiple_pubkeys_at_once() { + #[tokio::test] + async fn test_subscribe_multiple_pubkeys_at_once() { let (mut mgr, factory) = create_manager(); let pks = make_pubkeys(5); - mgr.account_subscribe(&pks, &COMMITMENT); + mgr.account_subscribe(&pks, &COMMITMENT).await; assert_subscriptions_eq(&mgr, &pks); @@ -592,72 +601,79 @@ mod tests { assert_request_has_exact_pubkeys(&reqs[0], &pks); } - #[test] - fn test_subscribe_duplicate_pubkey_is_noop() { + #[tokio::test] + async fn test_subscribe_duplicate_pubkey_is_noop() { let (mut mgr, factory) = create_manager(); let pk = Pubkey::new_unique(); - mgr.account_subscribe(&[pk], &COMMITMENT); + mgr.account_subscribe(&[pk], &COMMITMENT).await; let 
calls_after_first = factory.captured_requests().len(); - mgr.account_subscribe(&[pk], &COMMITMENT); + mgr.account_subscribe(&[pk], &COMMITMENT).await; assert_subscriptions_eq(&mgr, &[pk]); assert_eq!(factory.captured_requests().len(), calls_after_first); } - #[test] - fn test_subscribe_incremental_calls_accumulate() { + #[tokio::test] + async fn test_subscribe_incremental_calls_accumulate() { let (mut mgr, factory) = create_manager(); let pks = make_pubkeys(3); - mgr.account_subscribe(&[pks[0]], &COMMITMENT); - mgr.account_subscribe(&[pks[1]], &COMMITMENT); - mgr.account_subscribe(&[pks[2]], &COMMITMENT); + mgr.account_subscribe(&[pks[0]], &COMMITMENT).await; + mgr.account_subscribe(&[pks[1]], &COMMITMENT).await; + mgr.account_subscribe(&[pks[2]], &COMMITMENT).await; assert_subscriptions_eq(&mgr, &pks); + // First subscribe call creates the stream with just pks[0] let reqs = factory.captured_requests(); - let last_req = reqs.last().unwrap(); - assert_request_has_exact_pubkeys(last_req, &pks); + assert_eq!(reqs.len(), 1); + assert_request_has_exact_pubkeys(&reqs[0], &[pks[0]]); + + // Subsequent calls update via handle.write() which accumulates + let handle_reqs = factory.handle_requests(); + assert!(!handle_reqs.is_empty()); + let last_handle_req = handle_reqs.last().unwrap(); + assert_request_has_exact_pubkeys(last_handle_req, &pks); } // ------------------------------------------------------------- // 2. 
Current-New Stream Lifecycle // ------------------------------------------------------------- - #[test] - fn test_new_stream_created_on_first_subscribe() { + #[tokio::test] + async fn test_new_stream_created_on_first_subscribe() { let (mut mgr, factory) = create_manager(); assert_eq!(mgr.account_stream_count(), 0); - subscribe_n(&mut mgr, 1); + subscribe_n(&mut mgr, 1).await; assert_eq!(mgr.account_stream_count(), 1); assert_eq!(factory.active_stream_count(), 1); } - #[test] - fn test_current_new_stream_stays_below_threshold() { + #[tokio::test] + async fn test_current_new_stream_stays_below_threshold() { let (mut mgr, _factory) = create_manager(); // MAX_NEW - 1 = 4 - subscribe_in_batches(&mut mgr, 4, 2); + subscribe_in_batches(&mut mgr, 4, 2).await; assert_eq!(mgr.account_stream_count(), 1); assert_eq!(mgr.unoptimized_old_stream_count(), 0); } - #[test] - fn test_current_new_stream_promoted_at_threshold() { + #[tokio::test] + async fn test_current_new_stream_promoted_at_threshold() { let (mut mgr, factory) = create_manager(); // Subscribe MAX_NEW (5) pubkeys first. let first_five = make_pubkeys(5); - mgr.account_subscribe(&first_five, &COMMITMENT); + mgr.account_subscribe(&first_five, &COMMITMENT).await; assert_eq!(mgr.unoptimized_old_stream_count(), 0); // Subscribe the 6th pubkey → triggers promotion. let sixth = Pubkey::new_unique(); - mgr.account_subscribe(&[sixth], &COMMITMENT); + mgr.account_subscribe(&[sixth], &COMMITMENT).await; assert_eq!(mgr.unoptimized_old_stream_count(), 1); // A new current-new stream was created for the 6th pubkey. @@ -668,16 +684,16 @@ mod tests { assert!(reqs.len() >= 2); } - #[test] - fn test_multiple_promotions_accumulate_unoptimized() { + #[tokio::test] + async fn test_multiple_promotions_accumulate_unoptimized() { let (mut mgr, _factory) = create_manager(); // First promotion: subscribe 6 pubkeys (exceeds MAX_NEW=5). 
- subscribe_n(&mut mgr, 6); + subscribe_n(&mut mgr, 6).await; assert_eq!(mgr.unoptimized_old_stream_count(), 1); // Second promotion: subscribe 5 more to fill the new current, // then 1 more to exceed. - subscribe_n(&mut mgr, 5); + subscribe_n(&mut mgr, 5).await; assert_eq!(mgr.unoptimized_old_stream_count(), 2); // Current-new stream should only hold the overflow pubkeys. @@ -688,19 +704,19 @@ mod tests { // 3. Optimization Trigger via MAX_OLD_UNOPTIMIZED // ------------------------------------------------------------- - #[test] - fn test_optimization_triggered_when_unoptimized_exceeds_max() { + #[tokio::test] + async fn test_optimization_triggered_when_unoptimized_exceeds_max() { let (mut mgr, _factory) = create_manager(); // MAX_OLD_UNOPTIMIZED = 3. We need 4 promotions. // Each promotion needs > MAX_NEW (5) pubkeys in current-new. // Subscribe 6 four times → 4 promotions. for _ in 0..3 { - subscribe_n(&mut mgr, 6); + subscribe_n(&mut mgr, 6).await; } assert_eq!(mgr.unoptimized_old_stream_count(), 3); // 4th promotion triggers optimization. - subscribe_n(&mut mgr, 6); + subscribe_n(&mut mgr, 6).await; // After optimization: unoptimized should be empty. assert_eq!(mgr.unoptimized_old_stream_count(), 0); @@ -710,12 +726,12 @@ mod tests { assert_eq!(mgr.optimized_old_stream_count(), expected_optimized,); } - #[test] - fn test_optimization_not_triggered_below_max_unoptimized() { + #[tokio::test] + async fn test_optimization_not_triggered_below_max_unoptimized() { let (mut mgr, _factory) = create_manager(); // Exactly MAX_OLD_UNOPTIMIZED (3) promotions. for _ in 0..3 { - subscribe_n(&mut mgr, 6); + subscribe_n(&mut mgr, 6).await; } assert_eq!(mgr.unoptimized_old_stream_count(), 3); assert_eq!(mgr.optimized_old_stream_count(), 0); @@ -725,10 +741,10 @@ mod tests { // 4. 
Manual / Interval-Driven Optimization // ------------------------------------------------------------- - #[test] - fn test_optimize_creates_correct_number_of_optimized_streams() { + #[tokio::test] + async fn test_optimize_creates_correct_number_of_optimized_streams() { let (mut mgr, _factory) = create_manager(); - subscribe_n(&mut mgr, 25); + subscribe_n(&mut mgr, 25).await; mgr.optimize(&COMMITMENT); @@ -736,12 +752,12 @@ mod tests { assert_eq!(mgr.optimized_old_stream_count(), 3); } - #[test] - fn test_optimize_clears_unoptimized_old_streams() { + #[tokio::test] + async fn test_optimize_clears_unoptimized_old_streams() { let (mut mgr, _factory) = create_manager(); // Create several unoptimized old streams. for _ in 0..3 { - subscribe_n(&mut mgr, 6); + subscribe_n(&mut mgr, 6).await; } assert!(mgr.unoptimized_old_stream_count() > 0); @@ -751,20 +767,20 @@ mod tests { assert!(mgr.optimized_old_stream_count() > 0); } - #[test] - fn test_optimize_resets_current_new_stream() { + #[tokio::test] + async fn test_optimize_resets_current_new_stream() { let (mut mgr, _factory) = create_manager(); - subscribe_n(&mut mgr, 8); + subscribe_n(&mut mgr, 8).await; mgr.optimize(&COMMITMENT); assert_eq!(mgr.current_new_sub_count(), 0); } - #[test] - fn test_optimize_excludes_unsubscribed_pubkeys() { + #[tokio::test] + async fn test_optimize_excludes_unsubscribed_pubkeys() { let (mut mgr, factory) = create_manager(); - let pks = subscribe_n(&mut mgr, 15); + let pks = subscribe_n(&mut mgr, 15).await; // Unsubscribe 5 of them. 
let to_unsub: Vec = pks[0..5].to_vec(); @@ -793,10 +809,10 @@ mod tests { } } - #[test] - fn test_optimize_with_zero_subscriptions() { + #[tokio::test] + async fn test_optimize_with_zero_subscriptions() { let (mut mgr, _factory) = create_manager(); - let pks = subscribe_n(&mut mgr, 5); + let pks = subscribe_n(&mut mgr, 5).await; mgr.account_unsubscribe(&pks); mgr.optimize(&COMMITMENT); @@ -805,10 +821,10 @@ mod tests { assert_eq!(mgr.unoptimized_old_stream_count(), 0); } - #[test] - fn test_optimize_idempotent() { + #[tokio::test] + async fn test_optimize_idempotent() { let (mut mgr, _factory) = create_manager(); - subscribe_n(&mut mgr, 15); + subscribe_n(&mut mgr, 15).await; mgr.optimize(&COMMITMENT); let count_after_first = mgr.optimized_old_stream_count(); @@ -821,39 +837,39 @@ mod tests { // 5. Behavior During Optimization // ------------------------------------------------------------- - #[test] - fn test_subscribe_during_optimization_goes_to_current_new() { + #[tokio::test] + async fn test_subscribe_during_optimization_goes_to_current_new() { let (mut mgr, _factory) = create_manager(); - subscribe_n(&mut mgr, 20); + subscribe_n(&mut mgr, 20).await; mgr.optimize(&COMMITMENT); // Subscribe a new pubkey after optimization. let new_pk = Pubkey::new_unique(); - mgr.account_subscribe(&[new_pk], &COMMITMENT); + mgr.account_subscribe(&[new_pk], &COMMITMENT).await; assert!(mgr.subscriptions().contains(&new_pk)); assert!(mgr.current_new_subs().contains(&new_pk)); } - #[test] - fn test_no_double_optimization_trigger() { + #[tokio::test] + async fn test_no_double_optimization_trigger() { let (mut mgr, _factory) = create_manager(); // Fill up to MAX_OLD_UNOPTIMIZED. for _ in 0..3 { - subscribe_n(&mut mgr, 6); + subscribe_n(&mut mgr, 6).await; } assert_eq!(mgr.unoptimized_old_stream_count(), 3); // 4th promotion triggers optimization. 
- subscribe_n(&mut mgr, 6); + subscribe_n(&mut mgr, 6).await; assert_eq!(mgr.unoptimized_old_stream_count(), 0); let optimized_after_first = mgr.optimized_old_stream_count(); // Now subscribe enough to exceed MAX_SUBS_IN_NEW again, // causing a promotion. Since optimization just ran, it should // NOT trigger again immediately. - subscribe_n(&mut mgr, 6); + subscribe_n(&mut mgr, 6).await; // Unoptimized grows by 1 but no second optimization. assert!(mgr.unoptimized_old_stream_count() <= 1); assert_eq!(mgr.optimized_old_stream_count(), optimized_after_first,); @@ -863,11 +879,11 @@ mod tests { // 6. Unsubscribe // ------------------------------------------------------------- - #[test] - fn test_unsubscribe_removes_from_subscriptions_set() { + #[tokio::test] + async fn test_unsubscribe_removes_from_subscriptions_set() { let (mut mgr, _factory) = create_manager(); let pks = make_pubkeys(3); - mgr.account_subscribe(&pks, &COMMITMENT); + mgr.account_subscribe(&pks, &COMMITMENT).await; mgr.account_unsubscribe(&[pks[1]]); @@ -884,11 +900,11 @@ mod tests { assert!(mgr.subscriptions().is_empty()); } - #[test] - fn test_unsubscribe_already_unsubscribed_pubkey() { + #[tokio::test] + async fn test_unsubscribe_already_unsubscribed_pubkey() { let (mut mgr, _factory) = create_manager(); let pk = Pubkey::new_unique(); - mgr.account_subscribe(&[pk], &COMMITMENT); + mgr.account_subscribe(&[pk], &COMMITMENT).await; mgr.account_unsubscribe(&[pk]); mgr.account_unsubscribe(&[pk]); @@ -896,11 +912,11 @@ mod tests { assert!(mgr.subscriptions().is_empty()); } - #[test] - fn test_unsubscribe_does_not_modify_streams() { + #[tokio::test] + async fn test_unsubscribe_does_not_modify_streams() { let (mut mgr, factory) = create_manager(); let pks = make_pubkeys(4); - mgr.account_subscribe(&pks, &COMMITMENT); + mgr.account_subscribe(&pks, &COMMITMENT).await; let calls_before = factory.captured_requests().len(); mgr.account_unsubscribe(&pks[0..2]); @@ -913,11 +929,11 @@ mod tests { } } - #[test] - 
fn test_unsubscribe_all_then_optimize_clears_streams() { + #[tokio::test] + async fn test_unsubscribe_all_then_optimize_clears_streams() { let (mut mgr, _factory) = create_manager(); // Subscribe 8 pubkeys (creates current-new + 1 unoptimized). - let pks = subscribe_n(&mut mgr, 8); + let pks = subscribe_n(&mut mgr, 8).await; mgr.account_unsubscribe(&pks); mgr.optimize(&COMMITMENT); @@ -926,11 +942,11 @@ mod tests { assert_eq!(mgr.unoptimized_old_stream_count(), 0); } - #[test] - fn test_unsubscribe_batch() { + #[tokio::test] + async fn test_unsubscribe_batch() { let (mut mgr, factory) = create_manager(); let pks = make_pubkeys(5); - mgr.account_subscribe(&pks, &COMMITMENT); + mgr.account_subscribe(&pks, &COMMITMENT).await; let calls_before = factory.captured_requests().len(); mgr.account_unsubscribe(&[pks[0], pks[2], pks[4]]); @@ -943,20 +959,20 @@ mod tests { // 7. Subscription Membership Check // ------------------------------------------------------------- - #[test] - fn test_is_subscribed_returns_true_for_active() { + #[tokio::test] + async fn test_is_subscribed_returns_true_for_active() { let (mut mgr, _factory) = create_manager(); let pk = Pubkey::new_unique(); - mgr.account_subscribe(&[pk], &COMMITMENT); + mgr.account_subscribe(&[pk], &COMMITMENT).await; assert!(mgr.is_subscribed(&pk)); } - #[test] - fn test_is_subscribed_returns_false_after_unsubscribe() { + #[tokio::test] + async fn test_is_subscribed_returns_false_after_unsubscribe() { let (mut mgr, _factory) = create_manager(); let pk = Pubkey::new_unique(); - mgr.account_subscribe(&[pk], &COMMITMENT); + mgr.account_subscribe(&[pk], &COMMITMENT).await; mgr.account_unsubscribe(&[pk]); assert!(!mgr.is_subscribed(&pk)); @@ -974,15 +990,15 @@ mod tests { // 8. 
Stream Enumeration / Polling Access // ------------------------------------------------------------- - #[test] - fn test_all_account_streams_includes_all_generations() { + #[tokio::test] + async fn test_all_account_streams_includes_all_generations() { let (mut mgr, _factory) = create_manager(); // Create optimized old streams. - subscribe_n(&mut mgr, 15); + subscribe_n(&mut mgr, 15).await; mgr.optimize(&COMMITMENT); // Create an unoptimized old stream via promotion. - subscribe_n(&mut mgr, 6); + subscribe_n(&mut mgr, 6).await; // Current-new also exists from the overflow pubkey. let expected = mgr.account_stream_count(); @@ -998,12 +1014,12 @@ mod tests { assert!(streams.is_empty()); } - #[test] - fn test_all_account_streams_after_optimize_drops_old_unoptimized() { + #[tokio::test] + async fn test_all_account_streams_after_optimize_drops_old_unoptimized() { let (mut mgr, _factory) = create_manager(); // Create unoptimized old streams. for _ in 0..2 { - subscribe_n(&mut mgr, 6); + subscribe_n(&mut mgr, 6).await; } assert!(mgr.unoptimized_old_stream_count() > 0); @@ -1020,20 +1036,20 @@ mod tests { // 9. Edge Cases and Stress // ------------------------------------------------------------- - #[test] - fn test_subscribe_exactly_at_max_subs_in_new_no_promotion() { + #[tokio::test] + async fn test_subscribe_exactly_at_max_subs_in_new_no_promotion() { let (mut mgr, _factory) = create_manager(); // Exactly MAX_NEW (5) pubkeys — should NOT promote. 
- subscribe_n(&mut mgr, 5); + subscribe_n(&mut mgr, 5).await; assert_eq!(mgr.unoptimized_old_stream_count(), 0); assert_eq!(mgr.account_stream_count(), 1); } - #[test] - fn test_single_pubkey_optimization() { + #[tokio::test] + async fn test_single_pubkey_optimization() { let (mut mgr, _factory) = create_manager(); - subscribe_n(&mut mgr, 1); + subscribe_n(&mut mgr, 1).await; mgr.optimize(&COMMITMENT); @@ -1041,21 +1057,21 @@ mod tests { assert_eq!(mgr.current_new_sub_count(), 0); } - #[test] - fn test_subscribe_max_old_optimized_plus_one() { + #[tokio::test] + async fn test_subscribe_max_old_optimized_plus_one() { let (mut mgr, _factory) = create_manager(); // MAX_OLD_OPTIMIZED + 1 = 11 - subscribe_n(&mut mgr, 11); + subscribe_n(&mut mgr, 11).await; mgr.optimize(&COMMITMENT); assert_eq!(mgr.optimized_old_stream_count(), 2); } - #[test] - fn test_large_scale_subscribe_and_optimize() { + #[tokio::test] + async fn test_large_scale_subscribe_and_optimize() { let (mut mgr, factory) = create_manager(); - let pks = subscribe_n(&mut mgr, 50); + let pks = subscribe_n(&mut mgr, 50).await; let reqs_before = factory.captured_requests().len(); mgr.optimize(&COMMITMENT); @@ -1074,17 +1090,17 @@ mod tests { } } - #[test] - fn test_interleaved_subscribe_unsubscribe_then_optimize() { + #[tokio::test] + async fn test_interleaved_subscribe_unsubscribe_then_optimize() { let (mut mgr, factory) = create_manager(); - let pks = subscribe_n(&mut mgr, 20); + let pks = subscribe_n(&mut mgr, 20).await; // Unsubscribe 8 scattered. let unsub1: Vec = pks.iter().step_by(2).take(8).copied().collect(); mgr.account_unsubscribe(&unsub1); // Subscribe 5 new ones. - let new_pks = subscribe_n(&mut mgr, 5); + let new_pks = subscribe_n(&mut mgr, 5).await; // Unsubscribe 2 of the new ones. 
mgr.account_unsubscribe(&new_pks[0..2]); @@ -1105,14 +1121,14 @@ mod tests { } } - #[test] - fn test_rapid_subscribe_unsubscribe_same_pubkey() { + #[tokio::test] + async fn test_rapid_subscribe_unsubscribe_same_pubkey() { let (mut mgr, _factory) = create_manager(); let pk = Pubkey::new_unique(); - mgr.account_subscribe(&[pk], &COMMITMENT); + mgr.account_subscribe(&[pk], &COMMITMENT).await; mgr.account_unsubscribe(&[pk]); - mgr.account_subscribe(&[pk], &COMMITMENT); + mgr.account_subscribe(&[pk], &COMMITMENT).await; assert!(mgr.subscriptions().contains(&pk)); assert!(mgr.current_new_subs().contains(&pk)); @@ -1122,13 +1138,13 @@ mod tests { // 10. Stream Factory Interaction Verification // ------------------------------------------------------------- - #[test] - fn test_factory_called_with_correct_commitment() { + #[tokio::test] + async fn test_factory_called_with_correct_commitment() { let (mut mgr, factory) = create_manager(); let commitment = CommitmentLevel::Finalized; let pk = Pubkey::new_unique(); - mgr.account_subscribe(&[pk], &commitment); + mgr.account_subscribe(&[pk], &commitment).await; let reqs = factory.captured_requests(); assert_eq!(reqs.len(), 1); @@ -1138,19 +1154,19 @@ mod tests { ); } - #[test] - fn test_factory_called_with_slot_filter() { + #[tokio::test] + async fn test_factory_called_with_slot_filter() { let (mut mgr, factory) = create_manager(); - subscribe_n(&mut mgr, 1); + subscribe_n(&mut mgr, 1).await; let reqs = factory.captured_requests(); assert!(!reqs[0].slots.is_empty()); } - #[test] - fn test_optimize_factory_calls_contain_chunked_pubkeys() { + #[tokio::test] + async fn test_optimize_factory_calls_contain_chunked_pubkeys() { let (mut mgr, factory) = create_manager(); - subscribe_n(&mut mgr, 15); + subscribe_n(&mut mgr, 15).await; let reqs_before = factory.captured_requests().len(); mgr.optimize(&COMMITMENT); @@ -1171,10 +1187,10 @@ mod tests { assert!(first_pks.is_disjoint(&second_pks)); } - #[test] - fn 
test_factory_not_called_on_unsubscribe() { + #[tokio::test] + async fn test_factory_not_called_on_unsubscribe() { let (mut mgr, factory) = create_manager(); - subscribe_n(&mut mgr, 5); + subscribe_n(&mut mgr, 5).await; let calls_before = factory.captured_requests().len(); let pks: Vec = From fabc155ffddd0fd81e95267f4c288555ade9aca6 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 19 Feb 2026 11:07:39 +0700 Subject: [PATCH 54/64] chore: adding retry logic to sub write --- .../chain_laser_actor/stream_manager.rs | 36 ++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs index ecbd2011d..7da0ef1cf 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs @@ -4,7 +4,9 @@ use helius_laserstream::grpc::{ CommitmentLevel, SubscribeRequest, SubscribeRequestFilterAccounts, SubscribeRequestFilterSlots, }; +use helius_laserstream::LaserstreamError; use solana_pubkey::Pubkey; +use tokio::time::Duration; use super::{LaserStreamWithHandle, StreamFactory}; use crate::remote_account_provider::chain_laser_actor::StreamHandle; @@ -87,6 +89,38 @@ impl> StreamManager { } } + /// Update a stream's subscriptions with retry logic. + /// + /// Attempts to write the given request to the stream handle up to 5 + /// times with linear backoff. Returns an error if all retries are + /// exhausted. 
+ async fn update_subscriptions( + handle: &S, + request: SubscribeRequest, + ) -> Result<(), LaserstreamError> { + const MAX_RETRIES: usize = 5; + let mut retries = MAX_RETRIES; + let initial_retries = retries; + + loop { + match handle.write(request.clone()).await { + Ok(()) => return Ok(()), + Err(err) => { + if retries > 0 { + retries -= 1; + // Linear backoff: sleep longer as retries decrease + let backoff_ms = + 50u64 * (initial_retries - retries) as u64; + tokio::time::sleep(Duration::from_millis(backoff_ms)) + .await; + continue; + } + return Err(err); + } + } + } + } + // --------------------- // Account subscription // --------------------- @@ -126,7 +160,7 @@ impl> StreamManager { &self.current_new_subs.iter().collect::>(), commitment, ); - let _ = stream.handle.write(request).await; + let _ = Self::update_subscriptions(&stream.handle, request).await; } else { self.current_new_stream = Some(self.create_account_stream( &self.current_new_subs.iter().collect::>(), From a0c61163e1492c0d36bcc6bcb2a0a2fb00159ab8 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 19 Feb 2026 11:37:35 +0700 Subject: [PATCH 55/64] chore: bubble errors from account_subscribe --- .../chain_laser_actor/stream_manager.rs | 72 +++++++++++-------- .../src/remote_account_provider/errors.rs | 5 ++ 2 files changed, 49 insertions(+), 28 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs index 7da0ef1cf..9adf16ff8 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs @@ -4,12 +4,14 @@ use helius_laserstream::grpc::{ CommitmentLevel, SubscribeRequest, SubscribeRequestFilterAccounts, SubscribeRequestFilterSlots, }; -use helius_laserstream::LaserstreamError; use solana_pubkey::Pubkey; use 
tokio::time::Duration; use super::{LaserStreamWithHandle, StreamFactory}; -use crate::remote_account_provider::chain_laser_actor::StreamHandle; +use crate::remote_account_provider::{ + chain_laser_actor::StreamHandle, RemoteAccountProviderError, + RemoteAccountProviderResult, +}; /// Configuration for the generational stream manager. #[allow(unused)] @@ -96,8 +98,9 @@ impl> StreamManager { /// exhausted. async fn update_subscriptions( handle: &S, + task: &str, request: SubscribeRequest, - ) -> Result<(), LaserstreamError> { + ) -> RemoteAccountProviderResult<()> { const MAX_RETRIES: usize = 5; let mut retries = MAX_RETRIES; let initial_retries = retries; @@ -115,7 +118,11 @@ impl> StreamManager { .await; continue; } - return Err(err); + return Err(RemoteAccountProviderError::GrpcSubscriptionUpdateFailed( + task.to_string(), + MAX_RETRIES, + format!("{err} ({err:?}"), + )); } } } @@ -135,7 +142,7 @@ impl> StreamManager { &mut self, pubkeys: &[Pubkey], commitment: &CommitmentLevel, - ) { + ) -> RemoteAccountProviderResult<()> { // Filter out pubkeys already in subscriptions. let new_pks: Vec = pubkeys .iter() @@ -144,7 +151,7 @@ impl> StreamManager { .collect(); if new_pks.is_empty() { - return; + return Ok(()); } for pk in &new_pks { @@ -160,7 +167,12 @@ impl> StreamManager { &self.current_new_subs.iter().collect::>(), commitment, ); - let _ = Self::update_subscriptions(&stream.handle, request).await; + Self::update_subscriptions( + &stream.handle, + "account_subscribe", + request, + ) + .await? } else { self.current_new_stream = Some(self.create_account_stream( &self.current_new_subs.iter().collect::>(), @@ -202,6 +214,8 @@ impl> StreamManager { self.optimize(commitment); } } + + Ok(()) } /// Unsubscribe the given pubkeys. 
@@ -566,7 +580,7 @@ mod tests { n: usize, ) -> Vec { let pks = make_pubkeys(n); - mgr.account_subscribe(&pks, &COMMITMENT).await; + mgr.account_subscribe(&pks, &COMMITMENT).await.unwrap(); pks } @@ -582,7 +596,7 @@ mod tests { while remaining > 0 { let n = remaining.min(batch); let pks = make_pubkeys(n); - mgr.account_subscribe(&pks, &COMMITMENT).await; + mgr.account_subscribe(&pks, &COMMITMENT).await.unwrap(); all.extend(pks); remaining -= n; } @@ -612,7 +626,7 @@ mod tests { let (mut mgr, factory) = create_manager(); let pk = Pubkey::new_unique(); - mgr.account_subscribe(&[pk], &COMMITMENT).await; + mgr.account_subscribe(&[pk], &COMMITMENT).await.unwrap(); assert_subscriptions_eq(&mgr, &[pk]); @@ -626,7 +640,7 @@ mod tests { let (mut mgr, factory) = create_manager(); let pks = make_pubkeys(5); - mgr.account_subscribe(&pks, &COMMITMENT).await; + mgr.account_subscribe(&pks, &COMMITMENT).await.unwrap(); assert_subscriptions_eq(&mgr, &pks); @@ -640,10 +654,10 @@ mod tests { let (mut mgr, factory) = create_manager(); let pk = Pubkey::new_unique(); - mgr.account_subscribe(&[pk], &COMMITMENT).await; + mgr.account_subscribe(&[pk], &COMMITMENT).await.unwrap(); let calls_after_first = factory.captured_requests().len(); - mgr.account_subscribe(&[pk], &COMMITMENT).await; + mgr.account_subscribe(&[pk], &COMMITMENT).await.unwrap(); assert_subscriptions_eq(&mgr, &[pk]); assert_eq!(factory.captured_requests().len(), calls_after_first); @@ -654,9 +668,9 @@ mod tests { let (mut mgr, factory) = create_manager(); let pks = make_pubkeys(3); - mgr.account_subscribe(&[pks[0]], &COMMITMENT).await; - mgr.account_subscribe(&[pks[1]], &COMMITMENT).await; - mgr.account_subscribe(&[pks[2]], &COMMITMENT).await; + mgr.account_subscribe(&[pks[0]], &COMMITMENT).await.unwrap(); + mgr.account_subscribe(&[pks[1]], &COMMITMENT).await.unwrap(); + mgr.account_subscribe(&[pks[2]], &COMMITMENT).await.unwrap(); assert_subscriptions_eq(&mgr, &pks); @@ -702,12 +716,14 @@ mod tests { let (mut mgr, factory) 
= create_manager(); // Subscribe MAX_NEW (5) pubkeys first. let first_five = make_pubkeys(5); - mgr.account_subscribe(&first_five, &COMMITMENT).await; + mgr.account_subscribe(&first_five, &COMMITMENT) + .await + .unwrap(); assert_eq!(mgr.unoptimized_old_stream_count(), 0); // Subscribe the 6th pubkey → triggers promotion. let sixth = Pubkey::new_unique(); - mgr.account_subscribe(&[sixth], &COMMITMENT).await; + mgr.account_subscribe(&[sixth], &COMMITMENT).await.unwrap(); assert_eq!(mgr.unoptimized_old_stream_count(), 1); // A new current-new stream was created for the 6th pubkey. @@ -880,7 +896,7 @@ mod tests { // Subscribe a new pubkey after optimization. let new_pk = Pubkey::new_unique(); - mgr.account_subscribe(&[new_pk], &COMMITMENT).await; + mgr.account_subscribe(&[new_pk], &COMMITMENT).await.unwrap(); assert!(mgr.subscriptions().contains(&new_pk)); assert!(mgr.current_new_subs().contains(&new_pk)); @@ -917,7 +933,7 @@ mod tests { async fn test_unsubscribe_removes_from_subscriptions_set() { let (mut mgr, _factory) = create_manager(); let pks = make_pubkeys(3); - mgr.account_subscribe(&pks, &COMMITMENT).await; + mgr.account_subscribe(&pks, &COMMITMENT).await.unwrap(); mgr.account_unsubscribe(&[pks[1]]); @@ -938,7 +954,7 @@ mod tests { async fn test_unsubscribe_already_unsubscribed_pubkey() { let (mut mgr, _factory) = create_manager(); let pk = Pubkey::new_unique(); - mgr.account_subscribe(&[pk], &COMMITMENT).await; + mgr.account_subscribe(&[pk], &COMMITMENT).await.unwrap(); mgr.account_unsubscribe(&[pk]); mgr.account_unsubscribe(&[pk]); @@ -950,7 +966,7 @@ mod tests { async fn test_unsubscribe_does_not_modify_streams() { let (mut mgr, factory) = create_manager(); let pks = make_pubkeys(4); - mgr.account_subscribe(&pks, &COMMITMENT).await; + mgr.account_subscribe(&pks, &COMMITMENT).await.unwrap(); let calls_before = factory.captured_requests().len(); mgr.account_unsubscribe(&pks[0..2]); @@ -980,7 +996,7 @@ mod tests { async fn test_unsubscribe_batch() { let (mut 
mgr, factory) = create_manager(); let pks = make_pubkeys(5); - mgr.account_subscribe(&pks, &COMMITMENT).await; + mgr.account_subscribe(&pks, &COMMITMENT).await.unwrap(); let calls_before = factory.captured_requests().len(); mgr.account_unsubscribe(&[pks[0], pks[2], pks[4]]); @@ -997,7 +1013,7 @@ mod tests { async fn test_is_subscribed_returns_true_for_active() { let (mut mgr, _factory) = create_manager(); let pk = Pubkey::new_unique(); - mgr.account_subscribe(&[pk], &COMMITMENT).await; + mgr.account_subscribe(&[pk], &COMMITMENT).await.unwrap(); assert!(mgr.is_subscribed(&pk)); } @@ -1006,7 +1022,7 @@ mod tests { async fn test_is_subscribed_returns_false_after_unsubscribe() { let (mut mgr, _factory) = create_manager(); let pk = Pubkey::new_unique(); - mgr.account_subscribe(&[pk], &COMMITMENT).await; + mgr.account_subscribe(&[pk], &COMMITMENT).await.unwrap(); mgr.account_unsubscribe(&[pk]); assert!(!mgr.is_subscribed(&pk)); @@ -1160,9 +1176,9 @@ mod tests { let (mut mgr, _factory) = create_manager(); let pk = Pubkey::new_unique(); - mgr.account_subscribe(&[pk], &COMMITMENT).await; + mgr.account_subscribe(&[pk], &COMMITMENT).await.unwrap(); mgr.account_unsubscribe(&[pk]); - mgr.account_subscribe(&[pk], &COMMITMENT).await; + mgr.account_subscribe(&[pk], &COMMITMENT).await.unwrap(); assert!(mgr.subscriptions().contains(&pk)); assert!(mgr.current_new_subs().contains(&pk)); @@ -1178,7 +1194,7 @@ mod tests { let commitment = CommitmentLevel::Finalized; let pk = Pubkey::new_unique(); - mgr.account_subscribe(&[pk], &commitment).await; + mgr.account_subscribe(&[pk], &commitment).await.unwrap(); let reqs = factory.captured_requests(); assert_eq!(reqs.len(), 1); diff --git a/magicblock-chainlink/src/remote_account_provider/errors.rs b/magicblock-chainlink/src/remote_account_provider/errors.rs index 15d7415e2..4284ef2c6 100644 --- a/magicblock-chainlink/src/remote_account_provider/errors.rs +++ b/magicblock-chainlink/src/remote_account_provider/errors.rs @@ -114,6 +114,11 @@ pub 
enum RemoteAccountProviderError { "The LoaderV4 program {0} account state deserialization failed: {1}" )] LoaderV4StateDeserializationFailed(Pubkey, String), + + #[error( + "Failed to update gRPC subscription to {0} after {1} retries: {2}" + )] + GrpcSubscriptionUpdateFailed(String, usize, String), } impl From for RemoteAccountProviderError From c8fd9da2ed0c51a8958bafa36bf01617461bfb05 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 19 Feb 2026 12:48:30 +0700 Subject: [PATCH 56/64] chore: minor clarification --- .../src/remote_account_provider/chain_laser_actor/mod.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs index e9be0aa60..84275edfb 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs @@ -58,6 +58,12 @@ impl StreamHandle for StreamHandleImpl { &self, request: SubscribeRequest, ) -> Result<(), LaserstreamError> { + // This async operation gets forwarded to the underlying subscription sender of the laser + // client and completes after the given item has been fully processed into the sink, + // including flushing. + // The assumption is that at that point it has been processed on the receiver end and the + // subscription is updated. 
+ // See: https://github.com/helius-labs/laserstream-sdk/blob/v0.2.2/rust/src/client.rs#L196-L201 self.handle.write(request).await } } From 26a6f513e2c865f55477d9c200b9261743714b72 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 19 Feb 2026 12:55:35 +0700 Subject: [PATCH 57/64] chore: program subs reuse stream via handle write --- .../chain_laser_actor/actor.rs | 22 +++++--- .../chain_laser_actor/stream_manager.rs | 52 ++++++++++++++----- 2 files changed, 55 insertions(+), 19 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs index 3e2a92cdd..1df33d8a9 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs @@ -275,7 +275,7 @@ impl> ChainLaserActor { msg = self.messages_receiver.recv() => { match msg { Some(msg) => { - if self.handle_msg(msg) { + if self.handle_msg(msg).await { break; } } @@ -336,7 +336,7 @@ impl> ChainLaserActor { } } - fn handle_msg(&mut self, msg: ChainPubsubActorMessage) -> bool { + async fn handle_msg(&mut self, msg: ChainPubsubActorMessage) -> bool { use ChainPubsubActorMessage::*; match msg { AccountSubscribe { @@ -350,11 +350,19 @@ impl> ChainLaserActor { false } ProgramSubscribe { pubkey, response } => { - self.stream_manager - .add_program_subscription(pubkey, &self.commitment); - let _ = response.send(Ok(())).inspect_err(|_| { - warn!(client_id = self.client_id, program_id = %pubkey, "Failed to send program subscribe response"); - }); + let result = self + .stream_manager + .add_program_subscription(pubkey, &self.commitment) + .await; + if let Err(e) = result { + let _ = response.send(Err(e)).inspect_err(|_| { + warn!(client_id = self.client_id, program_id = %pubkey, "Failed to send program subscribe response"); + }); + } else { + let _ =response.send(Ok(())).inspect_err(|_| { + 
warn!(client_id = self.client_id, program_id = %pubkey, "Failed to send program subscribe response"); + }); + }; false } Reconnect { response } => { diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs index 9adf16ff8..d148c4a46 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs @@ -414,19 +414,19 @@ impl> StreamManager { } /// Adds a program subscription. If the program is already - /// subscribed, this is a no-op. Otherwise, recreates the program + /// subscribed, this is a no-op. Otherwise, updates the program /// stream to include all subscribed programs. - pub fn add_program_subscription( + pub async fn add_program_subscription( &mut self, program_id: Pubkey, commitment: &CommitmentLevel, - ) { + ) -> RemoteAccountProviderResult<()> { if self .program_subscriptions .as_ref() .is_some_and(|(subs, _)| subs.contains(&program_id)) { - return; + return Ok(()); } let mut subscribed_programs = self @@ -438,8 +438,27 @@ impl> StreamManager { subscribed_programs.insert(program_id); let program_ids: Vec<&Pubkey> = subscribed_programs.iter().collect(); - let stream = self.create_program_stream(&program_ids, commitment); - self.program_subscriptions = Some((subscribed_programs, stream)); + let request = Self::build_program_request(&program_ids, commitment); + + if let Some((_, stream)) = &self.program_subscriptions { + // Update existing stream + Self::update_subscriptions( + &stream.handle, + "program_subscribe", + request, + ) + .await?; + // Update the set of subscribed programs + if let Some((subs, _)) = &mut self.program_subscriptions { + *subs = subscribed_programs; + } + } else { + // Create new stream + let stream = self.create_program_stream(&program_ids, commitment); + self.program_subscriptions = 
Some((subscribed_programs, stream)); + } + + Ok(()) } /// Returns a mutable reference to the program subscriptions @@ -460,12 +479,11 @@ impl> StreamManager { self.program_subscriptions = None; } - /// Creates a subscription stream for program updates. - fn create_program_stream( - &self, + /// Build a `SubscribeRequest` for the given program IDs. + fn build_program_request( program_ids: &[&Pubkey], commitment: &CommitmentLevel, - ) -> LaserStreamWithHandle { + ) -> SubscribeRequest { let mut accounts = HashMap::new(); accounts.insert( "program_sub".to_string(), @@ -474,11 +492,21 @@ impl> StreamManager { ..Default::default() }, ); - let request = SubscribeRequest { + + SubscribeRequest { accounts, commitment: Some((*commitment).into()), ..Default::default() - }; + } + } + + /// Creates a subscription stream for program updates. + fn create_program_stream( + &self, + program_ids: &[&Pubkey], + commitment: &CommitmentLevel, + ) -> LaserStreamWithHandle { + let request = Self::build_program_request(program_ids, commitment); self.stream_factory.subscribe(request) } } From 558a1ae9ecd0d747e83c8de52c73ae152a4bb664 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 19 Feb 2026 15:02:10 +0700 Subject: [PATCH 58/64] chore: support account sub from_slot in stream manager --- .../chain_laser_actor/stream_manager.rs | 169 +++++++++++++++--- 1 file changed, 146 insertions(+), 23 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs index d148c4a46..e97830c9e 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs @@ -142,6 +142,7 @@ impl> StreamManager { &mut self, pubkeys: &[Pubkey], commitment: &CommitmentLevel, + from_slot: Option, ) -> RemoteAccountProviderResult<()> { // Filter out pubkeys 
already in subscriptions. let new_pks: Vec = pubkeys @@ -166,6 +167,7 @@ impl> StreamManager { let request = Self::build_account_request( &self.current_new_subs.iter().collect::>(), commitment, + from_slot, ); Self::update_subscriptions( &stream.handle, @@ -177,6 +179,7 @@ impl> StreamManager { self.current_new_stream = Some(self.create_account_stream( &self.current_new_subs.iter().collect::>(), commitment, + from_slot, )); } @@ -204,6 +207,7 @@ impl> StreamManager { self.current_new_stream = Some(self.create_account_stream( &overflow_pks.iter().collect::>(), commitment, + from_slot, )); } @@ -246,7 +250,7 @@ impl> StreamManager { .map(|chunk| { let refs: Vec<&Pubkey> = chunk.iter().collect(); self.stream_factory - .subscribe(Self::build_account_request(&refs, commitment)) + .subscribe(Self::build_account_request(&refs, commitment, None)) }) .collect(); @@ -332,6 +336,7 @@ impl> StreamManager { fn build_account_request( pubkeys: &[&Pubkey], commitment: &CommitmentLevel, + from_slot: Option, ) -> SubscribeRequest { let mut accounts = HashMap::new(); accounts.insert( @@ -355,6 +360,7 @@ impl> StreamManager { accounts, slots, commitment: Some((*commitment).into()), + from_slot, ..Default::default() } } @@ -365,8 +371,10 @@ impl> StreamManager { &self, pubkeys: &[&Pubkey], commitment: &CommitmentLevel, + from_slot: Option, ) -> LaserStreamWithHandle { - let request = Self::build_account_request(pubkeys, commitment); + let request = + Self::build_account_request(pubkeys, commitment, from_slot); self.stream_factory.subscribe(request) } @@ -608,7 +616,9 @@ mod tests { n: usize, ) -> Vec { let pks = make_pubkeys(n); - mgr.account_subscribe(&pks, &COMMITMENT).await.unwrap(); + mgr.account_subscribe(&pks, &COMMITMENT, None) + .await + .unwrap(); pks } @@ -624,7 +634,9 @@ mod tests { while remaining > 0 { let n = remaining.min(batch); let pks = make_pubkeys(n); - mgr.account_subscribe(&pks, &COMMITMENT).await.unwrap(); + mgr.account_subscribe(&pks, &COMMITMENT, None) + .await 
+ .unwrap(); all.extend(pks); remaining -= n; } @@ -654,7 +666,9 @@ mod tests { let (mut mgr, factory) = create_manager(); let pk = Pubkey::new_unique(); - mgr.account_subscribe(&[pk], &COMMITMENT).await.unwrap(); + mgr.account_subscribe(&[pk], &COMMITMENT, None) + .await + .unwrap(); assert_subscriptions_eq(&mgr, &[pk]); @@ -668,7 +682,9 @@ mod tests { let (mut mgr, factory) = create_manager(); let pks = make_pubkeys(5); - mgr.account_subscribe(&pks, &COMMITMENT).await.unwrap(); + mgr.account_subscribe(&pks, &COMMITMENT, None) + .await + .unwrap(); assert_subscriptions_eq(&mgr, &pks); @@ -682,10 +698,14 @@ mod tests { let (mut mgr, factory) = create_manager(); let pk = Pubkey::new_unique(); - mgr.account_subscribe(&[pk], &COMMITMENT).await.unwrap(); + mgr.account_subscribe(&[pk], &COMMITMENT, None) + .await + .unwrap(); let calls_after_first = factory.captured_requests().len(); - mgr.account_subscribe(&[pk], &COMMITMENT).await.unwrap(); + mgr.account_subscribe(&[pk], &COMMITMENT, None) + .await + .unwrap(); assert_subscriptions_eq(&mgr, &[pk]); assert_eq!(factory.captured_requests().len(), calls_after_first); @@ -696,9 +716,15 @@ mod tests { let (mut mgr, factory) = create_manager(); let pks = make_pubkeys(3); - mgr.account_subscribe(&[pks[0]], &COMMITMENT).await.unwrap(); - mgr.account_subscribe(&[pks[1]], &COMMITMENT).await.unwrap(); - mgr.account_subscribe(&[pks[2]], &COMMITMENT).await.unwrap(); + mgr.account_subscribe(&[pks[0]], &COMMITMENT, None) + .await + .unwrap(); + mgr.account_subscribe(&[pks[1]], &COMMITMENT, None) + .await + .unwrap(); + mgr.account_subscribe(&[pks[2]], &COMMITMENT, None) + .await + .unwrap(); assert_subscriptions_eq(&mgr, &pks); @@ -744,14 +770,16 @@ mod tests { let (mut mgr, factory) = create_manager(); // Subscribe MAX_NEW (5) pubkeys first. 
let first_five = make_pubkeys(5); - mgr.account_subscribe(&first_five, &COMMITMENT) + mgr.account_subscribe(&first_five, &COMMITMENT, None) .await .unwrap(); assert_eq!(mgr.unoptimized_old_stream_count(), 0); // Subscribe the 6th pubkey → triggers promotion. let sixth = Pubkey::new_unique(); - mgr.account_subscribe(&[sixth], &COMMITMENT).await.unwrap(); + mgr.account_subscribe(&[sixth], &COMMITMENT, None) + .await + .unwrap(); assert_eq!(mgr.unoptimized_old_stream_count(), 1); // A new current-new stream was created for the 6th pubkey. @@ -924,7 +952,9 @@ mod tests { // Subscribe a new pubkey after optimization. let new_pk = Pubkey::new_unique(); - mgr.account_subscribe(&[new_pk], &COMMITMENT).await.unwrap(); + mgr.account_subscribe(&[new_pk], &COMMITMENT, None) + .await + .unwrap(); assert!(mgr.subscriptions().contains(&new_pk)); assert!(mgr.current_new_subs().contains(&new_pk)); @@ -961,7 +991,9 @@ mod tests { async fn test_unsubscribe_removes_from_subscriptions_set() { let (mut mgr, _factory) = create_manager(); let pks = make_pubkeys(3); - mgr.account_subscribe(&pks, &COMMITMENT).await.unwrap(); + mgr.account_subscribe(&pks, &COMMITMENT, None) + .await + .unwrap(); mgr.account_unsubscribe(&[pks[1]]); @@ -982,7 +1014,9 @@ mod tests { async fn test_unsubscribe_already_unsubscribed_pubkey() { let (mut mgr, _factory) = create_manager(); let pk = Pubkey::new_unique(); - mgr.account_subscribe(&[pk], &COMMITMENT).await.unwrap(); + mgr.account_subscribe(&[pk], &COMMITMENT, None) + .await + .unwrap(); mgr.account_unsubscribe(&[pk]); mgr.account_unsubscribe(&[pk]); @@ -994,7 +1028,9 @@ mod tests { async fn test_unsubscribe_does_not_modify_streams() { let (mut mgr, factory) = create_manager(); let pks = make_pubkeys(4); - mgr.account_subscribe(&pks, &COMMITMENT).await.unwrap(); + mgr.account_subscribe(&pks, &COMMITMENT, None) + .await + .unwrap(); let calls_before = factory.captured_requests().len(); mgr.account_unsubscribe(&pks[0..2]); @@ -1024,7 +1060,9 @@ mod tests { 
async fn test_unsubscribe_batch() { let (mut mgr, factory) = create_manager(); let pks = make_pubkeys(5); - mgr.account_subscribe(&pks, &COMMITMENT).await.unwrap(); + mgr.account_subscribe(&pks, &COMMITMENT, None) + .await + .unwrap(); let calls_before = factory.captured_requests().len(); mgr.account_unsubscribe(&[pks[0], pks[2], pks[4]]); @@ -1041,7 +1079,9 @@ mod tests { async fn test_is_subscribed_returns_true_for_active() { let (mut mgr, _factory) = create_manager(); let pk = Pubkey::new_unique(); - mgr.account_subscribe(&[pk], &COMMITMENT).await.unwrap(); + mgr.account_subscribe(&[pk], &COMMITMENT, None) + .await + .unwrap(); assert!(mgr.is_subscribed(&pk)); } @@ -1050,7 +1090,9 @@ mod tests { async fn test_is_subscribed_returns_false_after_unsubscribe() { let (mut mgr, _factory) = create_manager(); let pk = Pubkey::new_unique(); - mgr.account_subscribe(&[pk], &COMMITMENT).await.unwrap(); + mgr.account_subscribe(&[pk], &COMMITMENT, None) + .await + .unwrap(); mgr.account_unsubscribe(&[pk]); assert!(!mgr.is_subscribed(&pk)); @@ -1204,9 +1246,13 @@ mod tests { let (mut mgr, _factory) = create_manager(); let pk = Pubkey::new_unique(); - mgr.account_subscribe(&[pk], &COMMITMENT).await.unwrap(); + mgr.account_subscribe(&[pk], &COMMITMENT, None) + .await + .unwrap(); mgr.account_unsubscribe(&[pk]); - mgr.account_subscribe(&[pk], &COMMITMENT).await.unwrap(); + mgr.account_subscribe(&[pk], &COMMITMENT, None) + .await + .unwrap(); assert!(mgr.subscriptions().contains(&pk)); assert!(mgr.current_new_subs().contains(&pk)); @@ -1222,7 +1268,9 @@ mod tests { let commitment = CommitmentLevel::Finalized; let pk = Pubkey::new_unique(); - mgr.account_subscribe(&[pk], &commitment).await.unwrap(); + mgr.account_subscribe(&[pk], &commitment, None) + .await + .unwrap(); let reqs = factory.captured_requests(); assert_eq!(reqs.len(), 1); @@ -1277,4 +1325,79 @@ mod tests { assert_eq!(factory.captured_requests().len(), calls_before); } + + // 
------------------------------------------------------------- + // 11. from_slot Support + // ------------------------------------------------------------- + + #[tokio::test] + async fn test_from_slot_set_on_subscribe_request() { + let (mut mgr, factory) = create_manager(); + let pk = Pubkey::new_unique(); + + mgr.account_subscribe(&[pk], &COMMITMENT, Some(42)) + .await + .unwrap(); + + let reqs = factory.captured_requests(); + assert_eq!(reqs.len(), 1); + assert_eq!(reqs[0].from_slot, Some(42)); + } + + #[tokio::test] + async fn test_from_slot_none_when_not_provided() { + let (mut mgr, factory) = create_manager(); + let pk = Pubkey::new_unique(); + + mgr.account_subscribe(&[pk], &COMMITMENT, None) + .await + .unwrap(); + + let reqs = factory.captured_requests(); + assert_eq!(reqs.len(), 1); + assert_eq!(reqs[0].from_slot, None); + } + + #[tokio::test] + async fn test_from_slot_forwarded_to_handle_write() { + let (mut mgr, factory) = create_manager(); + let pks = make_pubkeys(2); + + // First call creates the stream. + mgr.account_subscribe(&[pks[0]], &COMMITMENT, Some(100)) + .await + .unwrap(); + // Second call updates via handle.write(). 
+ mgr.account_subscribe(&[pks[1]], &COMMITMENT, Some(200)) + .await + .unwrap(); + + let handle_reqs = factory.handle_requests(); + assert_eq!(handle_reqs.len(), 1); + assert_eq!(handle_reqs[0].from_slot, Some(200)); + } + + #[tokio::test] + async fn test_optimize_sets_from_slot_none() { + let (mut mgr, factory) = create_manager(); + mgr.account_subscribe(&make_pubkeys(5), &COMMITMENT, Some(42)) + .await + .unwrap(); + + let reqs_before = factory.captured_requests().len(); + mgr.optimize(&COMMITMENT); + + let optimize_reqs: Vec<_> = factory + .captured_requests() + .into_iter() + .skip(reqs_before) + .collect(); + assert!(!optimize_reqs.is_empty()); + for req in &optimize_reqs { + assert_eq!( + req.from_slot, None, + "optimized streams should have from_slot=None", + ); + } + } } From e7e9188d65b07fc15a87014ae64baee4d98af3bc Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 19 Feb 2026 15:26:52 +0700 Subject: [PATCH 59/64] chore: initial stream manager integration with some problems --- .../chain_laser_actor/actor.rs | 344 +++++++----------- .../chain_laser_actor/mod.rs | 2 +- .../chain_laser_actor/stream_manager.rs | 118 +++--- 3 files changed, 199 insertions(+), 265 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs index 1df33d8a9..4eb878535 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs @@ -8,7 +8,6 @@ use std::{ time::Duration, }; -use futures_util::StreamExt; use helius_laserstream::{ grpc::{subscribe_update::UpdateOneof, CommitmentLevel, SubscribeUpdate}, LaserstreamConfig, LaserstreamError, @@ -16,7 +15,6 @@ use helius_laserstream::{ use magicblock_core::logger::log_trace_debug; use magicblock_metrics::metrics::{ inc_account_subscription_account_updates_count, - 
inc_account_subscription_activations_count, inc_per_program_account_updates_count, inc_program_subscription_account_updates_count, }; @@ -26,13 +24,12 @@ use solana_commitment_config::CommitmentLevel as SolanaCommitmentLevel; use solana_pubkey::Pubkey; use solana_sdk_ids::sysvar::clock; use tokio::sync::{mpsc, oneshot}; -use tokio_stream::StreamMap; use tonic::Code; use tracing::*; use super::{ - LaserResult, LaserStream, StreamFactory, StreamHandle, StreamManager, - StreamManagerConfig, + LaserResult, StreamFactory, StreamHandle, StreamManager, + StreamManagerConfig, StreamUpdateSource, }; use crate::remote_account_provider::{ chain_rpc_client::{ChainRpcClient, ChainRpcClientImpl}, @@ -45,12 +42,6 @@ use crate::remote_account_provider::{ SubscriptionUpdate, }; -type LaserStreamUpdate = (usize, LaserResult); - -const PER_STREAM_SUBSCRIPTION_LIMIT: usize = 1_000; -const SUBSCRIPTION_ACTIVATION_INTERVAL_MILLIS: u64 = 10_000; -const SLOTS_BETWEEN_ACTIVATIONS: u64 = - SUBSCRIPTION_ACTIVATION_INTERVAL_MILLIS / 400; const MAX_SLOTS_BACKFILL: u64 = 400; pub type SharedSubscriptions = Arc>>; @@ -92,53 +83,55 @@ impl fmt::Display for AccountUpdateSource { // ----------------- // ChainLaserActor // ----------------- -/// ChainLaserActor manages gRPC subscriptions to Helius Laser or Triton endpoints. +/// ChainLaserActor manages gRPC subscriptions to Helius Laser +/// or Triton endpoints. /// /// ## Subscription Lifecycle /// -/// 1. **Requested**: User calls `subscribe(pubkey)`. Pubkey is added to `subscriptions` set. -/// 2. **Queued**: Every [SUBSCRIPTION_ACTIVATION_INTERVAL_MILLIS], `update_active_subscriptions()` creates new streams. -/// 3. **Active**: Subscriptions are sent to Helius/Triton via gRPC streams in `active_subscriptions`. -/// 4. **Updates**: Account updates flow back via the streams and are forwarded to the consumer. +/// 1. **Requested**: User calls `subscribe(pubkey)`. +/// 2. 
**Active**: The pubkey is immediately forwarded to the +/// [StreamManager] which handles stream creation/chunking. +/// 3. **Updates**: Account updates flow back via the streams +/// and are forwarded to the consumer. /// /// ## Stream Management /// -/// - Subscriptions are grouped into chunks of up to 1,000 per stream (Helius limit). -/// - Each chunk gets its own gRPC stream (`StreamMap`). -/// - When subscriptions change, ALL streams are dropped and recreated. -/// - This simplifies reasoning but loses in-flight updates during the transition. +/// Stream creation, chunking, promotion, and optimization are +/// fully delegated to [StreamManager]. /// /// ## Reconnection Behavior /// -/// - If a stream ends unexpectedly, `signal_connection_issue()` is called. -/// - The actor sends an abort signal to the submux, which triggers reconnection. -/// - The actor itself doesn't attempt to reconnect; it relies on external recovery. +/// - If a stream ends unexpectedly, `signal_connection_issue()` +/// is called. +/// - The actor sends an abort signal to the submux, which +/// triggers reconnection. +/// - The actor itself doesn't attempt to reconnect; it relies +/// on external recovery. pub struct ChainLaserActor> { - /// Manager for creating laser streams + /// Manager for creating and polling laser streams stream_manager: StreamManager, - /// Requested subscriptions, some may not be active yet. /// Shared with ChainLaserClientImpl for sync access to /// subscription_count and subscriptions_union. + /// Also used in `process_subscription_update` to filter + /// incoming updates. 
subscriptions: SharedSubscriptions, - /// Pubkeys of currently active subscriptions - active_subscription_pubkeys: HashSet, - /// Subscriptions that have been activated via the helius provider - active_subscriptions: StreamMap, /// Receives subscribe/unsubscribe messages to this actor messages_receiver: mpsc::Receiver, - /// Sends updates for any account subscription that is received via - /// the Laser client subscription mechanism + /// Sends updates for any account subscription that is + /// received via the Laser client subscription mechanism subscription_updates_sender: mpsc::Sender, /// The commitment level to use for subscriptions commitment: CommitmentLevel, /// Channel used to signal connection issues to the submux abort_sender: mpsc::Sender<()>, - /// Slot tracking for chain slot synchronization and activation lookback + /// Slot tracking for chain slot synchronization and + /// activation lookback slots: Slots, - /// Unique client ID including the gRPC provider name for this actor instance used in logs - /// and metrics + /// Unique client ID including the gRPC provider name for + /// this actor instance used in logs and metrics client_id: String, - /// RPC client for diagnostics (e.g., fetching slot when falling behind) + /// RPC client for diagnostics (e.g., fetching slot when + /// falling behind) rpc_client: ChainRpcClientImpl, } @@ -236,8 +229,6 @@ impl> ChainLaserActor { ), messages_receiver, subscriptions, - active_subscriptions: Default::default(), - active_subscription_pubkeys: Default::default(), subscription_updates_sender, commitment, abort_sender, @@ -258,20 +249,16 @@ impl> ChainLaserActor { #[instrument(skip(self), fields(client_id = %self.client_id))] fn shutdown(&mut self) { info!("Shutting down laser actor"); - self.subscriptions.write().clear(); - self.active_subscriptions.clear(); - self.active_subscription_pubkeys.clear(); + Self::clear_subscriptions( + &self.subscriptions, + &mut self.stream_manager, + ); } #[instrument(skip(self), 
fields(client_id = %self.client_id))] pub async fn run(mut self) { - let mut activate_subs_interval = - tokio::time::interval(std::time::Duration::from_millis( - SUBSCRIPTION_ACTIVATION_INTERVAL_MILLIS, - )); loop { tokio::select! { - // Receive messages from the user msg = self.messages_receiver.recv() => { match msg { Some(msg) => { @@ -282,43 +269,19 @@ impl> ChainLaserActor { None => break, } }, - // Account subscription updates - update = self.active_subscriptions.next(), if !self.active_subscriptions.is_empty() => { - match update { - Some(update) => { - self.handle_account_update(update).await; - } - None => { - debug!("Account subscription stream ended"); - Self::signal_connection_issue( - &self.subscriptions, - &mut self.active_subscriptions, - &mut self.active_subscription_pubkeys, - &mut self.stream_manager, - &self.abort_sender, - &self.client_id, - ) - .await; - } - } - }, - // Program subscription updates - update = async { - match self.stream_manager.program_stream_mut() { - Some(swh) => swh.stream.next().await, - None => std::future::pending().await, - } - }, if self.stream_manager.has_program_subscriptions() => { + update = self.stream_manager.next_update(), if self.stream_manager.has_any_subscriptions() => { match update { - Some(update) => { - self.handle_program_update(update).await; + Some((src, result)) => { + self.handle_stream_result( + src, result, + ).await; } None => { - debug!("Program subscription stream ended"); + debug!( + "Subscription stream ended" + ); Self::signal_connection_issue( &self.subscriptions, - &mut self.active_subscriptions, - &mut self.active_subscription_pubkeys, &mut self.stream_manager, &self.abort_sender, &self.client_id, @@ -327,11 +290,6 @@ impl> ChainLaserActor { } } }, - // Activate pending subscriptions - _ = activate_subs_interval.tick() => { - self.update_active_subscriptions(); - }, - } } } @@ -342,7 +300,7 @@ impl> ChainLaserActor { AccountSubscribe { pubkey, response, .. 
} => { - self.add_sub(pubkey, response); + self.add_sub(pubkey, response).await; false } AccountUnsubscribe { pubkey, response } => { @@ -359,7 +317,7 @@ impl> ChainLaserActor { warn!(client_id = self.client_id, program_id = %pubkey, "Failed to send program subscribe response"); }); } else { - let _ =response.send(Ok(())).inspect_err(|_| { + let _ = response.send(Ok(())).inspect_err(|_| { warn!(client_id = self.client_id, program_id = %pubkey, "Failed to send program subscribe response"); }); }; @@ -381,8 +339,6 @@ impl> ChainLaserActor { info!(client_id = self.client_id, "Received Shutdown message"); Self::clear_subscriptions( &self.subscriptions, - &mut self.active_subscriptions, - &mut self.active_subscription_pubkeys, &mut self.stream_manager, ); let _ = response.send(Ok(())).inspect_err(|_| { @@ -396,30 +352,52 @@ impl> ChainLaserActor { } } - /// Tracks subscriptions, but does not yet activate them. - fn add_sub( + /// Subscribes to the given pubkey immediately by forwarding + /// to the stream manager. 
+ async fn add_sub( &mut self, pubkey: Pubkey, sub_response: oneshot::Sender>, ) { if self.subscriptions.read().contains(&pubkey) { - debug!(pubkey = %pubkey, "Already subscribed to account"); + debug!( + pubkey = %pubkey, + "Already subscribed to account" + ); sub_response.send(Ok(())).unwrap_or_else(|_| { warn!(pubkey = %pubkey, "Failed to send already subscribed response"); }); - } else { - self.subscriptions.write().insert(pubkey); - // If this is the first sub for the clock sysvar we want to activate it immediately - if self.active_subscriptions.is_empty() { - self.update_active_subscriptions(); - } - sub_response.send(Ok(())).unwrap_or_else(|_| { - warn!(pubkey = %pubkey, "Failed to send subscribe response"); - }) + return; } + + self.subscriptions.write().insert(pubkey); + + let from_slot = self.determine_from_slot().map(|(_, fs)| fs); + let result = self + .stream_manager + .account_subscribe(&[pubkey], &self.commitment, from_slot) + .await; + + let response = match result { + Ok(()) => Ok(()), + Err(e) => { + error!( + pubkey = %pubkey, + error = ?e, + "Failed to subscribe to account" + ); + Err(e) + } + }; + sub_response.send(response).unwrap_or_else(|_| { + warn!( + pubkey = %pubkey, + "Failed to send subscribe response" + ); + }); } - /// Removes a subscription, but does not yet deactivate it. + /// Removes a subscription and forwards to the stream manager. 
fn remove_sub( &mut self, pubkey: &Pubkey, @@ -427,7 +405,11 @@ impl> ChainLaserActor { ) { match self.subscriptions.write().remove(pubkey) { true => { - trace!(pubkey = %pubkey, "Unsubscribed from account"); + self.stream_manager.account_unsubscribe(&[*pubkey]); + trace!( + pubkey = %pubkey, + "Unsubscribed from account" + ); unsub_response.send(Ok(())).unwrap_or_else(|_| { warn!(pubkey = %pubkey, "Failed to send unsubscribe response"); }); @@ -446,69 +428,12 @@ impl> ChainLaserActor { } } - fn update_active_subscriptions(&mut self) { - // Copy subscriptions and release the read lock immediately - let new_pubkeys: HashSet = { - let subs = self.subscriptions.read(); - // Check if the active subscriptions match what we already have - if subs.eq(&self.active_subscription_pubkeys) { - trace!( - count = subs.len(), - "Active subscriptions already up to date" - ); - return; - } - subs.iter().copied().collect() - }; - - inc_account_subscription_activations_count(&self.client_id); - - let mut new_subs: StreamMap = StreamMap::new(); - - // Re-create streams for all subscriptions - let sub_refs = new_pubkeys.iter().collect::>(); - - let chunks = sub_refs - .chunks(PER_STREAM_SUBSCRIPTION_LIMIT) - .map(|chunk| { - chunk.iter().map(|pk| pk as &Pubkey).collect::>() - }) - .collect::>(); - - let (chain_slot, from_slot) = self - .determine_from_slot() - .map(|(cs, fs)| (Some(cs), Some(fs))) - .unwrap_or((None, None)); - - if tracing::enabled!(tracing::Level::TRACE) { - trace!( - account_count = sub_refs.len(), - chain_slot, - from_slot, - stream_count = chunks.len(), - "Activating account subscriptions" - ); - } - - for (idx, chunk) in chunks.into_iter().enumerate() { - let swh = self.stream_manager.account_subscribe_old( - &chunk, - &self.commitment, - idx, - from_slot, - ); - new_subs.insert(idx, swh.stream); - } - - // Drop current active subscriptions by reassignig to new ones - self.active_subscriptions = new_subs; - self.active_subscription_pubkeys = new_pubkeys; - } - - 
/// Determines the from_slot for backfilling subscription updates. + /// Determines the from_slot for backfilling subscription + /// updates. /// - /// Returns `Some((chain_slot, from_slot))` if backfilling is supported and we have a valid chain slot, - /// otherwise returns `None`. + /// Returns `Some((chain_slot, from_slot))` if backfilling is + /// supported and we have a valid chain slot, otherwise + /// returns `None`. fn determine_from_slot(&self) -> Option<(u64, u64)> { if !self.slots.supports_backfill { return None; @@ -516,22 +441,17 @@ impl> ChainLaserActor { let chain_slot = self.slots.chain_slot.load(); if chain_slot == 0 { - // If we didn't get a chain slot update yet we cannot backfill return None; } - // Get last activation slot and update to current chain slot let last_activation_slot = self .slots .last_activation_slot .swap(chain_slot, Ordering::Relaxed); - // when this is called the first time make the best effort to find a reasonable - // slot to backfill from. let from_slot = if last_activation_slot == 0 { - chain_slot.saturating_sub(SLOTS_BETWEEN_ACTIVATIONS + 1) + chain_slot.saturating_sub(MAX_SLOTS_BACKFILL) } else { - // Limit how far back we go in order to avoid data loss errors let target_slot = last_activation_slot.saturating_sub(1); let delta = chain_slot.saturating_sub(target_slot); if delta < MAX_SLOTS_BACKFILL { @@ -543,46 +463,38 @@ impl> ChainLaserActor { Some((chain_slot, from_slot)) } - /// Handles an update from one of the account data streams. - #[instrument(skip(self), fields(client_id = %self.client_id, stream_index = %idx))] - async fn handle_account_update( + /// Handles an update from any subscription stream. 
+ #[instrument(skip(self), fields(client_id = %self.client_id))] + async fn handle_stream_result( &mut self, - (idx, result): LaserStreamUpdate, + src: StreamUpdateSource, + result: LaserResult, ) { + let update_source = match src { + StreamUpdateSource::Account => AccountUpdateSource::Account, + StreamUpdateSource::Program => AccountUpdateSource::Program, + }; match result { Ok(subscribe_update) => { self.process_subscription_update( subscribe_update, - AccountUpdateSource::Account, - ) - .await; - } - Err(err) => { - self.handle_stream_error(&err, "account update").await; - } - } - } - - /// Handles an update from the program subscriptions stream. - #[instrument(skip(self), fields(client_id = %self.client_id))] - async fn handle_program_update(&mut self, result: LaserResult) { - match result { - Ok(subscribe_update) => { - self.process_subscription_update( - subscribe_update, - AccountUpdateSource::Program, + update_source, ) .await; } Err(err) => { - self.handle_stream_error(&err, "program subscription").await; + let label = match src { + StreamUpdateSource::Account => "account update", + StreamUpdateSource::Program => "program subscription", + }; + self.handle_stream_error(&err, label).await; } } } - /// Common error handling for stream errors. Detects "fallen behind" errors - /// and spawns diagnostics to compare our last known slot with the actual - /// chain slot via RPC. + /// Common error handling for stream errors. Detects "fallen + /// behind" errors and spawns diagnostics to compare our last + /// known slot with the actual chain slot via RPC. 
async fn handle_stream_error( &mut self, err: &LaserstreamError, @@ -592,11 +504,14 @@ impl> ChainLaserActor { self.spawn_fallen_behind_diagnostics(source); } - error!(error = ?err, slots = ?self.slots, "Error in {} stream", source); + error!( + error = ?err, + slots = ?self.slots, + "Error in {} stream", + source, + ); Self::signal_connection_issue( &self.subscriptions, - &mut self.active_subscriptions, - &mut self.active_subscription_pubkeys, &mut self.stream_manager, &self.abort_sender, &self.client_id, @@ -666,25 +581,21 @@ impl> ChainLaserActor { fn clear_subscriptions( subscriptions: &SharedSubscriptions, - active_subscriptions: &mut StreamMap, - active_subscription_pubkeys: &mut HashSet, stream_manager: &mut StreamManager, ) { subscriptions.write().clear(); - active_subscriptions.clear(); - active_subscription_pubkeys.clear(); + stream_manager.clear_account_subscriptions(); stream_manager.clear_program_subscriptions(); } - /// Signals a connection issue by clearing all subscriptions and - /// sending a message on the abort channel. - /// NOTE: the laser client should handle reconnects internally, but - /// we add this as a backup in case it is unable to do so - #[instrument(skip(subscriptions, active_subscriptions, active_subscription_pubkeys, stream_manager, abort_sender), fields(client_id = %client_id))] + /// Signals a connection issue by clearing all subscriptions + /// and sending a message on the abort channel. 
+ /// NOTE: the laser client should handle reconnects + /// internally, but we add this as a backup in case it is + /// unable to do so + #[instrument(skip(subscriptions, stream_manager, abort_sender), fields(client_id = %client_id))] async fn signal_connection_issue( subscriptions: &SharedSubscriptions, - active_subscriptions: &mut StreamMap, - active_subscription_pubkeys: &mut HashSet, stream_manager: &mut StreamManager, abort_sender: &mpsc::Sender<()>, client_id: &str, @@ -699,19 +610,16 @@ impl> ChainLaserActor { &SIGNAL_CONNECTION_COUNT, ); - // Clear all subscriptions - Self::clear_subscriptions( - subscriptions, - active_subscriptions, - active_subscription_pubkeys, - stream_manager, - ); + Self::clear_subscriptions(subscriptions, stream_manager); - // Use try_send to avoid blocking and naturally coalesce signals + // Use try_send to avoid blocking and naturally + // coalesce signals let _ = abort_sender.try_send(()).inspect_err(|err| { - // Channel full is expected when reconnect is already in progress if !matches!(err, mpsc::error::TrySendError::Full(_)) { - error!(error = ?err, "Failed to signal connection issue"); + error!( + error = ?err, + "Failed to signal connection issue" + ); } }); } diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs index 84275edfb..fe86c4df1 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs @@ -9,7 +9,7 @@ use helius_laserstream::{ pub use self::{ actor::{ChainLaserActor, SharedSubscriptions, Slots}, - stream_manager::{StreamManager, StreamManagerConfig}, + stream_manager::{StreamManager, StreamManagerConfig, StreamUpdateSource}, }; mod actor; diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs 
b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs index e97830c9e..b1b600791 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs @@ -1,5 +1,6 @@ use std::collections::{HashMap, HashSet}; +use futures_util::StreamExt; use helius_laserstream::grpc::{ CommitmentLevel, SubscribeRequest, SubscribeRequestFilterAccounts, SubscribeRequestFilterSlots, @@ -7,12 +8,20 @@ use helius_laserstream::grpc::{ use solana_pubkey::Pubkey; use tokio::time::Duration; -use super::{LaserStreamWithHandle, StreamFactory}; +use super::{LaserResult, LaserStreamWithHandle, StreamFactory}; use crate::remote_account_provider::{ chain_laser_actor::StreamHandle, RemoteAccountProviderError, RemoteAccountProviderResult, }; +/// Identifies whether a stream update came from an account or +/// program subscription stream. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum StreamUpdateSource { + Account, + Program, +} + /// Configuration for the generational stream manager. #[allow(unused)] pub struct StreamManagerConfig { @@ -233,6 +242,65 @@ impl> StreamManager { } } + /// Clears all account subscriptions and drops all account + /// streams. + pub fn clear_account_subscriptions(&mut self) { + self.subscriptions.clear(); + self.current_new_subs.clear(); + self.current_new_stream = None; + self.unoptimized_old_streams.clear(); + self.optimized_old_streams.clear(); + } + + /// Returns `true` if any account stream exists. + pub fn has_account_subscriptions(&self) -> bool { + self.current_new_stream.is_some() + || !self.unoptimized_old_streams.is_empty() + || !self.optimized_old_streams.is_empty() + } + + /// Polls all account and program streams, returning the next + /// available update tagged with its source. + /// Returns `None` when all streams have ended. 
+ pub async fn next_update( + &mut self, + ) -> Option<(StreamUpdateSource, LaserResult)> { + let mut all: Vec<(StreamUpdateSource, &mut LaserStreamWithHandle)> = + Vec::new(); + for s in &mut self.optimized_old_streams { + all.push((StreamUpdateSource::Account, s)); + } + for s in &mut self.unoptimized_old_streams { + all.push((StreamUpdateSource::Account, s)); + } + if let Some(s) = &mut self.current_new_stream { + all.push((StreamUpdateSource::Account, s)); + } + if let Some((_, s)) = &mut self.program_subscriptions { + all.push((StreamUpdateSource::Program, s)); + } + + if all.is_empty() { + return None; + } + + let futs: futures_util::stream::FuturesUnordered<_> = all + .into_iter() + .map(|(src, s)| { + let stream = &mut s.stream; + async move { (src, stream.next().await) } + }) + .collect(); + let (src, result) = futs.into_future().await.0?; + Some((src, result?)) + } + + /// Returns `true` if any stream (account or program) + /// exists. + pub fn has_any_subscriptions(&self) -> bool { + self.has_account_subscriptions() || self.has_program_subscriptions() + } + /// Rebuild all account streams from `subscriptions`. /// /// 1. Chunk `subscriptions` into groups of @@ -249,8 +317,9 @@ impl> StreamManager { .chunks(self.config.max_subs_in_old_optimized) .map(|chunk| { let refs: Vec<&Pubkey> = chunk.iter().collect(); - self.stream_factory - .subscribe(Self::build_account_request(&refs, commitment, None)) + self.stream_factory.subscribe(Self::build_account_request( + &refs, commitment, None, + )) }) .collect(); @@ -378,49 +447,6 @@ impl> StreamManager { self.stream_factory.subscribe(request) } - // ========================================================= - // Legacy account subscribe (kept for migration) - // ========================================================= - - /// Creates a subscription stream for account updates (legacy). - /// - /// It includes a slot subscription for chain slot synchronization. 
- pub fn account_subscribe_old( - &self, - pubkeys: &[&Pubkey], - commitment: &CommitmentLevel, - idx: usize, - from_slot: Option, - ) -> LaserStreamWithHandle { - let mut accounts = HashMap::new(); - accounts.insert( - format!("account_subs: {idx}"), - SubscribeRequestFilterAccounts { - account: pubkeys.iter().map(|pk| pk.to_string()).collect(), - ..Default::default() - }, - ); - - // Subscribe to slot updates for chain_slot synchronization - let mut slots = HashMap::new(); - slots.insert( - "slot_updates".to_string(), - SubscribeRequestFilterSlots { - filter_by_commitment: Some(true), - ..Default::default() - }, - ); - - let request = SubscribeRequest { - accounts, - slots, - commitment: Some((*commitment).into()), - from_slot, - ..Default::default() - }; - self.stream_factory.subscribe(request) - } - /// Adds a program subscription. If the program is already /// subscribed, this is a no-op. Otherwise, updates the program /// stream to include all subscribed programs. From 84bfa2b7b63500aa178fc7c674813eef1f88124f Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 19 Feb 2026 15:37:22 +0700 Subject: [PATCH 60/64] chore: single subscriptions source in stream manager --- .../chain_laser_actor/actor.rs | 85 +++++++------------ .../chain_laser_actor/mod.rs | 12 ++- .../chain_laser_actor/stream_manager.rs | 67 +++++++++------ 3 files changed, 81 insertions(+), 83 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs index 4eb878535..0d7afb9e5 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs @@ -1,5 +1,4 @@ use std::{ - collections::HashSet, fmt, sync::{ atomic::{AtomicU16, AtomicU64, Ordering}, @@ -18,7 +17,6 @@ use magicblock_metrics::metrics::{ inc_per_program_account_updates_count, 
inc_program_subscription_account_updates_count, }; -use parking_lot::RwLock; use solana_account::Account; use solana_commitment_config::CommitmentLevel as SolanaCommitmentLevel; use solana_pubkey::Pubkey; @@ -28,8 +26,8 @@ use tonic::Code; use tracing::*; use super::{ - LaserResult, StreamFactory, StreamHandle, StreamManager, - StreamManagerConfig, StreamUpdateSource, + LaserResult, SharedSubscriptions, StreamFactory, StreamHandle, + StreamManager, StreamManagerConfig, StreamUpdateSource, }; use crate::remote_account_provider::{ chain_rpc_client::{ChainRpcClient, ChainRpcClientImpl}, @@ -44,8 +42,6 @@ use crate::remote_account_provider::{ const MAX_SLOTS_BACKFILL: u64 = 400; -pub type SharedSubscriptions = Arc>>; - // ----------------- // Slots // ----------------- @@ -110,11 +106,6 @@ impl fmt::Display for AccountUpdateSource { pub struct ChainLaserActor> { /// Manager for creating and polling laser streams stream_manager: StreamManager, - /// Shared with ChainLaserClientImpl for sync access to - /// subscription_count and subscriptions_union. - /// Also used in `process_subscription_update` to filter - /// incoming updates. 
- subscriptions: SharedSubscriptions, /// Receives subscribe/unsubscribe messages to this actor messages_receiver: mpsc::Receiver, /// Sends updates for any account subscription that is @@ -219,16 +210,16 @@ impl> ChainLaserActor { mpsc::channel(MESSAGE_CHANNEL_SIZE); let commitment = grpc_commitment_from_solana(commitment); - let subscriptions: SharedSubscriptions = Default::default(); - let shared_subscriptions = Arc::clone(&subscriptions); + let stream_manager = StreamManager::new( + StreamManagerConfig::default(), + stream_factory, + ); + let shared_subscriptions = + Arc::clone(stream_manager.subscriptions()); let me = Self { - stream_manager: StreamManager::new( - StreamManagerConfig::default(), - stream_factory, - ), + stream_manager, messages_receiver, - subscriptions, subscription_updates_sender, commitment, abort_sender, @@ -249,10 +240,7 @@ impl> ChainLaserActor { #[instrument(skip(self), fields(client_id = %self.client_id))] fn shutdown(&mut self) { info!("Shutting down laser actor"); - Self::clear_subscriptions( - &self.subscriptions, - &mut self.stream_manager, - ); + Self::clear_subscriptions(&mut self.stream_manager); } #[instrument(skip(self), fields(client_id = %self.client_id))] @@ -281,7 +269,6 @@ impl> ChainLaserActor { "Subscription stream ended" ); Self::signal_connection_issue( - &self.subscriptions, &mut self.stream_manager, &self.abort_sender, &self.client_id, @@ -338,7 +325,6 @@ impl> ChainLaserActor { Shutdown { response } => { info!(client_id = self.client_id, "Received Shutdown message"); Self::clear_subscriptions( - &self.subscriptions, &mut self.stream_manager, ); let _ = response.send(Ok(())).inspect_err(|_| { @@ -359,7 +345,7 @@ impl> ChainLaserActor { pubkey: Pubkey, sub_response: oneshot::Sender>, ) { - if self.subscriptions.read().contains(&pubkey) { + if self.stream_manager.is_subscribed(&pubkey) { debug!( pubkey = %pubkey, "Already subscribed to account" @@ -370,8 +356,6 @@ impl> ChainLaserActor { return; } - 
self.subscriptions.write().insert(pubkey); - let from_slot = self.determine_from_slot().map(|(_, fs)| fs); let result = self .stream_manager @@ -403,28 +387,25 @@ impl> ChainLaserActor { pubkey: &Pubkey, unsub_response: oneshot::Sender>, ) { - match self.subscriptions.write().remove(pubkey) { - true => { - self.stream_manager.account_unsubscribe(&[*pubkey]); - trace!( - pubkey = %pubkey, - "Unsubscribed from account" - ); - unsub_response.send(Ok(())).unwrap_or_else(|_| { + if self.stream_manager.is_subscribed(pubkey) { + self.stream_manager.account_unsubscribe(&[*pubkey]); + trace!( + pubkey = %pubkey, + "Unsubscribed from account" + ); + unsub_response.send(Ok(())).unwrap_or_else(|_| { + warn!(pubkey = %pubkey, "Failed to send unsubscribe response"); + }); + } else { + unsub_response + .send(Err( + RemoteAccountProviderError::AccountSubscriptionDoesNotExist( + pubkey.to_string(), + ), + )) + .unwrap_or_else(|_| { warn!(pubkey = %pubkey, "Failed to send unsubscribe response"); }); - } - false => { - unsub_response - .send(Err( - RemoteAccountProviderError::AccountSubscriptionDoesNotExist( - pubkey.to_string(), - ), - )) - .unwrap_or_else(|_| { - warn!(pubkey = %pubkey, "Failed to send unsubscribe response"); - }); - } } } @@ -511,7 +492,6 @@ impl> ChainLaserActor { source, ); Self::signal_connection_issue( - &self.subscriptions, &mut self.stream_manager, &self.abort_sender, &self.client_id, @@ -580,10 +560,8 @@ impl> ChainLaserActor { } fn clear_subscriptions( - subscriptions: &SharedSubscriptions, stream_manager: &mut StreamManager, ) { - subscriptions.write().clear(); stream_manager.clear_account_subscriptions(); stream_manager.clear_program_subscriptions(); } @@ -593,9 +571,8 @@ impl> ChainLaserActor { /// NOTE: the laser client should handle reconnects /// internally, but we add this as a backup in case it is /// unable to do so - #[instrument(skip(subscriptions, stream_manager, abort_sender), fields(client_id = %client_id))] + 
#[instrument(skip(stream_manager, abort_sender), fields(client_id = %client_id))] async fn signal_connection_issue( - subscriptions: &SharedSubscriptions, stream_manager: &mut StreamManager, abort_sender: &mpsc::Sender<()>, client_id: &str, @@ -610,7 +587,7 @@ impl> ChainLaserActor { &SIGNAL_CONNECTION_COUNT, ); - Self::clear_subscriptions(subscriptions, stream_manager); + Self::clear_subscriptions(stream_manager); // Use try_send to avoid blocking and naturally // coalesce signals @@ -700,7 +677,7 @@ impl> ChainLaserActor { ); } - if !self.subscriptions.read().contains(&pubkey) { + if !self.stream_manager.is_subscribed(&pubkey) { return; } diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs index fe86c4df1..c9a6f39a0 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs @@ -1,4 +1,8 @@ -use std::pin::Pin; +use std::{ + collections::HashSet, + pin::Pin, + sync::Arc, +}; use async_trait::async_trait; use futures_util::Stream; @@ -6,12 +10,16 @@ use helius_laserstream::{ grpc::{SubscribeRequest, SubscribeUpdate}, LaserstreamError, StreamHandle as HeliusStreamHandle, }; +use parking_lot::RwLock; +use solana_pubkey::Pubkey; pub use self::{ - actor::{ChainLaserActor, SharedSubscriptions, Slots}, + actor::{ChainLaserActor, Slots}, stream_manager::{StreamManager, StreamManagerConfig, StreamUpdateSource}, }; +pub type SharedSubscriptions = Arc>>; + mod actor; mod mock; mod stream_manager; diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs index b1b600791..74eff424f 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs +++ 
b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs @@ -8,7 +8,9 @@ use helius_laserstream::grpc::{ use solana_pubkey::Pubkey; use tokio::time::Duration; -use super::{LaserResult, LaserStreamWithHandle, StreamFactory}; +use super::{ + LaserResult, LaserStreamWithHandle, SharedSubscriptions, StreamFactory, +}; use crate::remote_account_provider::{ chain_laser_actor::StreamHandle, RemoteAccountProviderError, RemoteAccountProviderResult, @@ -72,7 +74,7 @@ pub struct StreamManager> { /// of streams, [Self::current_new_stream], /// [Self::unoptimized_old_streams], and /// [Self::optimized_old_streams]. - subscriptions: HashSet, + subscriptions: SharedSubscriptions, /// Pubkeys that are part of the current-new stream's filter. current_new_subs: HashSet, /// The current-new stream which holds the [Self::current_new_subs]. @@ -92,7 +94,7 @@ impl> StreamManager { config, stream_factory, program_subscriptions: None, - subscriptions: HashSet::new(), + subscriptions: Default::default(), current_new_subs: HashSet::new(), current_new_stream: None, unoptimized_old_streams: Vec::new(), @@ -154,19 +156,25 @@ impl> StreamManager { from_slot: Option, ) -> RemoteAccountProviderResult<()> { // Filter out pubkeys already in subscriptions. - let new_pks: Vec = pubkeys - .iter() - .filter(|pk| !self.subscriptions.contains(pk)) - .copied() - .collect(); + let new_pks: Vec = { + let subs = self.subscriptions.read(); + pubkeys + .iter() + .filter(|pk| !subs.contains(pk)) + .copied() + .collect() + }; if new_pks.is_empty() { return Ok(()); } - for pk in &new_pks { - self.subscriptions.insert(*pk); - self.current_new_subs.insert(*pk); + { + let mut subs = self.subscriptions.write(); + for pk in &new_pks { + subs.insert(*pk); + self.current_new_subs.insert(*pk); + } } // Update the current-new stream with the full @@ -237,15 +245,16 @@ impl> StreamManager { /// are never modified. Updates for these pubkeys will be ignored /// by the actor. 
pub fn account_unsubscribe(&mut self, pubkeys: &[Pubkey]) { + let mut subs = self.subscriptions.write(); for pk in pubkeys { - self.subscriptions.remove(pk); + subs.remove(pk); } } /// Clears all account subscriptions and drops all account /// streams. pub fn clear_account_subscriptions(&mut self) { - self.subscriptions.clear(); + self.subscriptions.write().clear(); self.current_new_subs.clear(); self.current_new_stream = None; self.unoptimized_old_streams.clear(); @@ -310,7 +319,8 @@ impl> StreamManager { /// 4. Reset the current-new stream (empty filter). pub fn optimize(&mut self, commitment: &CommitmentLevel) { // Collect all active subscriptions and chunk them. - let all_pks: Vec = self.subscriptions.iter().copied().collect(); + let all_pks: Vec = + self.subscriptions.read().iter().copied().collect(); // Build optimized old streams from chunks. self.optimized_old_streams = all_pks @@ -334,15 +344,15 @@ impl> StreamManager { /// Returns `true` if the pubkey is in the active `subscriptions` /// set. pub fn is_subscribed(&self, pubkey: &Pubkey) -> bool { - self.subscriptions.contains(pubkey) + self.subscriptions.read().contains(pubkey) } // --------------------------------------------------------- // Accessors — internal state inspection // --------------------------------------------------------- - /// Returns a reference to the canonical subscriptions set. - pub fn subscriptions(&self) -> &HashSet { + /// Returns a reference to the shared subscriptions. 
+ pub fn subscriptions(&self) -> &SharedSubscriptions { &self.subscriptions } @@ -594,7 +604,7 @@ mod tests { mgr: &StreamManager, expected: &[Pubkey], ) { - let subs = mgr.subscriptions(); + let subs = mgr.subscriptions().read(); assert_eq!( subs.len(), expected.len(), @@ -603,7 +613,10 @@ mod tests { subs.len(), ); for pk in expected { - assert!(subs.contains(pk), "subscription set missing pubkey {pk}",); + assert!( + subs.contains(pk), + "subscription set missing pubkey {pk}", + ); } } @@ -853,7 +866,7 @@ mod tests { // After optimization: unoptimized should be empty. assert_eq!(mgr.unoptimized_old_stream_count(), 0); // Optimized old streams should exist. - let total_subs = mgr.subscriptions().len(); + let total_subs = mgr.subscriptions().read().len(); let expected_optimized = total_subs.div_ceil(10); // ceil(total / MAX_OLD_OPTIMIZED) assert_eq!(mgr.optimized_old_stream_count(), expected_optimized,); } @@ -982,7 +995,7 @@ mod tests { .await .unwrap(); - assert!(mgr.subscriptions().contains(&new_pk)); + assert!(mgr.subscriptions().read().contains(&new_pk)); assert!(mgr.current_new_subs().contains(&new_pk)); } @@ -1033,7 +1046,7 @@ mod tests { mgr.account_unsubscribe(&[random]); - assert!(mgr.subscriptions().is_empty()); + assert!(mgr.subscriptions().read().is_empty()); } #[tokio::test] @@ -1047,7 +1060,7 @@ mod tests { mgr.account_unsubscribe(&[pk]); mgr.account_unsubscribe(&[pk]); - assert!(mgr.subscriptions().is_empty()); + assert!(mgr.subscriptions().read().is_empty()); } #[tokio::test] @@ -1224,7 +1237,7 @@ mod tests { // ceil(50 / 10) = 5 assert_eq!(mgr.optimized_old_stream_count(), 5); - assert_eq!(mgr.subscriptions().len(), 50); + assert_eq!(mgr.subscriptions().read().len(), 50); assert_eq!(mgr.current_new_sub_count(), 0); // Verify the union of all optimized stream filters equals all @@ -1251,7 +1264,7 @@ mod tests { mgr.account_unsubscribe(&new_pks[0..2]); let expected_count = 20 - 8 + 5 - 2; - assert_eq!(mgr.subscriptions().len(), expected_count); + 
assert_eq!(mgr.subscriptions().read().len(), expected_count); let reqs_before = factory.captured_requests().len(); mgr.optimize(&COMMITMENT); @@ -1280,7 +1293,7 @@ mod tests { .await .unwrap(); - assert!(mgr.subscriptions().contains(&pk)); + assert!(mgr.subscriptions().read().contains(&pk)); assert!(mgr.current_new_subs().contains(&pk)); } @@ -1346,7 +1359,7 @@ mod tests { let calls_before = factory.captured_requests().len(); let pks: Vec = - mgr.subscriptions().iter().take(3).copied().collect(); + mgr.subscriptions().read().iter().take(3).copied().collect(); mgr.account_unsubscribe(&pks); assert_eq!(factory.captured_requests().len(), calls_before); From ceeaafd9df2f3594ae118372420278fabb1c06b0 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 19 Feb 2026 15:52:21 +0700 Subject: [PATCH 61/64] chore: separate handles from streams to allow polling streammap --- .../chain_laser_actor/stream_manager.rs | 366 +++++++++--------- 1 file changed, 188 insertions(+), 178 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs index 74eff424f..b84ad579b 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs @@ -1,15 +1,16 @@ use std::collections::{HashMap, HashSet}; -use futures_util::StreamExt; use helius_laserstream::grpc::{ CommitmentLevel, SubscribeRequest, SubscribeRequestFilterAccounts, SubscribeRequestFilterSlots, }; use solana_pubkey::Pubkey; use tokio::time::Duration; +use tokio_stream::StreamMap; use super::{ - LaserResult, LaserStreamWithHandle, SharedSubscriptions, StreamFactory, + LaserResult, LaserStream, LaserStreamWithHandle, SharedSubscriptions, + StreamFactory, }; use crate::remote_account_provider::{ chain_laser_actor::StreamHandle, RemoteAccountProviderError, @@ -24,6 +25,28 @@ 
pub enum StreamUpdateSource { Program, } +/// Identifies a stream within the [StreamMap]. +/// +/// Each variant maps to a stream category. The `usize` index +/// corresponds to the position within the respective `Vec` of +/// handles. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +enum StreamKey { + CurrentNew, + UnoptimizedOld(usize), + OptimizedOld(usize), + Program, +} + +impl StreamKey { + fn source(&self) -> StreamUpdateSource { + match self { + StreamKey::Program => StreamUpdateSource::Program, + _ => StreamUpdateSource::Account, + } + } +} + /// Configuration for the generational stream manager. #[allow(unused)] pub struct StreamManagerConfig { @@ -51,40 +74,54 @@ impl Default for StreamManagerConfig { /// Account subscriptions follow a generational approach: /// - New subscriptions go into the *current-new* stream. /// - When the current-new stream exceeds [StreamManagerConfig::max_subs_in_new] it is -/// promoted to the [Self::unoptimized_old_streams] vec and a fresh current-new stream is created. -/// - When [Self::unoptimized_old_streams] exceed [StreamManagerConfig::max_old_unoptimized], +/// promoted to the [Self::unoptimized_old_handles] vec and a fresh current-new stream is created. +/// - When [Self::unoptimized_old_handles] exceed [StreamManagerConfig::max_old_unoptimized], /// optimization is triggered which rebuilds all streams from the -/// `subscriptions` set into [StreamManager::optimized_old_streams] chunked by +/// `subscriptions` set into [StreamManager::optimized_old_handles] chunked by /// [StreamManagerConfig::max_subs_in_old_optimized]. /// /// Unsubscribe only removes from the [Self::subscriptions] HashSet — it /// never touches streams. Updates for unsubscribed pubkeys are /// ignored at the actor level. /// Unsubscribed accounts are dropped as part of optimization. +/// +/// Streams are stored in a persistent [StreamMap] keyed by +/// [StreamKey]. 
The map is only updated when stream topology +/// changes (subscribe, promote, optimize, clear). The +/// corresponding handles are stored separately for use in +/// [Self::update_subscriptions]. #[allow(unused)] pub struct StreamManager> { /// Configures limits for stream management config: StreamManagerConfig, /// The factory used to create streams stream_factory: SF, - /// Active streams for program subscriptions - program_subscriptions: Option<(HashSet, LaserStreamWithHandle)>, /// The canonical set of currently active account subscriptions. /// These include subscriptions maintained across the different set - /// of streams, [Self::current_new_stream], - /// [Self::unoptimized_old_streams], and - /// [Self::optimized_old_streams]. + /// of streams. subscriptions: SharedSubscriptions, /// Pubkeys that are part of the current-new stream's filter. current_new_subs: HashSet, - /// The current-new stream which holds the [Self::current_new_subs]. - /// (None until the first subscribe call). - current_new_stream: Option>, - /// Old streams that have not been optimized yet. - unoptimized_old_streams: Vec>, - /// Old streams created by optimization, each covering up to - /// [StreamManagerConfig::max_subs_in_old_optimized] subscriptions. - optimized_old_streams: Vec>, + + // -- Handles (needed for update_subscriptions) -- + /// Handle for the current-new stream. + current_new_handle: Option, + /// Handles for unoptimized old streams. + unoptimized_old_handles: Vec, + /// Handles for optimized old streams. + optimized_old_handles: Vec, + /// Handle + pubkey set for program subscriptions. + program_sub: Option<(HashSet, S)>, + + // -- All streams live here. -- + /// Streams separated from the handles in order to allow using them + /// inside a StreamMap + /// They are addressed via the StreamKey which includes an index for + /// [Self::unoptimized_old_handles] and [Self::optimized_old_handles]. + /// The key index matches the index of the corresponding vec. 
+ /// Persistent stream map polled by [Self::next_update]. + /// Updated only when stream topology changes. + stream_map: StreamMap, } #[allow(unused)] @@ -93,12 +130,13 @@ impl> StreamManager { Self { config, stream_factory, - program_subscriptions: None, subscriptions: Default::default(), current_new_subs: HashSet::new(), - current_new_stream: None, - unoptimized_old_streams: Vec::new(), - optimized_old_streams: Vec::new(), + current_new_handle: None, + unoptimized_old_handles: Vec::new(), + optimized_old_handles: Vec::new(), + program_sub: None, + stream_map: StreamMap::new(), } } @@ -145,10 +183,12 @@ impl> StreamManager { /// Subscribe to account updates for the given pubkeys. /// - /// Each pubkey is added to [Self::subscriptions] and to the [Self::current_new_stream]. - /// If the [Self::current_new_stream] exceeds [StreamManagerConfig::max_subs_in_new] it - /// is promoted and a fresh one is created. If [Self::unoptimized_old_streams] exceed - /// [StreamManagerConfig::max_old_unoptimized], optimization is triggered. + /// Each pubkey is added to [Self::subscriptions] and to the + /// current-new stream. If the current-new stream exceeds + /// [StreamManagerConfig::max_subs_in_new] it is promoted and + /// a fresh one is created. If unoptimized old handles exceed + /// [StreamManagerConfig::max_old_unoptimized], optimization + /// is triggered. pub async fn account_subscribe( &mut self, pubkeys: &[Pubkey], @@ -178,26 +218,21 @@ impl> StreamManager { } // Update the current-new stream with the full - // current_new_subs filter (either create new if doesn't exist, - // or update existing via write). - if let Some(stream) = &self.current_new_stream { + // current_new_subs filter (either create new if doesn't + // exist, or update existing via write). 
+ if let Some(handle) = &self.current_new_handle { let request = Self::build_account_request( &self.current_new_subs.iter().collect::>(), commitment, from_slot, ); - Self::update_subscriptions( - &stream.handle, - "account_subscribe", - request, - ) - .await? + Self::update_subscriptions(handle, "account_subscribe", request) + .await? } else { - self.current_new_stream = Some(self.create_account_stream( - &self.current_new_subs.iter().collect::>(), - commitment, - from_slot, - )); + let pks: Vec = + self.current_new_subs.iter().copied().collect(); + let pk_refs: Vec<&Pubkey> = pks.iter().collect(); + self.insert_current_new_stream(&pk_refs, commitment, from_slot); } // Promote if current-new exceeds threshold. @@ -208,28 +243,33 @@ impl> StreamManager { let overflow_start = new_pks.len().saturating_sub(overflow_count); let overflow_pks = &new_pks[overflow_start..]; - // Move current-new stream to unoptimized old. - if let Some(stream) = self.current_new_stream.take() { - self.unoptimized_old_streams.push(stream); + // Move current-new to unoptimized old. + if let Some(stream) = self.stream_map.remove(&StreamKey::CurrentNew) + { + let idx = self.unoptimized_old_handles.len(); + self.stream_map + .insert(StreamKey::UnoptimizedOld(idx), stream); + } + if let Some(handle) = self.current_new_handle.take() { + self.unoptimized_old_handles.push(handle); } self.current_new_subs.clear(); // Start fresh current-new with overflow pubkeys. - if overflow_pks.is_empty() { - self.current_new_stream = None; - } else { + if !overflow_pks.is_empty() { for pk in overflow_pks { self.current_new_subs.insert(*pk); } - self.current_new_stream = Some(self.create_account_stream( + self.insert_current_new_stream( &overflow_pks.iter().collect::>(), commitment, from_slot, - )); + ); } - // If unoptimized old streams exceed the limit, optimize. - if self.unoptimized_old_streams.len() + // If unoptimized old handles exceed the limit, + // optimize. 
+ if self.unoptimized_old_handles.len() > self.config.max_old_unoptimized { self.optimize(commitment); @@ -256,93 +296,88 @@ impl> StreamManager { pub fn clear_account_subscriptions(&mut self) { self.subscriptions.write().clear(); self.current_new_subs.clear(); - self.current_new_stream = None; - self.unoptimized_old_streams.clear(); - self.optimized_old_streams.clear(); + self.current_new_handle = None; + self.stream_map.remove(&StreamKey::CurrentNew); + for i in 0..self.unoptimized_old_handles.len() { + self.stream_map.remove(&StreamKey::UnoptimizedOld(i)); + } + self.unoptimized_old_handles.clear(); + for i in 0..self.optimized_old_handles.len() { + self.stream_map.remove(&StreamKey::OptimizedOld(i)); + } + self.optimized_old_handles.clear(); } /// Returns `true` if any account stream exists. pub fn has_account_subscriptions(&self) -> bool { - self.current_new_stream.is_some() - || !self.unoptimized_old_streams.is_empty() - || !self.optimized_old_streams.is_empty() + self.current_new_handle.is_some() + || !self.unoptimized_old_handles.is_empty() + || !self.optimized_old_handles.is_empty() } - /// Polls all account and program streams, returning the next + /// Polls all streams in the [StreamMap], returning the next /// available update tagged with its source. - /// Returns `None` when all streams have ended. + /// Returns `None` when the map is empty. 
pub async fn next_update( &mut self, ) -> Option<(StreamUpdateSource, LaserResult)> { - let mut all: Vec<(StreamUpdateSource, &mut LaserStreamWithHandle)> = - Vec::new(); - for s in &mut self.optimized_old_streams { - all.push((StreamUpdateSource::Account, s)); - } - for s in &mut self.unoptimized_old_streams { - all.push((StreamUpdateSource::Account, s)); - } - if let Some(s) = &mut self.current_new_stream { - all.push((StreamUpdateSource::Account, s)); - } - if let Some((_, s)) = &mut self.program_subscriptions { - all.push((StreamUpdateSource::Program, s)); - } - - if all.is_empty() { - return None; - } - - let futs: futures_util::stream::FuturesUnordered<_> = all - .into_iter() - .map(|(src, s)| { - let stream = &mut s.stream; - async move { (src, stream.next().await) } - }) - .collect(); - let (src, result) = futs.into_future().await.0?; - Some((src, result?)) + use tokio_stream::StreamExt; + let (key, result) = self.stream_map.next().await?; + Some((key.source(), result)) } - /// Returns `true` if any stream (account or program) - /// exists. + /// Returns `true` if any stream (account or program) exists. pub fn has_any_subscriptions(&self) -> bool { - self.has_account_subscriptions() || self.has_program_subscriptions() + !self.stream_map.is_empty() } /// Rebuild all account streams from `subscriptions`. /// /// 1. Chunk `subscriptions` into groups of /// `max_subs_in_old_optimized`. - /// 2. Create a new stream for each chunk → `optimized_old_streams`. - /// 3. Clear `unoptimized_old_streams`. + /// 2. Create a new stream for each chunk → + /// `optimized_old_handles`. + /// 3. Clear `unoptimized_old_handles`. /// 4. Reset the current-new stream (empty filter). pub fn optimize(&mut self, commitment: &CommitmentLevel) { + // Remove all account streams from the map. 
+ self.stream_map.remove(&StreamKey::CurrentNew); + for i in 0..self.unoptimized_old_handles.len() { + self.stream_map.remove(&StreamKey::UnoptimizedOld(i)); + } + for i in 0..self.optimized_old_handles.len() { + self.stream_map.remove(&StreamKey::OptimizedOld(i)); + } + // Collect all active subscriptions and chunk them. let all_pks: Vec = self.subscriptions.read().iter().copied().collect(); // Build optimized old streams from chunks. - self.optimized_old_streams = all_pks + self.optimized_old_handles = Vec::new(); + for (i, chunk) in all_pks .chunks(self.config.max_subs_in_old_optimized) - .map(|chunk| { - let refs: Vec<&Pubkey> = chunk.iter().collect(); + .enumerate() + { + let refs: Vec<&Pubkey> = chunk.iter().collect(); + let LaserStreamWithHandle { stream, handle } = self.stream_factory.subscribe(Self::build_account_request( &refs, commitment, None, - )) - }) - .collect(); + )); + self.stream_map.insert(StreamKey::OptimizedOld(i), stream); + self.optimized_old_handles.push(handle); + } - // Clear unoptimized old streams. - self.unoptimized_old_streams.clear(); + // Clear unoptimized old handles. + self.unoptimized_old_handles.clear(); // Reset the current-new stream. self.current_new_subs.clear(); - self.current_new_stream = None; + self.current_new_handle = None; } - /// Returns `true` if the pubkey is in the active `subscriptions` - /// set. + /// Returns `true` if the pubkey is in the active + /// `subscriptions` set. pub fn is_subscribed(&self, pubkey: &Pubkey) -> bool { self.subscriptions.read().contains(pubkey) } @@ -362,47 +397,28 @@ impl> StreamManager { self.current_new_subs.len() } - /// Returns a reference to the current-new stream's pubkey set. + /// Returns a reference to the current-new stream's pubkey + /// set. fn current_new_subs(&self) -> &HashSet { &self.current_new_subs } /// Returns the number of unoptimized old streams. 
fn unoptimized_old_stream_count(&self) -> usize { - self.unoptimized_old_streams.len() + self.unoptimized_old_handles.len() } /// Returns the number of optimized old streams. fn optimized_old_stream_count(&self) -> usize { - self.optimized_old_streams.len() - } - - /// Returns references to all account streams (optimized old + - /// unoptimized old + current-new) for inspection. - fn all_account_streams(&self) -> Vec<&LaserStreamWithHandle> { - let mut streams = Vec::new(); - for s in &self.optimized_old_streams { - streams.push(s); - } - for s in &self.unoptimized_old_streams { - streams.push(s); - } - if let Some(s) = &self.current_new_stream { - streams.push(s); - } - streams + self.optimized_old_handles.len() } /// Returns the total number of account streams across all /// generations. fn account_stream_count(&self) -> usize { - let current = if self.current_new_stream.is_some() { - 1 - } else { - 0 - }; - self.optimized_old_streams.len() - + self.unoptimized_old_streams.len() + let current = usize::from(self.current_new_handle.is_some()); + self.optimized_old_handles.len() + + self.unoptimized_old_handles.len() + current } @@ -411,7 +427,8 @@ impl> StreamManager { // --------------------------------------------------------- /// Build a `SubscribeRequest` for the given account pubkeys. - /// Includes a slot subscription for chain slot synchronisation. + /// Includes a slot subscription for chain slot + /// synchronisation. fn build_account_request( pubkeys: &[&Pubkey], commitment: &CommitmentLevel, @@ -444,29 +461,32 @@ impl> StreamManager { } } - /// Build a `SubscribeRequest` and call the factory for the given - /// account pubkeys. - fn create_account_stream( - &self, + /// Create an account stream via the factory and insert it + /// as the current-new stream in the [StreamMap]. 
+ fn insert_current_new_stream( + &mut self, pubkeys: &[&Pubkey], commitment: &CommitmentLevel, from_slot: Option, - ) -> LaserStreamWithHandle { + ) { let request = Self::build_account_request(pubkeys, commitment, from_slot); - self.stream_factory.subscribe(request) + let LaserStreamWithHandle { stream, handle } = + self.stream_factory.subscribe(request); + self.stream_map.insert(StreamKey::CurrentNew, stream); + self.current_new_handle = Some(handle); } /// Adds a program subscription. If the program is already - /// subscribed, this is a no-op. Otherwise, updates the program - /// stream to include all subscribed programs. + /// subscribed, this is a no-op. Otherwise, updates the + /// program stream to include all subscribed programs. pub async fn add_program_subscription( &mut self, program_id: Pubkey, commitment: &CommitmentLevel, ) -> RemoteAccountProviderResult<()> { if self - .program_subscriptions + .program_sub .as_ref() .is_some_and(|(subs, _)| subs.contains(&program_id)) { @@ -474,7 +494,7 @@ impl> StreamManager { } let mut subscribed_programs = self - .program_subscriptions + .program_sub .as_ref() .map(|(subs, _)| subs.clone()) .unwrap_or_default(); @@ -484,43 +504,31 @@ impl> StreamManager { let program_ids: Vec<&Pubkey> = subscribed_programs.iter().collect(); let request = Self::build_program_request(&program_ids, commitment); - if let Some((_, stream)) = &self.program_subscriptions { - // Update existing stream - Self::update_subscriptions( - &stream.handle, - "program_subscribe", - request, - ) - .await?; - // Update the set of subscribed programs - if let Some((subs, _)) = &mut self.program_subscriptions { + if let Some((subs, handle)) = &self.program_sub { + Self::update_subscriptions(handle, "program_subscribe", request) + .await?; + if let Some((subs, _)) = &mut self.program_sub { *subs = subscribed_programs; } } else { - // Create new stream - let stream = self.create_program_stream(&program_ids, commitment); - self.program_subscriptions = 
Some((subscribed_programs, stream)); + let LaserStreamWithHandle { stream, handle } = + self.create_program_stream(&program_ids, commitment); + self.stream_map.insert(StreamKey::Program, stream); + self.program_sub = Some((subscribed_programs, handle)); } Ok(()) } - /// Returns a mutable reference to the program subscriptions - /// stream (if any) for polling in the actor loop. - pub fn program_stream_mut( - &mut self, - ) -> Option<&mut LaserStreamWithHandle> { - self.program_subscriptions.as_mut().map(|(_, s)| s) - } - /// Returns whether there are active program subscriptions. pub fn has_program_subscriptions(&self) -> bool { - self.program_subscriptions.is_some() + self.program_sub.is_some() } /// Clears all program subscriptions. pub fn clear_program_subscriptions(&mut self) { - self.program_subscriptions = None; + self.stream_map.remove(&StreamKey::Program); + self.program_sub = None; } /// Build a `SubscribeRequest` for the given program IDs. @@ -613,10 +621,7 @@ mod tests { subs.len(), ); for pk in expected { - assert!( - subs.contains(pk), - "subscription set missing pubkey {pk}", - ); + assert!(subs.contains(pk), "subscription set missing pubkey {pk}",); } } @@ -1146,11 +1151,11 @@ mod tests { } // ------------------------------------------------------------- - // 8. Stream Enumeration / Polling Access + // 8. Stream Count Across Generations // ------------------------------------------------------------- #[tokio::test] - async fn test_all_account_streams_includes_all_generations() { + async fn test_account_stream_count_includes_all_generations() { let (mut mgr, _factory) = create_manager(); // Create optimized old streams. subscribe_n(&mut mgr, 15).await; @@ -1160,21 +1165,24 @@ mod tests { subscribe_n(&mut mgr, 6).await; // Current-new also exists from the overflow pubkey. 
- let expected = mgr.account_stream_count(); - let streams = mgr.all_account_streams(); - assert_eq!(streams.len(), expected); + let count = mgr.account_stream_count(); + assert!(count > 0); + assert_eq!( + count, + mgr.optimized_old_stream_count() + + mgr.unoptimized_old_stream_count() + + 1, // current-new + ); } #[test] - fn test_all_account_streams_empty_when_no_subscriptions() { + fn test_account_stream_count_zero_when_no_subscriptions() { let (mgr, _factory) = create_manager(); - - let streams = mgr.all_account_streams(); - assert!(streams.is_empty()); + assert_eq!(mgr.account_stream_count(), 0); } #[tokio::test] - async fn test_all_account_streams_after_optimize_drops_old_unoptimized() { + async fn test_account_stream_count_after_optimize_drops_unoptimized() { let (mut mgr, _factory) = create_manager(); // Create unoptimized old streams. for _ in 0..2 { @@ -1185,10 +1193,12 @@ mod tests { mgr.optimize(&COMMITMENT); assert_eq!(mgr.unoptimized_old_stream_count(), 0); - let streams = mgr.all_account_streams(); // Only optimized old streams remain (current-new is empty // after optimize). 
- assert_eq!(streams.len(), mgr.optimized_old_stream_count()); + assert_eq!( + mgr.account_stream_count(), + mgr.optimized_old_stream_count(), + ); } // ------------------------------------------------------------- From 45d7261341dfc64d26befc6c67281c5f1e7ea879 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 19 Feb 2026 15:58:56 +0700 Subject: [PATCH 62/64] chore: add next_update tests --- .../chain_laser_actor/stream_manager.rs | 107 ++++++++++++++++++ 1 file changed, 107 insertions(+) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs index b84ad579b..96746ba84 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs @@ -1449,4 +1449,111 @@ mod tests { ); } } + + // --------------------------------------------------------- + // 12. 
next_update Stream Updates + // --------------------------------------------------------- + + #[tokio::test] + async fn test_next_update_receives_account_updates() { + use helius_laserstream::grpc::SubscribeUpdate; + use std::time::Duration; + + let (mut mgr, factory) = create_manager(); + subscribe_n(&mut mgr, 2).await; + + factory.push_update_to_stream( + 0, + Ok(SubscribeUpdate::default()), + ); + + let result = tokio::time::timeout( + Duration::from_millis(100), + mgr.next_update(), + ) + .await + .expect("next_update timed out"); + + let (source, update) = result.expect("stream ended"); + assert_eq!(source, StreamUpdateSource::Account); + assert!(update.is_ok()); + } + + #[tokio::test] + async fn test_next_update_receives_program_updates() { + use helius_laserstream::grpc::SubscribeUpdate; + use std::time::Duration; + + let (mut mgr, factory) = create_manager(); + let program_id = Pubkey::new_unique(); + mgr.add_program_subscription(program_id, &COMMITMENT) + .await + .unwrap(); + + factory.push_update_to_stream( + 0, + Ok(SubscribeUpdate::default()), + ); + + let result = tokio::time::timeout( + Duration::from_millis(100), + mgr.next_update(), + ) + .await + .expect("next_update timed out"); + + let (source, update) = result.expect("stream ended"); + assert_eq!(source, StreamUpdateSource::Program); + assert!(update.is_ok()); + } + + #[tokio::test] + async fn test_next_update_receives_mixed_account_and_program() + { + use helius_laserstream::grpc::SubscribeUpdate; + use std::time::Duration; + + let (mut mgr, factory) = create_manager(); + + // Account stream → index 0 + subscribe_n(&mut mgr, 2).await; + // Program stream → index 1 + let program_id = Pubkey::new_unique(); + mgr.add_program_subscription(program_id, &COMMITMENT) + .await + .unwrap(); + + factory.push_update_to_stream( + 0, + Ok(SubscribeUpdate::default()), + ); + factory.push_update_to_stream( + 1, + Ok(SubscribeUpdate::default()), + ); + + let mut sources = Vec::new(); + for _ in 0..2 { + let 
result = tokio::time::timeout( + Duration::from_millis(100), + mgr.next_update(), + ) + .await + .expect("next_update timed out"); + + let (source, update) = + result.expect("stream ended"); + assert!(update.is_ok()); + sources.push(source); + } + + assert!( + sources.contains(&StreamUpdateSource::Account), + "expected an Account update", + ); + assert!( + sources.contains(&StreamUpdateSource::Program), + "expected a Program update", + ); + } } From 22414b357c46762ffdd91131a17964f03b35829e Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 19 Feb 2026 15:59:53 +0700 Subject: [PATCH 63/64] chore: fmt --- .../chain_laser_actor/actor.rs | 17 ++---- .../chain_laser_actor/mod.rs | 6 +- .../chain_laser_actor/stream_manager.rs | 55 +++++++------------ 3 files changed, 26 insertions(+), 52 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs index 0d7afb9e5..dc72d5c70 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/actor.rs @@ -210,12 +210,9 @@ impl> ChainLaserActor { mpsc::channel(MESSAGE_CHANNEL_SIZE); let commitment = grpc_commitment_from_solana(commitment); - let stream_manager = StreamManager::new( - StreamManagerConfig::default(), - stream_factory, - ); - let shared_subscriptions = - Arc::clone(stream_manager.subscriptions()); + let stream_manager = + StreamManager::new(StreamManagerConfig::default(), stream_factory); + let shared_subscriptions = Arc::clone(stream_manager.subscriptions()); let me = Self { stream_manager, @@ -324,9 +321,7 @@ impl> ChainLaserActor { } Shutdown { response } => { info!(client_id = self.client_id, "Received Shutdown message"); - Self::clear_subscriptions( - &mut self.stream_manager, - ); + Self::clear_subscriptions(&mut self.stream_manager); let _ = response.send(Ok(())).inspect_err(|_| { warn!( 
client_id = self.client_id, @@ -559,9 +554,7 @@ impl> ChainLaserActor { }); } - fn clear_subscriptions( - stream_manager: &mut StreamManager, - ) { + fn clear_subscriptions(stream_manager: &mut StreamManager) { stream_manager.clear_account_subscriptions(); stream_manager.clear_program_subscriptions(); } diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs index c9a6f39a0..e68c265f7 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs @@ -1,8 +1,4 @@ -use std::{ - collections::HashSet, - pin::Pin, - sync::Arc, -}; +use std::{collections::HashSet, pin::Pin, sync::Arc}; use async_trait::async_trait; use futures_util::Stream; diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs index 96746ba84..3179025bd 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/stream_manager.rs @@ -1456,23 +1456,19 @@ mod tests { #[tokio::test] async fn test_next_update_receives_account_updates() { - use helius_laserstream::grpc::SubscribeUpdate; use std::time::Duration; + use helius_laserstream::grpc::SubscribeUpdate; + let (mut mgr, factory) = create_manager(); subscribe_n(&mut mgr, 2).await; - factory.push_update_to_stream( - 0, - Ok(SubscribeUpdate::default()), - ); + factory.push_update_to_stream(0, Ok(SubscribeUpdate::default())); - let result = tokio::time::timeout( - Duration::from_millis(100), - mgr.next_update(), - ) - .await - .expect("next_update timed out"); + let result = + tokio::time::timeout(Duration::from_millis(100), mgr.next_update()) + .await + .expect("next_update timed out"); let (source, update) = 
result.expect("stream ended"); assert_eq!(source, StreamUpdateSource::Account); @@ -1481,26 +1477,22 @@ mod tests { #[tokio::test] async fn test_next_update_receives_program_updates() { - use helius_laserstream::grpc::SubscribeUpdate; use std::time::Duration; + use helius_laserstream::grpc::SubscribeUpdate; + let (mut mgr, factory) = create_manager(); let program_id = Pubkey::new_unique(); mgr.add_program_subscription(program_id, &COMMITMENT) .await .unwrap(); - factory.push_update_to_stream( - 0, - Ok(SubscribeUpdate::default()), - ); + factory.push_update_to_stream(0, Ok(SubscribeUpdate::default())); - let result = tokio::time::timeout( - Duration::from_millis(100), - mgr.next_update(), - ) - .await - .expect("next_update timed out"); + let result = + tokio::time::timeout(Duration::from_millis(100), mgr.next_update()) + .await + .expect("next_update timed out"); let (source, update) = result.expect("stream ended"); assert_eq!(source, StreamUpdateSource::Program); @@ -1508,11 +1500,11 @@ mod tests { } #[tokio::test] - async fn test_next_update_receives_mixed_account_and_program() - { - use helius_laserstream::grpc::SubscribeUpdate; + async fn test_next_update_receives_mixed_account_and_program() { use std::time::Duration; + use helius_laserstream::grpc::SubscribeUpdate; + let (mut mgr, factory) = create_manager(); // Account stream → index 0 @@ -1523,14 +1515,8 @@ mod tests { .await .unwrap(); - factory.push_update_to_stream( - 0, - Ok(SubscribeUpdate::default()), - ); - factory.push_update_to_stream( - 1, - Ok(SubscribeUpdate::default()), - ); + factory.push_update_to_stream(0, Ok(SubscribeUpdate::default())); + factory.push_update_to_stream(1, Ok(SubscribeUpdate::default())); let mut sources = Vec::new(); for _ in 0..2 { @@ -1541,8 +1527,7 @@ mod tests { .await .expect("next_update timed out"); - let (source, update) = - result.expect("stream ended"); + let (source, update) = result.expect("stream ended"); assert!(update.is_ok()); sources.push(source); } From 
e9405422f03e5205aa545f822fc50b458c49ace0 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 19 Feb 2026 16:03:52 +0700 Subject: [PATCH 64/64] chore: remove dead_code --- .../src/remote_account_provider/chain_laser_actor/mock.rs | 3 --- .../src/remote_account_provider/chain_laser_actor/mod.rs | 4 +--- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mock.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mock.rs index 04e6a4d28..b63b34f5f 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mock.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mock.rs @@ -16,7 +16,6 @@ use crate::remote_account_provider::chain_laser_actor::{ /// A test mock that captures subscription requests and allows driving /// streams programmatically. #[derive(Clone)] -#[allow(dead_code)] pub struct MockStreamFactory { /// Every `SubscribeRequest` passed to `subscribe()` is recorded /// here so tests can assert on filter contents, commitment levels, @@ -35,7 +34,6 @@ pub struct MockStreamFactory { stream_senders: Arc>>>>, } -#[allow(dead_code)] impl MockStreamFactory { /// Create a new mock stream factory pub fn new() -> Self { @@ -111,7 +109,6 @@ impl Default for MockStreamFactory { /// Mock handle that records write requests and drains them into the /// shared `handle_requests` vec on the factory. 
#[derive(Clone)] -#[allow(dead_code)] pub struct MockStreamHandle { handle_requests: Arc>>, } diff --git a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs index e68c265f7..182e0fb55 100644 --- a/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs +++ b/magicblock-chainlink/src/remote_account_provider/chain_laser_actor/mod.rs @@ -17,6 +17,7 @@ pub use self::{ pub type SharedSubscriptions = Arc>>; mod actor; +#[cfg(test)] mod mock; mod stream_manager; @@ -36,7 +37,6 @@ pub trait StreamFactory: Send + Sync + 'static { /// This is needed since we cannot create the helius one since /// [helius_laserstream::StreamHandle::write_tx] is private and there is no constructor. #[async_trait] -#[allow(dead_code)] pub trait StreamHandle { /// Send a new subscription request to update the active subscription. async fn write( @@ -45,13 +45,11 @@ pub trait StreamHandle { ) -> Result<(), LaserstreamError>; } -#[allow(dead_code)] pub struct LaserStreamWithHandle { pub(crate) stream: LaserStream, pub(crate) handle: S, } -#[allow(dead_code)] pub struct StreamHandleImpl { pub handle: HeliusStreamHandle, }