From b3bf07f231a430ba1b15841ed9b467f5ab018112 Mon Sep 17 00:00:00 2001 From: tac0turtle Date: Mon, 8 Dec 2025 11:56:49 +0100 Subject: [PATCH 1/4] bytecode and ram pinning --- Cargo.lock | 143 +++++ Cargo.toml | 1 + crates/ev-revm/Cargo.toml | 6 + crates/ev-revm/benches/cache_benchmark.rs | 377 +++++++++++ crates/ev-revm/src/cache.rs | 737 ++++++++++++++++++++++ crates/ev-revm/src/lib.rs | 2 + crates/node/src/builder.rs | 56 +- crates/node/src/config.rs | 73 +++ 8 files changed, 1391 insertions(+), 4 deletions(-) create mode 100644 crates/ev-revm/benches/cache_benchmark.rs create mode 100644 crates/ev-revm/src/cache.rs diff --git a/Cargo.lock b/Cargo.lock index a4bf7f2..2c4fad3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -954,6 +954,12 @@ dependencies = [ "libc", ] +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + [[package]] name = "anstream" version = "0.6.19" @@ -1791,6 +1797,12 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df8670b8c7b9dae1793364eafadf7239c40d669904660c5960d74cfd80b46a53" +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + [[package]] name = "castaway" version = "0.2.4" @@ -1853,6 +1865,33 @@ dependencies = [ "windows-link", ] +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half", +] + [[package]] name = "cipher" version = "0.4.4" @@ -2131,6 +2170,42 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "criterion" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" +dependencies = [ + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot", + "is-terminal", + "itertools 0.10.5", + "num-traits", + "once_cell", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast", + "itertools 0.10.5", +] + [[package]] name = "critical-section" version = "1.2.0" @@ -2992,7 +3067,9 @@ dependencies = [ "alloy-evm", "alloy-primitives", "alloy-sol-types", + "criterion", "ev-precompiles", + "rand 0.8.5", "reth-evm", "reth-evm-ethereum", "reth-primitives", @@ -3545,6 +3622,17 @@ dependencies = [ "tracing", ] +[[package]] +name = "half" +version = "2.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" 
+dependencies = [ + "cfg-if", + "crunchy", + "zerocopy", +] + [[package]] name = "hash-db" version = "0.15.2" @@ -4236,6 +4324,17 @@ dependencies = [ "serde", ] +[[package]] +name = "is-terminal" +version = "0.4.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" +dependencies = [ + "hermit-abi", + "libc", + "windows-sys 0.60.2", +] + [[package]] name = "is_terminal_polyfill" version = "1.70.1" @@ -5250,6 +5349,12 @@ version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" +[[package]] +name = "oorandom" +version = "11.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" + [[package]] name = "op-alloy-consensus" version = "0.22.1" @@ -5664,6 +5769,34 @@ dependencies = [ "crunchy", ] +[[package]] +name = "plotters" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" + +[[package]] +name = "plotters-svg" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" +dependencies = [ + "plotters-backend", +] + [[package]] name = "polyval" version = "0.6.2" @@ -10342,6 +10475,16 @@ dependencies = [ "zerovec", ] +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "tinyvec" version = "1.9.0" diff --git a/Cargo.toml b/Cargo.toml index d226fcf..cf6b6b4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -138,6 +138,7 @@ rand = "0.8" tempfile = "3.10" hex = "0.4" url = "2.5" +criterion = { version = "0.5", features = ["html_reports"] } [workspace.lints] rust.missing_debug_implementations = "warn" diff --git a/crates/ev-revm/Cargo.toml b/crates/ev-revm/Cargo.toml index 9ee6715..9718833 100644 --- a/crates/ev-revm/Cargo.toml +++ b/crates/ev-revm/Cargo.toml @@ -22,6 +22,12 @@ ev-precompiles = { path = "../ev-precompiles" } [dev-dependencies] alloy-sol-types.workspace = true +criterion.workspace = true +rand.workspace = true + +[[bench]] +name = "cache_benchmark" +harness = false [lints] workspace = true diff --git a/crates/ev-revm/benches/cache_benchmark.rs b/crates/ev-revm/benches/cache_benchmark.rs new file mode 100644 index 0000000..d988b9f --- /dev/null +++ b/crates/ev-revm/benches/cache_benchmark.rs @@ -0,0 +1,377 @@ +//! Benchmarks for EVM caching layers. +//! +//! This benchmark compares performance of: +//! - Direct database access vs bytecode caching +//! - Direct database access vs pinned storage caching +//! +//! 
Run with: cargo bench -p ev-revm
+
+use alloy_primitives::{Address, B256, U256};
+use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
+use ev_revm::cache::{BytecodeCache, CachedDatabase, PinnedStorageCache};
+use rand::{Rng, SeedableRng};
+use reth_revm::revm::{
+    context_interface::Database,
+    state::{AccountInfo, Bytecode},
+};
+use std::{collections::HashMap, sync::Arc};
+
+/// Mock database with configurable latency simulation.
+/// In real usage, database access involves disk I/O which is orders of magnitude slower.
+#[derive(Debug)]
+struct MockDatabase {
+    bytecodes: HashMap<B256, Bytecode>,
+    storage: HashMap<(Address, U256), U256>,
+    /// Simulated latency per operation (in nanoseconds worth of work)
+    latency_factor: usize,
+}
+
+impl MockDatabase {
+    fn new(latency_factor: usize) -> Self {
+        Self {
+            bytecodes: HashMap::new(),
+            storage: HashMap::new(),
+            latency_factor,
+        }
+    }
+
+    fn with_bytecodes(mut self, count: usize) -> Self {
+        for i in 0..count {
+            let code_hash = B256::repeat_byte((i % 256) as u8);
+            // Create realistic bytecode (average contract ~5KB)
+            let bytecode_size = 5000 + (i % 1000);
+            let mut code = vec![0x60u8; bytecode_size]; // PUSH1 opcodes
+            code[0] = 0x60;
+            code[1] = (i % 256) as u8;
+            self.bytecodes.insert(code_hash, Bytecode::new_raw(code.into()));
+        }
+        self
+    }
+
+    fn with_storage(mut self, address: Address, slot_count: usize) -> Self {
+        for i in 0..slot_count {
+            let slot = U256::from(i);
+            let value = U256::from(i * 1000);
+            self.storage.insert((address, slot), value);
+        }
+        self
+    }
+
+    /// Simulate work to represent disk I/O latency
+    fn simulate_latency(&self) {
+        // Do some work proportional to latency_factor
+        // This simulates the overhead of disk access
+        let mut dummy: u64 = 1;
+        for _ in 0..self.latency_factor {
+            dummy = dummy.wrapping_mul(7).wrapping_add(11);
+        }
+        // Prevent optimization
+        black_box(dummy);
+    }
+}
+
+impl Database for MockDatabase {
+    type Error = std::convert::Infallible;
+
+    fn basic(&mut self, _address: Address) -> Result<Option<AccountInfo>, Self::Error> {
+        self.simulate_latency();
+        Ok(None)
+    }
+
+    fn code_by_hash(&mut self, code_hash: B256) -> Result<Bytecode, Self::Error> {
+        self.simulate_latency();
+        Ok(self.bytecodes.get(&code_hash).cloned().unwrap_or_default())
+    }
+
+    fn storage(&mut self, address: Address, index: U256) -> Result<U256, Self::Error> {
+        self.simulate_latency();
+        Ok(self.storage.get(&(address, index)).copied().unwrap_or(U256::ZERO))
+    }
+
+    fn block_hash(&mut self, _number: u64) -> Result<B256, Self::Error> {
+        self.simulate_latency();
+        Ok(B256::ZERO)
+    }
+}
+
+/// Benchmark bytecode cache hit performance
+fn bench_bytecode_cache_hit(c: &mut Criterion) {
+    let mut group = c.benchmark_group("bytecode_cache");
+
+    // Different cache hit scenarios
+    for &num_contracts in &[10, 100, 1000] {
+        group.throughput(Throughput::Elements(num_contracts as u64));
+
+        // Setup: pre-populate cache
+        let cache = Arc::new(BytecodeCache::new(10_000));
+        let mock_db = MockDatabase::new(100).with_bytecodes(num_contracts);
+
+        // Pre-warm cache
+        let code_hashes: Vec<_> = (0..num_contracts)
+            .map(|i| B256::repeat_byte((i % 256) as u8))
+            .collect();
+
+        for hash in &code_hashes {
+            if let Some(bytecode) = mock_db.bytecodes.get(hash) {
+                cache.insert(*hash, bytecode.clone());
+            }
+        }
+
+        let mut cached_db = CachedDatabase::new(mock_db, cache);
+
+        group.bench_with_input(
+            BenchmarkId::new("cache_hit", num_contracts),
+            &code_hashes,
+            |b, hashes| {
+                b.iter(|| {
+                    for hash in hashes {
+                        let _ = black_box(cached_db.code_by_hash(*hash));
+                    }
+                })
+            },
+        );
+    }
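+    // Note: `B256::repeat_byte((i % 256) as u8)` yields at most 256 distinct hashes,
+    // so the 1000-contract case re-reads 256 cached entries rather than 1000 unique
+    // ones; these figures measure hit latency, not cache capacity.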
+ + group.finish(); +} + +/// Benchmark bytecode cache miss vs hit comparison +fn bench_bytecode_cache_miss_vs_hit(c: &mut Criterion) { + let mut group = c.benchmark_group("bytecode_cache_comparison"); + + let num_lookups = 100; + group.throughput(Throughput::Elements(num_lookups as u64)); + + // Setup databases with simulated latency + let latency_factor = 1000; // Simulate ~1000 cycles of work per DB access + + // Pre-generate code hashes + let code_hashes: Vec<_> = (0..num_lookups) + .map(|i| B256::repeat_byte((i % 256) as u8)) + .collect(); + + // Benchmark: No caching (direct DB access) + { + let mock_db = MockDatabase::new(latency_factor).with_bytecodes(num_lookups); + + // Wrap in CachedDatabase but don't pre-warm (always miss) + let cache = Arc::new(BytecodeCache::new(10_000)); + let mut cached_db = CachedDatabase::new(mock_db, cache); + + group.bench_function("always_miss", |b| { + b.iter(|| { + // Clear cache to ensure misses + cached_db.cache().clear(); + for hash in &code_hashes { + let _ = black_box(cached_db.code_by_hash(*hash)); + } + }) + }); + } + + // Benchmark: With caching (all hits after first pass) + { + let mock_db = MockDatabase::new(latency_factor).with_bytecodes(num_lookups); + let cache = Arc::new(BytecodeCache::new(10_000)); + let mut cached_db = CachedDatabase::new(mock_db, cache); + + // Pre-warm cache + for hash in &code_hashes { + let _ = cached_db.code_by_hash(*hash); + } + + group.bench_function("always_hit", |b| { + b.iter(|| { + for hash in &code_hashes { + let _ = black_box(cached_db.code_by_hash(*hash)); + } + }) + }); + } + + group.finish(); +} + +/// Benchmark pinned storage cache performance +fn bench_pinned_storage_cache(c: &mut Criterion) { + let mut group = c.benchmark_group("pinned_storage_cache"); + + let pinned_contract = Address::repeat_byte(0x42); + let num_slots = 100; + group.throughput(Throughput::Elements(num_slots as u64)); + + let slots: Vec<_> = (0..num_slots).map(U256::from).collect(); + let latency_factor = 1000; + + // Benchmark: Non-pinned storage (no caching) + { + let non_pinned = Address::repeat_byte(0x01); + let mock_db = MockDatabase::new(latency_factor).with_storage(non_pinned, num_slots); + let bytecode_cache = Arc::new(BytecodeCache::new(100)); + let pinned_storage = Arc::new(PinnedStorageCache::new(vec![pinned_contract])); // Different address + let mut cached_db = + CachedDatabase::with_pinned_storage(mock_db, bytecode_cache, pinned_storage); + + group.bench_function("non_pinned", |b| { + b.iter(|| { + for slot in &slots { + let _ = black_box(cached_db.storage(non_pinned, *slot)); + } + }) + }); + } + + // Benchmark: Pinned storage (cache hit) + { + let mock_db = MockDatabase::new(latency_factor).with_storage(pinned_contract, num_slots); + let bytecode_cache = Arc::new(BytecodeCache::new(100)); + let pinned_storage = Arc::new(PinnedStorageCache::new(vec![pinned_contract])); + let mut cached_db = + CachedDatabase::with_pinned_storage(mock_db, bytecode_cache, pinned_storage); + + // Pre-warm cache + for slot in &slots { + let _ = cached_db.storage(pinned_contract, *slot); + } + + group.bench_function("pinned_hit", |b| { + b.iter(|| { + for slot in &slots { + let _ = black_box(cached_db.storage(pinned_contract, *slot)); + } + }) + }); + } + + group.finish(); +} + +/// Benchmark mixed workload with realistic access patterns +fn bench_mixed_workload(c: &mut Criterion) { + let mut group = c.benchmark_group("mixed_workload"); + + let num_contracts = 50; + let num_pinned = 5; + let slots_per_contract = 20; + let iterations = 
100; + + group.throughput(Throughput::Elements(iterations as u64)); + + // Setup: Multiple contracts, some pinned + let pinned_contracts: Vec<_> = (0..num_pinned).map(|i| Address::repeat_byte(i as u8)).collect(); + let all_contracts: Vec<_> = (0..num_contracts) + .map(|i| Address::repeat_byte(i as u8)) + .collect(); + let code_hashes: Vec<_> = (0..num_contracts) + .map(|i| B256::repeat_byte(i as u8)) + .collect(); + let slots: Vec<_> = (0..slots_per_contract).map(U256::from).collect(); + + let latency_factor = 500; + + // Create mock database with all storage + let mut mock_db = MockDatabase::new(latency_factor).with_bytecodes(num_contracts); + for contract in &all_contracts { + mock_db = MockDatabase { + bytecodes: mock_db.bytecodes, + storage: { + let mut s = mock_db.storage; + for i in 0..slots_per_contract { + s.insert((*contract, U256::from(i)), U256::from(i * 1000)); + } + s + }, + latency_factor, + }; + } + + // Benchmark: No caching + { + let db = MockDatabase::new(latency_factor).with_bytecodes(num_contracts); + let db = MockDatabase { + bytecodes: db.bytecodes, + storage: mock_db.storage.clone(), + latency_factor, + }; + + let bytecode_cache = Arc::new(BytecodeCache::new(100)); + let pinned_storage = Arc::new(PinnedStorageCache::empty()); // No pinning + let mut cached_db = + CachedDatabase::with_pinned_storage(db, bytecode_cache, pinned_storage); + + // Simulate realistic access: read bytecode, then storage slots + let mut rng = rand::rngs::StdRng::seed_from_u64(42); + + group.bench_function("no_pinning", |b| { + b.iter(|| { + cached_db.cache().clear(); + if let Some(ps) = cached_db.pinned_storage() { + ps.clear(); + } + for _ in 0..iterations { + let contract_idx = rng.gen_range(0..num_contracts); + let _ = black_box(cached_db.code_by_hash(code_hashes[contract_idx])); + for _ in 0..3 { + let slot_idx = rng.gen_range(0..slots_per_contract); + let _ = black_box( + cached_db.storage(all_contracts[contract_idx], slots[slot_idx]), + ); + } + } + }) + }); + } + + // Benchmark: With bytecode + pinned storage caching + { + let db = MockDatabase::new(latency_factor).with_bytecodes(num_contracts); + let db = MockDatabase { + bytecodes: db.bytecodes, + storage: mock_db.storage.clone(), + latency_factor, + }; + + let bytecode_cache = Arc::new(BytecodeCache::new(100)); + let pinned_storage = Arc::new(PinnedStorageCache::new(pinned_contracts.clone())); + let mut cached_db = + CachedDatabase::with_pinned_storage(db, bytecode_cache, pinned_storage); + + // Pre-warm caches + for hash in &code_hashes { + let _ = cached_db.code_by_hash(*hash); + } + for contract in &pinned_contracts { + for slot in &slots { + let _ = cached_db.storage(*contract, *slot); + } + } + + let mut rng = rand::rngs::StdRng::seed_from_u64(42); + + group.bench_function("with_caching", |b| { + b.iter(|| { + for _ in 0..iterations { + let contract_idx = rng.gen_range(0..num_contracts); + let _ = black_box(cached_db.code_by_hash(code_hashes[contract_idx])); + for _ in 0..3 { + let slot_idx = rng.gen_range(0..slots_per_contract); + let _ = black_box( + cached_db.storage(all_contracts[contract_idx], slots[slot_idx]), + ); + } + } + }) + }); + } + + group.finish(); +} + +criterion_group!( + benches, + bench_bytecode_cache_hit, + bench_bytecode_cache_miss_vs_hit, + bench_pinned_storage_cache, + bench_mixed_workload, +); +criterion_main!(benches); diff --git a/crates/ev-revm/src/cache.rs b/crates/ev-revm/src/cache.rs new file mode 100644 index 0000000..5a5943b --- /dev/null +++ b/crates/ev-revm/src/cache.rs @@ -0,0 +1,737 @@ +//! 
Caching layer for EVM database operations.
+//!
+//! This module provides cache wrappers for database operations:
+//! - `BytecodeCache`: Caches immutable contract bytecode
+//! - `PinnedStorageCache`: Pins storage slots for specific contracts in RAM
+
+use alloy_primitives::{Address, B256, U256};
+use reth_revm::revm::{
+    context_interface::Database,
+    state::{AccountInfo, Bytecode},
+};
+use std::{
+    collections::{HashMap, HashSet},
+    sync::{Arc, RwLock},
+};
+
+/// Thread-safe bytecode cache using LRU eviction strategy.
+///
+/// Contract bytecode is immutable after deployment, making it an ideal
+/// candidate for caching. This cache stores bytecode by its code hash,
+/// avoiding repeated database lookups for frequently-called contracts.
+#[derive(Debug)]
+pub struct BytecodeCache {
+    /// The actual cache storage, protected by a RwLock for thread-safety.
+    /// Values are Arc'd to allow cheap cloning when returning cached bytecode.
+    cache: RwLock<LruCache>,
+    /// Maximum number of entries before eviction
+    max_entries: usize,
+}
+
+/// Simple LRU cache implementation
+#[derive(Debug)]
+struct LruCache {
+    /// Map from code hash to (bytecode, access_order)
+    entries: HashMap<B256, (Arc<Bytecode>, u64)>,
+    /// Counter for tracking access order
+    access_counter: u64,
+}
+
+impl LruCache {
+    fn new() -> Self {
+        Self {
+            entries: HashMap::new(),
+            access_counter: 0,
+        }
+    }
+
+    fn get(&mut self, key: &B256) -> Option<Arc<Bytecode>> {
+        if let Some((bytecode, order)) = self.entries.get_mut(key) {
+            self.access_counter += 1;
+            *order = self.access_counter;
+            Some(Arc::clone(bytecode))
+        } else {
+            None
+        }
+    }
+
+    fn insert(&mut self, key: B256, value: Bytecode, max_entries: usize) {
+        // Evict oldest entries if at capacity
+        if self.entries.len() >= max_entries {
+            self.evict_oldest(max_entries / 2);
+        }
+
+        self.access_counter += 1;
+        self.entries.insert(key, (Arc::new(value), self.access_counter));
+    }
+
+    fn evict_oldest(&mut self, count: usize) {
+        if count == 0 || self.entries.is_empty() {
+            return;
+        }
+
+        // Collect entries sorted by access order (oldest first)
+        let mut entries: Vec<_> = self.entries.iter().map(|(k, (_, o))| (*k, *o)).collect();
+        entries.sort_by_key(|(_, order)| *order);
+
+        // Remove the oldest entries
+        for (key, _) in entries.into_iter().take(count) {
+            self.entries.remove(&key);
+        }
+    }
+
+    fn len(&self) -> usize {
+        self.entries.len()
+    }
+}
+
+impl BytecodeCache {
+    /// Creates a new bytecode cache with the specified maximum number of entries.
+    ///
+    /// # Arguments
+    /// * `max_entries` - Maximum number of bytecode entries to cache before eviction
+    ///
+    /// # Panics
+    /// Panics if `max_entries` is 0.
+    pub fn new(max_entries: usize) -> Self {
+        assert!(max_entries > 0, "max_entries must be greater than 0");
+        Self {
+            cache: RwLock::new(LruCache::new()),
+            max_entries,
+        }
+    }
+
+    /// Creates a new bytecode cache with default capacity (10,000 entries).
+    ///
+    /// This is suitable for most use cases, providing cache for approximately
+    /// 10,000 unique contracts.
+    pub fn with_default_capacity() -> Self {
+        Self::new(10_000)
+    }
+
+    /// Retrieves bytecode from the cache if present.
+    ///
+    /// Returns `None` if the bytecode is not cached.
+    pub fn get(&self, code_hash: &B256) -> Option<Bytecode> {
+        let mut cache = self.cache.write().expect("cache lock poisoned");
+        cache.get(code_hash).map(|arc| (*arc).clone())
+    }
+
+    /// Inserts bytecode into the cache.
+    ///
+    /// If the cache is at capacity, older entries will be evicted using LRU policy.
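+    ///
+    /// # Example
+    ///
+    /// A minimal illustrative sketch; `code_hash` and `bytecode` stand in for real values:
+    /// ```ignore
+    /// let cache = BytecodeCache::new(100);
+    /// cache.insert(code_hash, bytecode);
+    /// assert!(cache.get(&code_hash).is_some());
+    /// ```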
+ pub fn insert(&self, code_hash: B256, bytecode: Bytecode) { + // Don't cache empty bytecode + if bytecode.is_empty() { + return; + } + + let mut cache = self.cache.write().expect("cache lock poisoned"); + cache.insert(code_hash, bytecode, self.max_entries); + } + + /// Returns the current number of cached entries. + pub fn len(&self) -> usize { + self.cache.read().expect("cache lock poisoned").len() + } + + /// Returns true if the cache is empty. + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Clears all entries from the cache. + pub fn clear(&self) { + let mut cache = self.cache.write().expect("cache lock poisoned"); + cache.entries.clear(); + cache.access_counter = 0; + } +} + +impl Default for BytecodeCache { + fn default() -> Self { + Self::with_default_capacity() + } +} + +// ============================================================================ +// Pinned Storage Cache +// ============================================================================ + +/// RAM-pinned storage cache for hot contracts. +/// +/// This cache stores storage slots for explicitly configured contracts in RAM, +/// providing fast access for frequently-accessed contracts like DEXes, bridges, +/// or popular tokens. +/// +/// Unlike the bytecode cache which uses LRU eviction, pinned storage is permanent +/// for the duration of the node's runtime - the configured contracts are always +/// kept in RAM. +#[derive(Debug)] +pub struct PinnedStorageCache { + /// Set of contract addresses that should be pinned + pinned_addresses: HashSet
<Address>,
+    /// Storage slots for pinned contracts: address -> (slot -> value)
+    storage: RwLock<HashMap<Address, HashMap<U256, U256>>>,
+}
+
+impl PinnedStorageCache {
+    /// Creates a new pinned storage cache for the given contract addresses.
+    ///
+    /// Only storage reads/writes for these addresses will be cached.
+    pub fn new(pinned_addresses: Vec<Address>) -> Self {
+        let addresses: HashSet<_> = pinned_addresses.into_iter().collect();
+        let storage = addresses.iter().map(|addr| (*addr, HashMap::new())).collect();
+
+        Self {
+            pinned_addresses: addresses,
+            storage: RwLock::new(storage),
+        }
+    }
+
+    /// Creates an empty cache with no pinned contracts.
+    pub fn empty() -> Self {
+        Self {
+            pinned_addresses: HashSet::new(),
+            storage: RwLock::new(HashMap::new()),
+        }
+    }
+
+    /// Returns true if the given address is configured for pinning.
+    #[inline]
+    pub fn is_pinned(&self, address: &Address) -> bool {
+        self.pinned_addresses.contains(address)
+    }
+
+    /// Returns the set of pinned addresses.
+    pub fn pinned_addresses(&self) -> &HashSet<Address> {
+        &self.pinned_addresses
+    }
+
+    /// Retrieves a storage value from the cache.
+    ///
+    /// Returns `None` if:
+    /// - The address is not a pinned contract
+    /// - The slot has not been cached yet
+    pub fn get_storage(&self, address: &Address, slot: &U256) -> Option<U256> {
+        if !self.is_pinned(address) {
+            return None;
+        }
+
+        let storage = self.storage.read().expect("storage lock poisoned");
+        storage.get(address)?.get(slot).copied()
+    }
+
+    /// Stores a storage value in the cache.
+    ///
+    /// Only stores if the address is a pinned contract.
+    pub fn set_storage(&self, address: Address, slot: U256, value: U256) {
+        if !self.is_pinned(&address) {
+            return;
+        }
+
+        let mut storage = self.storage.write().expect("storage lock poisoned");
+        storage.entry(address).or_default().insert(slot, value);
+    }
+
+    /// Returns the number of cached storage slots for a given address.
+    pub fn slot_count(&self, address: &Address) -> usize {
+        self.storage
+            .read()
+            .expect("storage lock poisoned")
+            .get(address)
+            .map(|slots| slots.len())
+            .unwrap_or(0)
+    }
+
+    /// Returns the total number of cached storage slots across all contracts.
+    pub fn total_slot_count(&self) -> usize {
+        self.storage
+            .read()
+            .expect("storage lock poisoned")
+            .values()
+            .map(|slots| slots.len())
+            .sum()
+    }
+
+    /// Clears all cached storage for a specific address.
+    pub fn clear_address(&self, address: &Address) {
+        if let Some(slots) = self
+            .storage
+            .write()
+            .expect("storage lock poisoned")
+            .get_mut(address)
+        {
+            slots.clear();
+        }
+    }
+
+    /// Clears all cached storage.
+    pub fn clear(&self) {
+        let mut storage = self.storage.write().expect("storage lock poisoned");
+        for slots in storage.values_mut() {
+            slots.clear();
+        }
+    }
+}
+
+impl Default for PinnedStorageCache {
+    fn default() -> Self {
+        Self::empty()
+    }
+}
+
+// ============================================================================
+// Cached Database
+// ============================================================================
+
+/// A database wrapper that adds bytecode and storage caching to any underlying database.
+///
+/// This wrapper provides two levels of caching:
+/// - **Bytecode caching**: Caches immutable contract bytecode by code hash
+/// - **Pinned storage**: RAM-pins storage slots for explicitly configured contracts
+///
+/// # Example
+///
+/// ```ignore
+/// use ev_revm::cache::{BytecodeCache, PinnedStorageCache, CachedDatabase};
+/// use std::sync::Arc;
+///
+/// let inner_db = StateProviderDatabase::new(&state_provider);
+/// let bytecode_cache = Arc::new(BytecodeCache::with_default_capacity());
+/// let pinned_storage = Arc::new(PinnedStorageCache::new(vec![uniswap_address, usdc_address]));
+/// let cached_db = CachedDatabase::with_pinned_storage(inner_db, bytecode_cache, pinned_storage);
+/// ```
+#[derive(Debug)]
+pub struct CachedDatabase<DB> {
+    /// The underlying database
+    inner: DB,
+    /// Shared bytecode cache
+    bytecode_cache: Arc<BytecodeCache>,
+    /// Optional pinned storage cache for hot contracts
+    pinned_storage: Option<Arc<PinnedStorageCache>>,
+}
+
+impl<DB> CachedDatabase<DB> {
+    /// Creates a new cached database wrapper with bytecode caching only.
+    ///
+    /// # Arguments
+    /// * `inner` - The underlying database to wrap
+    /// * `bytecode_cache` - Shared bytecode cache (can be shared across multiple databases)
+    pub fn new(inner: DB, bytecode_cache: Arc<BytecodeCache>) -> Self {
+        Self {
+            inner,
+            bytecode_cache,
+            pinned_storage: None,
+        }
+    }
+
+    /// Creates a new cached database wrapper with both bytecode and pinned storage caching.
+    ///
+    /// # Arguments
+    /// * `inner` - The underlying database to wrap
+    /// * `bytecode_cache` - Shared bytecode cache
+    /// * `pinned_storage` - Shared pinned storage cache for hot contracts
+    pub fn with_pinned_storage(
+        inner: DB,
+        bytecode_cache: Arc<BytecodeCache>,
+        pinned_storage: Arc<PinnedStorageCache>,
+    ) -> Self {
+        Self {
+            inner,
+            bytecode_cache,
+            pinned_storage: Some(pinned_storage),
+        }
+    }
+
+    /// Returns a reference to the underlying database.
+    pub fn inner(&self) -> &DB {
+        &self.inner
+    }
+
+    /// Returns a mutable reference to the underlying database.
+    pub fn inner_mut(&mut self) -> &mut DB {
+        &mut self.inner
+    }
+
+    /// Consumes the wrapper and returns the underlying database.
+    pub fn into_inner(self) -> DB {
+        self.inner
+    }
+
+    /// Returns a reference to the bytecode cache.
+    pub fn bytecode_cache(&self) -> &Arc<BytecodeCache> {
+        &self.bytecode_cache
+    }
+
+    /// Returns a reference to the bytecode cache (alias for backwards compatibility).
+    pub fn cache(&self) -> &Arc<BytecodeCache> {
+        &self.bytecode_cache
+    }
+
+    /// Returns a reference to the pinned storage cache, if configured.
+    pub fn pinned_storage(&self) -> Option<&Arc<PinnedStorageCache>> {
+        self.pinned_storage.as_ref()
+    }
+}
+
+impl<DB: Database> Database for CachedDatabase<DB> {
+    type Error = DB::Error;
+
+    fn basic(&mut self, address: Address) -> Result<Option<AccountInfo>, Self::Error> {
+        self.inner.basic(address)
+    }
+
+    fn code_by_hash(&mut self, code_hash: B256) -> Result<Bytecode, Self::Error> {
+        // Check bytecode cache first
+        if let Some(cached) = self.bytecode_cache.get(&code_hash) {
+            return Ok(cached);
+        }
+
+        // Cache miss - fetch from underlying database
+        let bytecode = self.inner.code_by_hash(code_hash)?;
+
+        // Cache for future use
+        self.bytecode_cache.insert(code_hash, bytecode.clone());
+
+        Ok(bytecode)
+    }
+
+    fn storage(&mut self, address: Address, index: U256) -> Result<U256, Self::Error> {
+        // Check pinned storage cache first
+        if let Some(pinned) = &self.pinned_storage {
+            if let Some(value) = pinned.get_storage(&address, &index) {
+                return Ok(value);
+            }
+        }
+
+        // Cache miss or not pinned - fetch from underlying database
+        let value = self.inner.storage(address, index)?;
+
+        // Cache for future use if this is a pinned contract
+        if let Some(pinned) = &self.pinned_storage {
+            pinned.set_storage(address, index, value);
+        }
+
+        Ok(value)
+    }
+
+    fn block_hash(&mut self, number: u64) -> Result<B256, Self::Error> {
+        self.inner.block_hash(number)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use alloy_primitives::bytes;
+
+    #[test]
+    fn test_bytecode_cache_basic_operations() {
+        let cache = BytecodeCache::new(100);
+
+        // Create a test bytecode
+        let code_hash = B256::repeat_byte(0x42);
+        let bytecode = Bytecode::new_raw(bytes!("6080604052").into());
+
+        // Initially not in cache
+        assert!(cache.get(&code_hash).is_none());
+
+        // Insert into cache
+        cache.insert(code_hash, bytecode.clone());
+
+        // Now should be retrievable
+        let cached = cache.get(&code_hash).expect("should be cached");
+        assert_eq!(cached.bytes(), bytecode.bytes());
+    }
+
+    #[test]
+    fn test_bytecode_cache_empty_bytecode_not_cached() {
+        let cache = BytecodeCache::new(100);
+        let code_hash = B256::repeat_byte(0x42);
+        let empty_bytecode = Bytecode::new();
+
+        cache.insert(code_hash, empty_bytecode);
+
+        // Empty bytecode should not be cached
+        assert!(cache.get(&code_hash).is_none());
+    }
+
+    #[test]
+    fn test_bytecode_cache_lru_eviction() {
+        let cache = BytecodeCache::new(3);
+
+        // Insert 3 entries
+        for i in 0..3u8 {
+            let code_hash = B256::repeat_byte(i);
+            let bytecode = Bytecode::new_raw(vec![0x60, i].into());
+            cache.insert(code_hash, bytecode);
+        }
+
+        assert_eq!(cache.len(), 3);
+
+        // Access entry 0 to make it recently used
+        cache.get(&B256::repeat_byte(0));
+
+        // Insert a 4th entry, should evict entry 1 (least recently used)
+        let code_hash_3 = B256::repeat_byte(3);
+        cache.insert(code_hash_3, Bytecode::new_raw(vec![0x60, 3].into()));
+
+        // Entry 0 should still be present (was accessed)
+        assert!(cache.get(&B256::repeat_byte(0)).is_some());
+        // Entry 3 should be present (just added)
+        assert!(cache.get(&B256::repeat_byte(3)).is_some());
+    }
+
+    #[test]
+    fn test_bytecode_cache_clear() {
+        let cache = BytecodeCache::new(100);
+
+        // Insert some entries
+        for i in 0..5u8 {
+            let code_hash = B256::repeat_byte(i);
+            let bytecode = Bytecode::new_raw(vec![0x60, i].into());
+            cache.insert(code_hash, bytecode);
+        }
+
+        assert_eq!(cache.len(), 5);
+
+        cache.clear();
+
+        assert!(cache.is_empty());
+    }
+
+    #[test]
+    #[should_panic(expected = "max_entries must be greater than 0")]
+    fn test_bytecode_cache_zero_capacity_panics() {
+        BytecodeCache::new(0);
+    }
+
+    // Mock database for testing CachedDatabase
+    #[derive(Debug, Default)]
+    struct MockDatabase {
+        bytecodes: HashMap<B256, Bytecode>,
+        storage: HashMap<(Address, U256), U256>,
+        code_by_hash_call_count: std::cell::Cell<usize>,
+        storage_call_count: std::cell::Cell<usize>,
+    }
+
+    impl MockDatabase {
+        fn new() -> Self {
+            Self::default()
+        }
+
+        fn with_bytecode(mut self, code_hash: B256, bytecode: Bytecode) -> Self {
+            self.bytecodes.insert(code_hash, bytecode);
+            self
+        }
+
+        fn with_storage(mut self, address: Address, slot: U256, value: U256) -> Self {
+            self.storage.insert((address, slot), value);
+            self
+        }
+
+        fn code_by_hash_call_count(&self) -> usize {
+            self.code_by_hash_call_count.get()
+        }
+
+        fn storage_call_count(&self) -> usize {
+            self.storage_call_count.get()
+        }
+    }
+
+    impl Database for MockDatabase {
+        type Error = std::convert::Infallible;
+
+        fn basic(&mut self, _address: Address) -> Result<Option<AccountInfo>, Self::Error> {
+            Ok(None)
+        }
+
+        fn code_by_hash(&mut self, code_hash: B256) -> Result<Bytecode, Self::Error> {
+            self.code_by_hash_call_count.set(self.code_by_hash_call_count.get() + 1);
+            Ok(self.bytecodes.get(&code_hash).cloned().unwrap_or_default())
+        }
+
+        fn storage(&mut self, address: Address, index: U256) -> Result<U256, Self::Error> {
+            self.storage_call_count.set(self.storage_call_count.get() + 1);
+            Ok(self.storage.get(&(address, index)).copied().unwrap_or(U256::ZERO))
+        }
+
+        fn block_hash(&mut self, _number: u64) -> Result<B256, Self::Error> {
+            Ok(B256::ZERO)
+        }
+    }
+
+    #[test]
+    fn test_cached_database_cache_hit() {
+        let code_hash = B256::repeat_byte(0x42);
+        let bytecode = Bytecode::new_raw(bytes!("6080604052").into());
+
+        let mock_db = MockDatabase::new().with_bytecode(code_hash, bytecode.clone());
+        let cache = Arc::new(BytecodeCache::new(100));
+        let mut cached_db = CachedDatabase::new(mock_db, cache);
+
+        // First call - cache miss, should hit database
+        let result1 = cached_db.code_by_hash(code_hash).unwrap();
+        assert_eq!(result1.bytes(), bytecode.bytes());
+        assert_eq!(cached_db.inner().code_by_hash_call_count(), 1);
+
+        // Second call - cache hit, should NOT hit database
+        let result2 = cached_db.code_by_hash(code_hash).unwrap();
+        assert_eq!(result2.bytes(), bytecode.bytes());
+        assert_eq!(cached_db.inner().code_by_hash_call_count(), 1); // Still 1!
+ } + + #[test] + fn test_cached_database_delegates_other_methods() { + let mock_db = MockDatabase::new(); + let cache = Arc::new(BytecodeCache::new(100)); + let mut cached_db = CachedDatabase::new(mock_db, cache); + + // These should delegate to inner database + assert!(cached_db.basic(Address::ZERO).unwrap().is_none()); + assert_eq!(cached_db.storage(Address::ZERO, U256::ZERO).unwrap(), U256::ZERO); + assert_eq!(cached_db.block_hash(0).unwrap(), B256::ZERO); + } + + // ======================================================================== + // PinnedStorageCache Tests + // ======================================================================== + + #[test] + fn test_pinned_storage_cache_basic_operations() { + let contract = Address::repeat_byte(0x42); + let cache = PinnedStorageCache::new(vec![contract]); + + // Initially empty + assert!(cache.is_pinned(&contract)); + assert_eq!(cache.slot_count(&contract), 0); + assert!(cache.get_storage(&contract, &U256::from(1)).is_none()); + + // Set a value + cache.set_storage(contract, U256::from(1), U256::from(100)); + + // Should be retrievable + assert_eq!(cache.get_storage(&contract, &U256::from(1)), Some(U256::from(100))); + assert_eq!(cache.slot_count(&contract), 1); + } + + #[test] + fn test_pinned_storage_cache_non_pinned_ignored() { + let pinned = Address::repeat_byte(0x01); + let not_pinned = Address::repeat_byte(0x02); + let cache = PinnedStorageCache::new(vec![pinned]); + + assert!(cache.is_pinned(&pinned)); + assert!(!cache.is_pinned(¬_pinned)); + + // Storing to non-pinned address should be ignored + cache.set_storage(not_pinned, U256::from(1), U256::from(100)); + assert!(cache.get_storage(¬_pinned, &U256::from(1)).is_none()); + assert_eq!(cache.total_slot_count(), 0); + } + + #[test] + fn test_pinned_storage_cache_multiple_contracts() { + let contract1 = Address::repeat_byte(0x01); + let contract2 = Address::repeat_byte(0x02); + let cache = PinnedStorageCache::new(vec![contract1, contract2]); + + cache.set_storage(contract1, U256::from(1), U256::from(100)); + cache.set_storage(contract2, U256::from(1), U256::from(200)); + cache.set_storage(contract1, U256::from(2), U256::from(300)); + + assert_eq!(cache.get_storage(&contract1, &U256::from(1)), Some(U256::from(100))); + assert_eq!(cache.get_storage(&contract2, &U256::from(1)), Some(U256::from(200))); + assert_eq!(cache.get_storage(&contract1, &U256::from(2)), Some(U256::from(300))); + assert_eq!(cache.total_slot_count(), 3); + } + + #[test] + fn test_pinned_storage_cache_clear() { + let contract = Address::repeat_byte(0x42); + let cache = PinnedStorageCache::new(vec![contract]); + + cache.set_storage(contract, U256::from(1), U256::from(100)); + cache.set_storage(contract, U256::from(2), U256::from(200)); + assert_eq!(cache.slot_count(&contract), 2); + + cache.clear_address(&contract); + assert_eq!(cache.slot_count(&contract), 0); + } + + #[test] + fn test_pinned_storage_cache_empty() { + let cache = PinnedStorageCache::empty(); + + assert!(!cache.is_pinned(&Address::ZERO)); + assert_eq!(cache.total_slot_count(), 0); + } + + // ======================================================================== + // CachedDatabase with Pinned Storage Tests + // ======================================================================== + + #[test] + fn test_cached_database_pinned_storage_hit() { + let pinned_contract = Address::repeat_byte(0x42); + let slot = U256::from(1); + let value = U256::from(12345); + + let mock_db = MockDatabase::new().with_storage(pinned_contract, slot, value); + let 
bytecode_cache = Arc::new(BytecodeCache::new(100)); + let pinned_storage = Arc::new(PinnedStorageCache::new(vec![pinned_contract])); + let mut cached_db = CachedDatabase::with_pinned_storage(mock_db, bytecode_cache, pinned_storage); + + // First call - cache miss, should hit database + let result1 = cached_db.storage(pinned_contract, slot).unwrap(); + assert_eq!(result1, value); + assert_eq!(cached_db.inner().storage_call_count(), 1); + + // Second call - cache hit, should NOT hit database + let result2 = cached_db.storage(pinned_contract, slot).unwrap(); + assert_eq!(result2, value); + assert_eq!(cached_db.inner().storage_call_count(), 1); // Still 1! + } + + #[test] + fn test_cached_database_non_pinned_not_cached() { + let pinned_contract = Address::repeat_byte(0x01); + let non_pinned_contract = Address::repeat_byte(0x02); + let slot = U256::from(1); + + let mock_db = MockDatabase::new() + .with_storage(non_pinned_contract, slot, U256::from(999)); + let bytecode_cache = Arc::new(BytecodeCache::new(100)); + let pinned_storage = Arc::new(PinnedStorageCache::new(vec![pinned_contract])); + let mut cached_db = CachedDatabase::with_pinned_storage(mock_db, bytecode_cache, pinned_storage); + + // First call to non-pinned contract + let result1 = cached_db.storage(non_pinned_contract, slot).unwrap(); + assert_eq!(result1, U256::from(999)); + assert_eq!(cached_db.inner().storage_call_count(), 1); + + // Second call - should still hit database (not cached) + let result2 = cached_db.storage(non_pinned_contract, slot).unwrap(); + assert_eq!(result2, U256::from(999)); + assert_eq!(cached_db.inner().storage_call_count(), 2); // Now 2! + } + + #[test] + fn test_cached_database_without_pinned_storage() { + let contract = Address::repeat_byte(0x42); + let slot = U256::from(1); + + let mock_db = MockDatabase::new().with_storage(contract, slot, U256::from(100)); + let bytecode_cache = Arc::new(BytecodeCache::new(100)); + // No pinned storage - using new() instead of with_pinned_storage() + let mut cached_db = CachedDatabase::new(mock_db, bytecode_cache); + + // All calls should hit database + cached_db.storage(contract, slot).unwrap(); + assert_eq!(cached_db.inner().storage_call_count(), 1); + + cached_db.storage(contract, slot).unwrap(); + assert_eq!(cached_db.inner().storage_call_count(), 2); + } +} diff --git a/crates/ev-revm/src/lib.rs b/crates/ev-revm/src/lib.rs index da8401f..b528e37 100644 --- a/crates/ev-revm/src/lib.rs +++ b/crates/ev-revm/src/lib.rs @@ -2,6 +2,7 @@ pub mod api; pub mod base_fee; +pub mod cache; pub mod config; pub mod evm; pub mod factory; @@ -9,6 +10,7 @@ pub mod handler; pub use api::EvBuilder; pub use base_fee::{BaseFeeRedirect, BaseFeeRedirectError}; +pub use cache::{BytecodeCache, CachedDatabase, PinnedStorageCache}; pub use config::{BaseFeeConfig, ConfigError}; pub use evm::{DefaultEvEvm, EvEvm}; pub use factory::{ diff --git a/crates/node/src/builder.rs b/crates/node/src/builder.rs index 23acc2c..7d9ba71 100644 --- a/crates/node/src/builder.rs +++ b/crates/node/src/builder.rs @@ -2,7 +2,7 @@ use crate::config::EvolvePayloadBuilderConfig; use alloy_consensus::transaction::Transaction; use alloy_evm::eth::EthEvmFactory; use alloy_primitives::Address; -use ev_revm::EvEvmFactory; +use ev_revm::{BytecodeCache, CachedDatabase, EvEvmFactory, PinnedStorageCache}; use evolve_ev_reth::EvolvePayloadAttributes; use reth_chainspec::{ChainSpec, ChainSpecProvider}; use reth_errors::RethError; @@ -29,6 +29,10 @@ pub struct EvolvePayloadBuilder { pub evm_config: EvolveEthEvmConfig, /// 
     /// Parsed Evolve-specific configuration
     pub config: EvolvePayloadBuilderConfig,
+    /// Shared bytecode cache for caching contract bytecode across payloads
+    bytecode_cache: Arc<BytecodeCache>,
+    /// Shared pinned storage cache for hot contracts (configured via chainspec)
+    pinned_storage: Arc<PinnedStorageCache>,
 }
 
 impl<Client> EvolvePayloadBuilder<Client>
@@ -40,11 +44,24 @@ where
         + Sync
        + 'static,
 {
+    /// Default bytecode cache capacity (number of unique contracts to cache)
+    const DEFAULT_BYTECODE_CACHE_CAPACITY: usize = 10_000;
+
     /// Creates a new instance of `EvolvePayloadBuilder`
     pub fn new(
         client: Arc<Client>,
         evm_config: EvolveEthEvmConfig,
         config: EvolvePayloadBuilderConfig,
+    ) -> Self {
+        Self::with_cache_capacity(client, evm_config, config, Self::DEFAULT_BYTECODE_CACHE_CAPACITY)
+    }
+
+    /// Creates a new instance of `EvolvePayloadBuilder` with custom bytecode cache capacity
+    pub fn with_cache_capacity(
+        client: Arc<Client>,
+        evm_config: EvolveEthEvmConfig,
+        config: EvolvePayloadBuilderConfig,
+        bytecode_cache_capacity: usize,
     ) -> Self {
         if let Some((sink, activation)) = config.base_fee_redirect_settings() {
             info!(
@@ -55,10 +72,30 @@ where
             );
         }
 
+        info!(
+            target: "ev-reth",
+            cache_capacity = bytecode_cache_capacity,
+            "Bytecode cache initialized"
+        );
+
+        // Initialize pinned storage cache from config
+        let pinned_contracts = config.pinned_contracts().to_vec();
+        if !pinned_contracts.is_empty() {
+            info!(
+                target: "ev-reth",
+                contract_count = pinned_contracts.len(),
+                contracts = ?pinned_contracts,
+                "Pinned storage cache initialized for hot contracts"
+            );
+        }
+        let pinned_storage = Arc::new(PinnedStorageCache::new(pinned_contracts));
+
         Self {
             client,
             evm_config,
             config,
+            bytecode_cache: Arc::new(BytecodeCache::new(bytecode_cache_capacity)),
+            pinned_storage,
         }
     }
 
@@ -75,10 +112,21 @@ where
         // Get the latest state provider
         let state_provider = self.client.latest().map_err(PayloadBuilderError::other)?;
 
-        // Create a database from the state provider
-        let db = StateProviderDatabase::new(&state_provider);
+        // IMPORTANT: Clear pinned storage cache between blocks to avoid stale state.
+        // Storage is mutable (unlike bytecode), so we must invalidate cached values
+        // when starting a new block to ensure we read the latest committed state.
+        // The cache will be re-populated during this block's execution.
+        self.pinned_storage.clear();
+
+        // Create a database from the state provider with bytecode and storage caching
+        let inner_db = StateProviderDatabase::new(&state_provider);
+        let cached_db = CachedDatabase::with_pinned_storage(
+            inner_db,
+            Arc::clone(&self.bytecode_cache),
+            Arc::clone(&self.pinned_storage),
+        );
         let mut state_db = State::builder()
-            .with_database(db)
+            .with_database(cached_db)
             .with_bundle_update()
             .build();
 
diff --git a/crates/node/src/config.rs b/crates/node/src/config.rs
index e8bb18b..9491964 100644
--- a/crates/node/src/config.rs
+++ b/crates/node/src/config.rs
@@ -21,6 +21,10 @@ struct ChainspecEvolveConfig {
     /// Block height at which the custom contract size limit activates.
     #[serde(default, rename = "contractSizeLimitActivationHeight")]
     pub contract_size_limit_activation_height: Option<u64>,
+    /// Contract addresses whose storage should be pinned in RAM for fast access.
+    /// These are typically hot contracts like DEXes, bridges, or popular tokens.
+    #[serde(default, rename = "pinnedContracts")]
+    pub pinned_contracts: Vec<Address>,
 }
 
 /// Configuration for the Evolve payload builder
@@ -44,6 +48,9 @@ pub struct EvolvePayloadBuilderConfig {
     /// Block height at which the custom contract size limit activates.
     #[serde(default)]
     pub contract_size_limit_activation_height: Option<u64>,
+    /// Contract addresses whose storage should be pinned in RAM for fast access.
+    #[serde(default)]
+    pub pinned_contracts: Vec<Address>,
 }
 
 impl EvolvePayloadBuilderConfig {
@@ -56,6 +63,7 @@
             mint_precompile_activation_height: None,
             contract_size_limit: None,
             contract_size_limit_activation_height: None,
+            pinned_contracts: Vec::new(),
         }
     }
 
@@ -90,10 +98,21 @@
             config.contract_size_limit = extras.contract_size_limit;
             config.contract_size_limit_activation_height =
                 extras.contract_size_limit_activation_height;
+            config.pinned_contracts = extras.pinned_contracts;
         }
 
         Ok(config)
     }
 
+    /// Returns the list of contract addresses whose storage should be pinned in RAM.
+    pub fn pinned_contracts(&self) -> &[Address] {
+        &self.pinned_contracts
+    }
+
+    /// Returns true if the given address is configured for storage pinning.
+    pub fn is_pinned_contract(&self, address: &Address) -> bool {
+        self.pinned_contracts.contains(address)
+    }
+
     /// Returns the contract size limit settings (limit, `activation_height`) if configured.
     /// Returns None if no custom limit is set (uses EIP-170 default).
     pub fn contract_size_limit_settings(&self) -> Option<(usize, u64)> {
@@ -332,6 +351,7 @@ mod tests {
             mint_precompile_activation_height: Some(0),
             contract_size_limit: None,
             contract_size_limit_activation_height: None,
+            pinned_contracts: vec![],
         };
         assert!(config_with_sink.validate().is_ok());
     }
@@ -346,6 +366,7 @@ mod tests {
             mint_precompile_activation_height: None,
             contract_size_limit: None,
             contract_size_limit_activation_height: None,
+            pinned_contracts: vec![],
         };
 
         assert_eq!(config.base_fee_sink_for_block(4), None);
@@ -475,4 +496,56 @@ mod tests {
             DEFAULT_CONTRACT_SIZE_LIMIT
         );
     }
+
+    #[test]
+    fn test_pinned_contracts_empty_by_default() {
+        let config = EvolvePayloadBuilderConfig::new();
+        assert!(config.pinned_contracts().is_empty());
+        assert!(!config.is_pinned_contract(&address!("0000000000000000000000000000000000000001")));
+    }
+
+    #[test]
+    fn test_pinned_contracts_from_chainspec() {
+        let contract1 = address!("0000000000000000000000000000000000000001");
+        let contract2 = address!("0000000000000000000000000000000000000002");
+        let extras = json!({
+            "pinnedContracts": [
+                "0x0000000000000000000000000000000000000001",
+                "0x0000000000000000000000000000000000000002"
+            ]
+        });
+
+        let chainspec = create_test_chainspec_with_extras(Some(extras));
+        let config = EvolvePayloadBuilderConfig::from_chain_spec(&chainspec).unwrap();
+
+        assert_eq!(config.pinned_contracts().len(), 2);
+        assert!(config.is_pinned_contract(&contract1));
+        assert!(config.is_pinned_contract(&contract2));
+        assert!(!config.is_pinned_contract(&address!("0000000000000000000000000000000000000003")));
+    }
+
+    #[test]
+    fn test_pinned_contracts_empty_array() {
+        let extras = json!({
+            "pinnedContracts": []
+        });
+
+        let chainspec = create_test_chainspec_with_extras(Some(extras));
+        let config = EvolvePayloadBuilderConfig::from_chain_spec(&chainspec).unwrap();
+
+        assert!(config.pinned_contracts().is_empty());
+    }
+
+    #[test]
+    fn test_pinned_contracts_not_specified() {
+        // When pinnedContracts is not specified at all
+        let extras = json!({
+            "baseFeeSink": "0x0000000000000000000000000000000000000001"
+        });
+
+        let chainspec = create_test_chainspec_with_extras(Some(extras));
+        let config = EvolvePayloadBuilderConfig::from_chain_spec(&chainspec).unwrap();
+
+        assert!(config.pinned_contracts().is_empty());
+    }
 }

From e01231dcb8ba3f881484d814d4fa5b2971f80578 Mon Sep 17 00:00:00 2001
From: tac0turtle
Date: Mon, 8 Dec 2025 14:02:51 +0100
Subject: [PATCH 2/4] remove pinning

---
 crates/ev-revm/benches/cache_benchmark.rs | 381 ++++++++++++--------
 crates/ev-revm/src/cache.rs               | 360 ++------
 crates/ev-revm/src/lib.rs                 |   2 +-
 crates/node/src/builder.rs                |  38 +--
 4 files changed, 161 insertions(+), 620 deletions(-)

diff --git a/crates/ev-revm/benches/cache_benchmark.rs b/crates/ev-revm/benches/cache_benchmark.rs
index d988b9f..f73b42e 100644
--- a/crates/ev-revm/benches/cache_benchmark.rs
+++ b/crates/ev-revm/benches/cache_benchmark.rs
@@ -1,14 +1,13 @@
-//! Benchmarks for EVM caching layers.
+//! Benchmarks for bytecode caching layer.
 //!
-//! This benchmark compares performance of:
-//! - Direct database access vs bytecode caching
-//! - Direct database access vs pinned storage caching
+//! This benchmark compares performance of direct database access vs bytecode caching.
+//! Bytecode is immutable after deployment, making it ideal for caching.
 //!
 //! Run with: cargo bench -p ev-revm
 
 use alloy_primitives::{Address, B256, U256};
 use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
-use ev_revm::cache::{BytecodeCache, CachedDatabase, PinnedStorageCache};
+use ev_revm::cache::{BytecodeCache, CachedDatabase};
 use rand::{Rng, SeedableRng};
 use reth_revm::revm::{
     context_interface::Database,
@@ -17,7 +16,6 @@ use reth_revm::revm::{
 use std::{collections::HashMap, sync::Arc};
 
 /// Mock database with configurable latency simulation.
-/// In real usage, database access involves disk I/O which is orders of magnitude slower.
 #[derive(Debug)]
 struct MockDatabase {
     bytecodes: HashMap<B256, Bytecode>,
     storage: HashMap<(Address, U256), U256>,
@@ -43,30 +41,21 @@ impl MockDatabase {
             let mut code = vec![0x60u8; bytecode_size]; // PUSH1 opcodes
             code[0] = 0x60;
             code[1] = (i % 256) as u8;
-            self.bytecodes.insert(code_hash, Bytecode::new_raw(code.into()));
+            self.bytecodes
+                .insert(code_hash, Bytecode::new_raw(code.into()));
         }
         self
     }
 
-    fn with_storage(mut self, address: Address, slot_count: usize) -> Self {
-        for i in 0..slot_count {
-            let slot = U256::from(i);
-            let value = U256::from(i * 1000);
-            self.storage.insert((address, slot), value);
-        }
-        self
-    }
-
-    /// Simulate work to represent disk I/O latency
+    /// Simulate database latency by doing busy work
     fn simulate_latency(&self) {
-        // Do some work proportional to latency_factor
-        // This simulates the overhead of disk access
-        let mut dummy: u64 = 1;
-        for _ in 0..self.latency_factor {
-            dummy = dummy.wrapping_mul(7).wrapping_add(11);
+        if self.latency_factor > 0 {
+            let mut sum = 0u64;
+            for i in 0..self.latency_factor {
+                sum = sum.wrapping_add(i as u64);
+            }
+            black_box(sum);
         }
-        // Prevent optimization
-        black_box(dummy);
     }
 }
 
@@ -85,7 +74,11 @@ impl Database for MockDatabase {
 
     fn storage(&mut self, address: Address, index: U256) -> Result<U256, Self::Error> {
         self.simulate_latency();
-        Ok(self.storage.get(&(address, index)).copied().unwrap_or(U256::ZERO))
+        Ok(self
+            .storage
+            .get(&(address, index))
+            .copied()
+            .unwrap_or(U256::ZERO))
     }
 
     fn block_hash(&mut self, _number: u64) -> Result<B256, Self::Error> {
@@ -94,38 +87,31 @@ impl Database for MockDatabase {
     }
 }
 
-/// Benchmark bytecode cache hit performance
+/// Benchmark bytecode cache hits - all requests hit the cache
 fn bench_bytecode_cache_hit(c: &mut Criterion) {
-    let mut group = c.benchmark_group("bytecode_cache");
+    let mut group = c.benchmark_group("bytecode_cache_hit");
 
-    // Different cache hit scenarios
+    // Test different cache sizes
     for &num_contracts in &[10, 100, 1000] {
         group.throughput(Throughput::Elements(num_contracts as u64));
 
-        // Setup: pre-populate cache
-        let cache = Arc::new(BytecodeCache::new(10_000));
-        let mock_db = MockDatabase::new(100).with_bytecodes(num_contracts);
-
-        // Pre-warm cache
-        let code_hashes: Vec<_> = (0..num_contracts)
-            .map(|i| B256::repeat_byte((i % 256) as u8))
-            .collect();
-
-        for hash in &code_hashes {
-            if let Some(bytecode) = mock_db.bytecodes.get(hash) {
-                cache.insert(*hash, bytecode.clone());
-            }
+        // Pre-populate cache
+        let cache = Arc::new(BytecodeCache::new(num_contracts * 2));
+        for i in 0..num_contracts {
+            let code_hash = B256::repeat_byte((i % 256) as u8);
+            let bytecode = Bytecode::new_raw(vec![0x60, (i % 256) as u8].into());
+            cache.insert(code_hash, bytecode);
        }
 
-        let mut cached_db = CachedDatabase::new(mock_db, cache);
-
         group.bench_with_input(
             BenchmarkId::new("cache_hit", num_contracts),
-            &code_hashes,
-            |b, hashes| {
+            &num_contracts,
+            |b, &n| {
                 b.iter(|| {
-                    for hash in hashes {
-                        let _ = black_box(cached_db.code_by_hash(*hash));
+                    for i in 0..n {
+                        let code_hash = B256::repeat_byte((i % 256) as u8);
+                        let result = cache.get(&code_hash);
+                        black_box(result);
                     }
                 })
             },
@@ -135,234 +121,123 @@ fn bench_bytecode_cache_hit(c: &mut Criterion) {
     group.finish();
 }
 
-/// Benchmark bytecode cache miss vs hit comparison
+/// Benchmark cache miss vs hit - demonstrates the benefit of caching
 fn bench_bytecode_cache_miss_vs_hit(c: &mut Criterion) {
-    let mut group = c.benchmark_group("bytecode_cache_comparison");
-
-    let num_lookups = 100;
-    group.throughput(Throughput::Elements(num_lookups as u64));
-
-    // Setup databases with simulated latency
-    let latency_factor = 1000; // Simulate ~1000 cycles of work per DB access
-
-    // Pre-generate code hashes
-    let code_hashes: Vec<_> = (0..num_lookups)
-        .map(|i| B256::repeat_byte((i % 256) as u8))
-        .collect();
-
-    // Benchmark: No caching (direct DB access)
-    {
-        let mock_db = MockDatabase::new(latency_factor).with_bytecodes(num_lookups);
-
-        // Wrap in CachedDatabase but don't pre-warm (always miss)
-        let cache = Arc::new(BytecodeCache::new(10_000));
-        let mut cached_db = CachedDatabase::new(mock_db, cache);
-
-        group.bench_function("always_miss", |b| {
-            b.iter(|| {
-                // Clear cache to ensure misses
-                cached_db.cache().clear();
-                for hash in &code_hashes {
-                    let _ = black_box(cached_db.code_by_hash(*hash));
-                }
-            })
-        });
-    }
+    let mut group = c.benchmark_group("bytecode_cache_miss_vs_hit");
+
+    let num_contracts = 100;
+    let latency_factor = 1000; // Simulate some database latency
 
-    // Benchmark: With caching (all hits after first pass)
-    {
-        let mock_db = MockDatabase::new(latency_factor).with_bytecodes(num_lookups);
-        let cache = Arc::new(BytecodeCache::new(10_000));
-        let mut cached_db = CachedDatabase::new(mock_db, cache);
+    group.throughput(Throughput::Elements(num_contracts as u64));
 
-        // Pre-warm cache
-        for hash in &code_hashes {
-            let _ = cached_db.code_by_hash(*hash);
+    // Without cache (all misses go to database)
+    group.bench_function("no_cache", |b| {
+        let mut db = MockDatabase::new(latency_factor).with_bytecodes(num_contracts);
+        b.iter(|| {
+            for i in 0..num_contracts {
+                let code_hash = B256::repeat_byte((i % 256) as u8);
+                let result = db.code_by_hash(code_hash);
+                // Keep the read observable so the loop isn't optimized away
+                black_box(result);
+            }
+        })
+    });
+
+    // With cache (first pass misses, subsequent passes hit)
+    group.bench_function("with_cache_warm", |b| {
+        let db = MockDatabase::new(latency_factor).with_bytecodes(num_contracts);
+        let cache = Arc::new(BytecodeCache::new(num_contracts * 2));
+        let mut cached_db = CachedDatabase::new(db, cache);
+
+        // Warm up the cache
+        for i in 0..num_contracts {
+            let code_hash = B256::repeat_byte((i % 256) as u8);
+            let _ = cached_db.code_by_hash(code_hash);
        }
 
-        group.bench_function("always_hit", |b| {
-            b.iter(|| {
-                for hash in &code_hashes {
-                    let _ = black_box(cached_db.code_by_hash(*hash));
-                }
-            })
-        });
-    }
+        b.iter(|| {
+            for i in 0..num_contracts {
+                let code_hash = B256::repeat_byte((i % 256) as u8);
+                let result = cached_db.code_by_hash(code_hash);
+                black_box(result);
+            }
+        })
+    });
 
     group.finish();
 }
 
-/// Benchmark pinned storage cache performance
-fn bench_pinned_storage_cache(c: &mut Criterion) {
-    let mut group = c.benchmark_group("pinned_storage_cache");
-
-    let pinned_contract = Address::repeat_byte(0x42);
-    let num_slots = 100;
-    group.throughput(Throughput::Elements(num_slots as u64));
-
-    let slots: Vec<_> = (0..num_slots).map(U256::from).collect();
-    let latency_factor = 1000;
-
-    // Benchmark: Non-pinned storage (no caching)
-    {
-        let non_pinned = Address::repeat_byte(0x01);
-        let mock_db = MockDatabase::new(latency_factor).with_storage(non_pinned, num_slots);
-        let bytecode_cache = Arc::new(BytecodeCache::new(100));
-        let pinned_storage = Arc::new(PinnedStorageCache::new(vec![pinned_contract])); // Different address
-        let mut cached_db =
-            CachedDatabase::with_pinned_storage(mock_db, bytecode_cache, pinned_storage);
-
-        group.bench_function("non_pinned", |b| {
-            b.iter(|| {
-                for slot in &slots {
-                    let _ = black_box(cached_db.storage(non_pinned, *slot));
-                }
-            })
-        });
-    }
+/// Benchmark LRU eviction behavior
+fn bench_bytecode_cache_eviction(c: &mut Criterion) {
+    let mut group = c.benchmark_group("bytecode_cache_eviction");
 
-    // Benchmark: Pinned storage (cache hit)
-    {
-        let mock_db = MockDatabase::new(latency_factor).with_storage(pinned_contract, num_slots);
-        let bytecode_cache = Arc::new(BytecodeCache::new(100));
-        let pinned_storage = Arc::new(PinnedStorageCache::new(vec![pinned_contract]));
-        let mut cached_db =
-            CachedDatabase::with_pinned_storage(mock_db, bytecode_cache, pinned_storage);
-
-        // Pre-warm cache
-        for slot in &slots {
-            let _ = cached_db.storage(pinned_contract, *slot);
-        }
+    let cache_size = 100;
+    let num_contracts = 200; // More contracts than cache can hold
 
-        group.bench_function("pinned_hit", |b| {
-            b.iter(|| {
-                for slot in &slots {
-                    let _ = black_box(cached_db.storage(pinned_contract, *slot));
-                }
-            })
-        });
-    }
+    group.throughput(Throughput::Elements(num_contracts as u64));
+
+    // Random access pattern (will cause evictions)
+    group.bench_function("random_access", |b| {
+        let db = MockDatabase::new(100).with_bytecodes(num_contracts);
+        let cache = Arc::new(BytecodeCache::new(cache_size));
+        let mut cached_db = CachedDatabase::new(db, cache);
+        let mut rng = rand::rngs::StdRng::seed_from_u64(42);
+
+        b.iter(|| {
+            for _ in 0..num_contracts {
+                let i = rng.gen_range(0..num_contracts);
+                let code_hash = B256::repeat_byte((i % 256) as u8);
+                let result = cached_db.code_by_hash(code_hash);
+                black_box(result);
+            }
+        })
+    });
+
+    // Sequential access pattern (better cache locality)
+    group.bench_function("sequential_access", |b| {
+        let db = MockDatabase::new(100).with_bytecodes(num_contracts);
+        let cache = Arc::new(BytecodeCache::new(cache_size));
+        let mut cached_db = CachedDatabase::new(db, cache);
+
+        b.iter(|| {
+            for i in 0..num_contracts {
+                let code_hash = B256::repeat_byte((i % 256) as u8);
+                let result = cached_db.code_by_hash(code_hash);
+                black_box(result);
+            }
+        })
+    });
 
     group.finish();
 }
 
-/// Benchmark mixed workload with realistic access patterns
-fn bench_mixed_workload(c: &mut Criterion) {
-    let mut group = c.benchmark_group("mixed_workload");
+/// Benchmark realistic workload with mixed operations
+fn 
bench_realistic_workload(c: &mut Criterion) { + let mut group = c.benchmark_group("realistic_workload"); let num_contracts = 50; - let num_pinned = 5; - let slots_per_contract = 20; - let iterations = 100; - - group.throughput(Throughput::Elements(iterations as u64)); - - // Setup: Multiple contracts, some pinned - let pinned_contracts: Vec<_> = (0..num_pinned).map(|i| Address::repeat_byte(i as u8)).collect(); - let all_contracts: Vec<_> = (0..num_contracts) - .map(|i| Address::repeat_byte(i as u8)) - .collect(); - let code_hashes: Vec<_> = (0..num_contracts) - .map(|i| B256::repeat_byte(i as u8)) - .collect(); - let slots: Vec<_> = (0..slots_per_contract).map(U256::from).collect(); - + let ops_per_iteration = 1000; let latency_factor = 500; - // Create mock database with all storage - let mut mock_db = MockDatabase::new(latency_factor).with_bytecodes(num_contracts); - for contract in &all_contracts { - mock_db = MockDatabase { - bytecodes: mock_db.bytecodes, - storage: { - let mut s = mock_db.storage; - for i in 0..slots_per_contract { - s.insert((*contract, U256::from(i)), U256::from(i * 1000)); - } - s - }, - latency_factor, - }; - } + group.throughput(Throughput::Elements(ops_per_iteration as u64)); - // Benchmark: No caching - { + // Simulate a realistic workload where some contracts are called frequently + // (hot contracts) and others are called rarely (cold contracts) + group.bench_function("hot_cold_distribution", |b| { let db = MockDatabase::new(latency_factor).with_bytecodes(num_contracts); - let db = MockDatabase { - bytecodes: db.bytecodes, - storage: mock_db.storage.clone(), - latency_factor, - }; - - let bytecode_cache = Arc::new(BytecodeCache::new(100)); - let pinned_storage = Arc::new(PinnedStorageCache::empty()); // No pinning - let mut cached_db = - CachedDatabase::with_pinned_storage(db, bytecode_cache, pinned_storage); - - // Simulate realistic access: read bytecode, then storage slots + let cache = Arc::new(BytecodeCache::new(num_contracts)); + let mut cached_db = CachedDatabase::new(db, cache); let mut rng = rand::rngs::StdRng::seed_from_u64(42); - group.bench_function("no_pinning", |b| { - b.iter(|| { - cached_db.cache().clear(); - if let Some(ps) = cached_db.pinned_storage() { - ps.clear(); - } - for _ in 0..iterations { - let contract_idx = rng.gen_range(0..num_contracts); - let _ = black_box(cached_db.code_by_hash(code_hashes[contract_idx])); - for _ in 0..3 { - let slot_idx = rng.gen_range(0..slots_per_contract); - let _ = black_box( - cached_db.storage(all_contracts[contract_idx], slots[slot_idx]), - ); - } - } - }) - }); - } - - // Benchmark: With bytecode + pinned storage caching - { - let db = MockDatabase::new(latency_factor).with_bytecodes(num_contracts); - let db = MockDatabase { - bytecodes: db.bytecodes, - storage: mock_db.storage.clone(), - latency_factor, - }; - - let bytecode_cache = Arc::new(BytecodeCache::new(100)); - let pinned_storage = Arc::new(PinnedStorageCache::new(pinned_contracts.clone())); - let mut cached_db = - CachedDatabase::with_pinned_storage(db, bytecode_cache, pinned_storage); - - // Pre-warm caches - for hash in &code_hashes { - let _ = cached_db.code_by_hash(*hash); - } - for contract in &pinned_contracts { - for slot in &slots { - let _ = cached_db.storage(*contract, *slot); + // 80% of calls go to 20% of contracts (hot contracts: 0-9) + // 20% of calls go to 80% of contracts (cold contracts: 10-49) + b.iter(|| { + for _ in 0..ops_per_iteration { + let i = if rng.gen_bool(0.8) { + rng.gen_range(0..10) // Hot contract + } else { 
+ rng.gen_range(10..num_contracts) // Cold contract + }; + let code_hash = B256::repeat_byte((i % 256) as u8); + let result = cached_db.code_by_hash(code_hash); } - } - - let mut rng = rand::rngs::StdRng::seed_from_u64(42); - - group.bench_function("with_caching", |b| { - b.iter(|| { - for _ in 0..iterations { - let contract_idx = rng.gen_range(0..num_contracts); - let _ = black_box(cached_db.code_by_hash(code_hashes[contract_idx])); - for _ in 0..3 { - let slot_idx = rng.gen_range(0..slots_per_contract); - let _ = black_box( - cached_db.storage(all_contracts[contract_idx], slots[slot_idx]), - ); - } - } - }) - }); - } + }) + }); group.finish(); } @@ -371,7 +246,7 @@ criterion_group!( benches, bench_bytecode_cache_hit, bench_bytecode_cache_miss_vs_hit, - bench_pinned_storage_cache, - bench_mixed_workload, + bench_bytecode_cache_eviction, + bench_realistic_workload, ); criterion_main!(benches); diff --git a/crates/ev-revm/src/cache.rs b/crates/ev-revm/src/cache.rs index 5a5943b..8b4d950 100644 --- a/crates/ev-revm/src/cache.rs +++ b/crates/ev-revm/src/cache.rs @@ -1,8 +1,7 @@ //! Caching layer for EVM database operations. //! -//! This module provides cache wrappers for database operations: -//! - `BytecodeCache`: Caches immutable contract bytecode -//! - `PinnedStorageCache`: Pins storage slots for specific contracts in RAM +//! This module provides a bytecode cache wrapper for database operations. +//! Contract bytecode is immutable after deployment, making it ideal for caching. use alloy_primitives::{Address, B256, U256}; use reth_revm::revm::{ @@ -10,7 +9,7 @@ use reth_revm::revm::{ state::{AccountInfo, Bytecode}, }; use std::{ - collections::{HashMap, HashSet}, + collections::HashMap, sync::{Arc, RwLock}, }; @@ -62,7 +61,8 @@ impl LruCache { } self.access_counter += 1; - self.entries.insert(key, (Arc::new(value), self.access_counter)); + self.entries + .insert(key, (Arc::new(value), self.access_counter)); } fn evict_oldest(&mut self, count: usize) { @@ -154,153 +154,24 @@ impl Default for BytecodeCache { } } -// ============================================================================ -// Pinned Storage Cache -// ============================================================================ - -/// RAM-pinned storage cache for hot contracts. -/// -/// This cache stores storage slots for explicitly configured contracts in RAM, -/// providing fast access for frequently-accessed contracts like DEXes, bridges, -/// or popular tokens. -/// -/// Unlike the bytecode cache which uses LRU eviction, pinned storage is permanent -/// for the duration of the node's runtime - the configured contracts are always -/// kept in RAM. -#[derive(Debug)] -pub struct PinnedStorageCache { - /// Set of contract addresses that should be pinned - pinned_addresses: HashSet
, - /// Storage slots for pinned contracts: address -> (slot -> value) - storage: RwLock>>, -} - -impl PinnedStorageCache { - /// Creates a new pinned storage cache for the given contract addresses. - /// - /// Only storage reads/writes for these addresses will be cached. - pub fn new(pinned_addresses: Vec
) -> Self { - let addresses: HashSet<_> = pinned_addresses.into_iter().collect(); - let storage = addresses.iter().map(|addr| (*addr, HashMap::new())).collect(); - - Self { - pinned_addresses: addresses, - storage: RwLock::new(storage), - } - } - - /// Creates an empty cache with no pinned contracts. - pub fn empty() -> Self { - Self { - pinned_addresses: HashSet::new(), - storage: RwLock::new(HashMap::new()), - } - } - - /// Returns true if the given address is configured for pinning. - #[inline] - pub fn is_pinned(&self, address: &Address) -> bool { - self.pinned_addresses.contains(address) - } - - /// Returns the set of pinned addresses. - pub fn pinned_addresses(&self) -> &HashSet
{ - &self.pinned_addresses - } - - /// Retrieves a storage value from the cache. - /// - /// Returns `None` if: - /// - The address is not a pinned contract - /// - The slot has not been cached yet - pub fn get_storage(&self, address: &Address, slot: &U256) -> Option { - if !self.is_pinned(address) { - return None; - } - - let storage = self.storage.read().expect("storage lock poisoned"); - storage.get(address)?.get(slot).copied() - } - - /// Stores a storage value in the cache. - /// - /// Only stores if the address is a pinned contract. - pub fn set_storage(&self, address: Address, slot: U256, value: U256) { - if !self.is_pinned(&address) { - return; - } - - let mut storage = self.storage.write().expect("storage lock poisoned"); - storage.entry(address).or_default().insert(slot, value); - } - - /// Returns the number of cached storage slots for a given address. - pub fn slot_count(&self, address: &Address) -> usize { - self.storage - .read() - .expect("storage lock poisoned") - .get(address) - .map(|slots| slots.len()) - .unwrap_or(0) - } - - /// Returns the total number of cached storage slots across all contracts. - pub fn total_slot_count(&self) -> usize { - self.storage - .read() - .expect("storage lock poisoned") - .values() - .map(|slots| slots.len()) - .sum() - } - - /// Clears all cached storage for a specific address. - pub fn clear_address(&self, address: &Address) { - if let Some(slots) = self - .storage - .write() - .expect("storage lock poisoned") - .get_mut(address) - { - slots.clear(); - } - } - - /// Clears all cached storage. - pub fn clear(&self) { - let mut storage = self.storage.write().expect("storage lock poisoned"); - for slots in storage.values_mut() { - slots.clear(); - } - } -} - -impl Default for PinnedStorageCache { - fn default() -> Self { - Self::empty() - } -} - // ============================================================================ // Cached Database // ============================================================================ -/// A database wrapper that adds bytecode and storage caching to any underlying database. +/// A database wrapper that adds bytecode caching to any underlying database. /// -/// This wrapper provides two levels of caching: -/// - **Bytecode caching**: Caches immutable contract bytecode by code hash -/// - **Pinned storage**: RAM-pins storage slots for explicitly configured contracts +/// Contract bytecode is immutable after deployment, so caching provides +/// significant performance benefits for frequently-called contracts. /// /// # Example /// /// ```ignore -/// use ev_revm::cache::{BytecodeCache, PinnedStorageCache, CachedDatabase}; +/// use ev_revm::cache::{BytecodeCache, CachedDatabase}; /// use std::sync::Arc; /// /// let inner_db = StateProviderDatabase::new(&state_provider); /// let bytecode_cache = Arc::new(BytecodeCache::with_default_capacity()); -/// let pinned_storage = Arc::new(PinnedStorageCache::new(vec![uniswap_address, usdc_address])); -/// let cached_db = CachedDatabase::with_pinned_storage(inner_db, bytecode_cache, pinned_storage); +/// let cached_db = CachedDatabase::new(inner_db, bytecode_cache); /// ``` #[derive(Debug)] pub struct CachedDatabase { @@ -308,12 +179,10 @@ pub struct CachedDatabase { inner: DB, /// Shared bytecode cache bytecode_cache: Arc, - /// Optional pinned storage cache for hot contracts - pinned_storage: Option>, } impl CachedDatabase { - /// Creates a new cached database wrapper with bytecode caching only. + /// Creates a new cached database wrapper. 
/// /// # Arguments /// * `inner` - The underlying database to wrap @@ -322,25 +191,6 @@ impl CachedDatabase { Self { inner, bytecode_cache, - pinned_storage: None, - } - } - - /// Creates a new cached database wrapper with both bytecode and pinned storage caching. - /// - /// # Arguments - /// * `inner` - The underlying database to wrap - /// * `bytecode_cache` - Shared bytecode cache - /// * `pinned_storage` - Shared pinned storage cache for hot contracts - pub fn with_pinned_storage( - inner: DB, - bytecode_cache: Arc, - pinned_storage: Arc, - ) -> Self { - Self { - inner, - bytecode_cache, - pinned_storage: Some(pinned_storage), } } @@ -368,11 +218,6 @@ impl CachedDatabase { pub fn cache(&self) -> &Arc { &self.bytecode_cache } - - /// Returns a reference to the pinned storage cache, if configured. - pub fn pinned_storage(&self) -> Option<&Arc> { - self.pinned_storage.as_ref() - } } impl Database for CachedDatabase { @@ -398,22 +243,7 @@ impl Database for CachedDatabase { } fn storage(&mut self, address: Address, index: U256) -> Result { - // Check pinned storage cache first - if let Some(pinned) = &self.pinned_storage { - if let Some(value) = pinned.get_storage(&address, &index) { - return Ok(value); - } - } - - // Cache miss or not pinned - fetch from underlying database - let value = self.inner.storage(address, index)?; - - // Cache for future use if this is a pinned contract - if let Some(pinned) = &self.pinned_storage { - pinned.set_storage(address, index, value); - } - - Ok(value) + self.inner.storage(address, index) } fn block_hash(&mut self, number: u64) -> Result { @@ -513,7 +343,6 @@ mod tests { bytecodes: HashMap, storage: HashMap<(Address, U256), U256>, code_by_hash_call_count: std::cell::Cell, - storage_call_count: std::cell::Cell, } impl MockDatabase { @@ -526,18 +355,9 @@ mod tests { self } - fn with_storage(mut self, address: Address, slot: U256, value: U256) -> Self { - self.storage.insert((address, slot), value); - self - } - fn code_by_hash_call_count(&self) -> usize { self.code_by_hash_call_count.get() } - - fn storage_call_count(&self) -> usize { - self.storage_call_count.get() - } } impl Database for MockDatabase { @@ -548,13 +368,17 @@ mod tests { } fn code_by_hash(&mut self, code_hash: B256) -> Result { - self.code_by_hash_call_count.set(self.code_by_hash_call_count.get() + 1); + self.code_by_hash_call_count + .set(self.code_by_hash_call_count.get() + 1); Ok(self.bytecodes.get(&code_hash).cloned().unwrap_or_default()) } fn storage(&mut self, address: Address, index: U256) -> Result { - self.storage_call_count.set(self.storage_call_count.get() + 1); - Ok(self.storage.get(&(address, index)).copied().unwrap_or(U256::ZERO)) + Ok(self + .storage + .get(&(address, index)) + .copied() + .unwrap_or(U256::ZERO)) } fn block_hash(&mut self, _number: u64) -> Result { @@ -590,148 +414,10 @@ mod tests { // These should delegate to inner database assert!(cached_db.basic(Address::ZERO).unwrap().is_none()); - assert_eq!(cached_db.storage(Address::ZERO, U256::ZERO).unwrap(), U256::ZERO); + assert_eq!( + cached_db.storage(Address::ZERO, U256::ZERO).unwrap(), + U256::ZERO + ); assert_eq!(cached_db.block_hash(0).unwrap(), B256::ZERO); } - - // ======================================================================== - // PinnedStorageCache Tests - // ======================================================================== - - #[test] - fn test_pinned_storage_cache_basic_operations() { - let contract = Address::repeat_byte(0x42); - let cache = PinnedStorageCache::new(vec![contract]); - 
- // Initially empty - assert!(cache.is_pinned(&contract)); - assert_eq!(cache.slot_count(&contract), 0); - assert!(cache.get_storage(&contract, &U256::from(1)).is_none()); - - // Set a value - cache.set_storage(contract, U256::from(1), U256::from(100)); - - // Should be retrievable - assert_eq!(cache.get_storage(&contract, &U256::from(1)), Some(U256::from(100))); - assert_eq!(cache.slot_count(&contract), 1); - } - - #[test] - fn test_pinned_storage_cache_non_pinned_ignored() { - let pinned = Address::repeat_byte(0x01); - let not_pinned = Address::repeat_byte(0x02); - let cache = PinnedStorageCache::new(vec![pinned]); - - assert!(cache.is_pinned(&pinned)); - assert!(!cache.is_pinned(¬_pinned)); - - // Storing to non-pinned address should be ignored - cache.set_storage(not_pinned, U256::from(1), U256::from(100)); - assert!(cache.get_storage(¬_pinned, &U256::from(1)).is_none()); - assert_eq!(cache.total_slot_count(), 0); - } - - #[test] - fn test_pinned_storage_cache_multiple_contracts() { - let contract1 = Address::repeat_byte(0x01); - let contract2 = Address::repeat_byte(0x02); - let cache = PinnedStorageCache::new(vec![contract1, contract2]); - - cache.set_storage(contract1, U256::from(1), U256::from(100)); - cache.set_storage(contract2, U256::from(1), U256::from(200)); - cache.set_storage(contract1, U256::from(2), U256::from(300)); - - assert_eq!(cache.get_storage(&contract1, &U256::from(1)), Some(U256::from(100))); - assert_eq!(cache.get_storage(&contract2, &U256::from(1)), Some(U256::from(200))); - assert_eq!(cache.get_storage(&contract1, &U256::from(2)), Some(U256::from(300))); - assert_eq!(cache.total_slot_count(), 3); - } - - #[test] - fn test_pinned_storage_cache_clear() { - let contract = Address::repeat_byte(0x42); - let cache = PinnedStorageCache::new(vec![contract]); - - cache.set_storage(contract, U256::from(1), U256::from(100)); - cache.set_storage(contract, U256::from(2), U256::from(200)); - assert_eq!(cache.slot_count(&contract), 2); - - cache.clear_address(&contract); - assert_eq!(cache.slot_count(&contract), 0); - } - - #[test] - fn test_pinned_storage_cache_empty() { - let cache = PinnedStorageCache::empty(); - - assert!(!cache.is_pinned(&Address::ZERO)); - assert_eq!(cache.total_slot_count(), 0); - } - - // ======================================================================== - // CachedDatabase with Pinned Storage Tests - // ======================================================================== - - #[test] - fn test_cached_database_pinned_storage_hit() { - let pinned_contract = Address::repeat_byte(0x42); - let slot = U256::from(1); - let value = U256::from(12345); - - let mock_db = MockDatabase::new().with_storage(pinned_contract, slot, value); - let bytecode_cache = Arc::new(BytecodeCache::new(100)); - let pinned_storage = Arc::new(PinnedStorageCache::new(vec![pinned_contract])); - let mut cached_db = CachedDatabase::with_pinned_storage(mock_db, bytecode_cache, pinned_storage); - - // First call - cache miss, should hit database - let result1 = cached_db.storage(pinned_contract, slot).unwrap(); - assert_eq!(result1, value); - assert_eq!(cached_db.inner().storage_call_count(), 1); - - // Second call - cache hit, should NOT hit database - let result2 = cached_db.storage(pinned_contract, slot).unwrap(); - assert_eq!(result2, value); - assert_eq!(cached_db.inner().storage_call_count(), 1); // Still 1! 
- } - - #[test] - fn test_cached_database_non_pinned_not_cached() { - let pinned_contract = Address::repeat_byte(0x01); - let non_pinned_contract = Address::repeat_byte(0x02); - let slot = U256::from(1); - - let mock_db = MockDatabase::new() - .with_storage(non_pinned_contract, slot, U256::from(999)); - let bytecode_cache = Arc::new(BytecodeCache::new(100)); - let pinned_storage = Arc::new(PinnedStorageCache::new(vec![pinned_contract])); - let mut cached_db = CachedDatabase::with_pinned_storage(mock_db, bytecode_cache, pinned_storage); - - // First call to non-pinned contract - let result1 = cached_db.storage(non_pinned_contract, slot).unwrap(); - assert_eq!(result1, U256::from(999)); - assert_eq!(cached_db.inner().storage_call_count(), 1); - - // Second call - should still hit database (not cached) - let result2 = cached_db.storage(non_pinned_contract, slot).unwrap(); - assert_eq!(result2, U256::from(999)); - assert_eq!(cached_db.inner().storage_call_count(), 2); // Now 2! - } - - #[test] - fn test_cached_database_without_pinned_storage() { - let contract = Address::repeat_byte(0x42); - let slot = U256::from(1); - - let mock_db = MockDatabase::new().with_storage(contract, slot, U256::from(100)); - let bytecode_cache = Arc::new(BytecodeCache::new(100)); - // No pinned storage - using new() instead of with_pinned_storage() - let mut cached_db = CachedDatabase::new(mock_db, bytecode_cache); - - // All calls should hit database - cached_db.storage(contract, slot).unwrap(); - assert_eq!(cached_db.inner().storage_call_count(), 1); - - cached_db.storage(contract, slot).unwrap(); - assert_eq!(cached_db.inner().storage_call_count(), 2); - } } diff --git a/crates/ev-revm/src/lib.rs b/crates/ev-revm/src/lib.rs index b528e37..aff658e 100644 --- a/crates/ev-revm/src/lib.rs +++ b/crates/ev-revm/src/lib.rs @@ -10,7 +10,7 @@ pub mod handler; pub use api::EvBuilder; pub use base_fee::{BaseFeeRedirect, BaseFeeRedirectError}; -pub use cache::{BytecodeCache, CachedDatabase, PinnedStorageCache}; +pub use cache::{BytecodeCache, CachedDatabase}; pub use config::{BaseFeeConfig, ConfigError}; pub use evm::{DefaultEvEvm, EvEvm}; pub use factory::{ diff --git a/crates/node/src/builder.rs b/crates/node/src/builder.rs index 7d9ba71..b74fb3c 100644 --- a/crates/node/src/builder.rs +++ b/crates/node/src/builder.rs @@ -2,7 +2,7 @@ use crate::config::EvolvePayloadBuilderConfig; use alloy_consensus::transaction::Transaction; use alloy_evm::eth::EthEvmFactory; use alloy_primitives::Address; -use ev_revm::{BytecodeCache, CachedDatabase, EvEvmFactory, PinnedStorageCache}; +use ev_revm::{BytecodeCache, CachedDatabase, EvEvmFactory}; use evolve_ev_reth::EvolvePayloadAttributes; use reth_chainspec::{ChainSpec, ChainSpecProvider}; use reth_errors::RethError; @@ -31,8 +31,6 @@ pub struct EvolvePayloadBuilder { pub config: EvolvePayloadBuilderConfig, /// Shared bytecode cache for caching contract bytecode across payloads bytecode_cache: Arc, - /// Shared pinned storage cache for hot contracts (configured via chainspec) - pinned_storage: Arc, } impl EvolvePayloadBuilder @@ -53,7 +51,12 @@ where evm_config: EvolveEthEvmConfig, config: EvolvePayloadBuilderConfig, ) -> Self { - Self::with_cache_capacity(client, evm_config, config, Self::DEFAULT_BYTECODE_CACHE_CAPACITY) + Self::with_cache_capacity( + client, + evm_config, + config, + Self::DEFAULT_BYTECODE_CACHE_CAPACITY, + ) } /// Creates a new instance of `EvolvePayloadBuilder` with custom bytecode cache capacity @@ -78,24 +81,11 @@ where "Bytecode cache initialized" ); - // 
Initialize pinned storage cache from config - let pinned_contracts = config.pinned_contracts().to_vec(); - if !pinned_contracts.is_empty() { - info!( - target: "ev-reth", - contract_count = pinned_contracts.len(), - contracts = ?pinned_contracts, - "Pinned storage cache initialized for hot contracts" - ); - } - let pinned_storage = Arc::new(PinnedStorageCache::new(pinned_contracts)); - Self { client, evm_config, config, bytecode_cache: Arc::new(BytecodeCache::new(bytecode_cache_capacity)), - pinned_storage, } } @@ -112,19 +102,9 @@ where // Get the latest state provider let state_provider = self.client.latest().map_err(PayloadBuilderError::other)?; - // IMPORTANT: Clear pinned storage cache between blocks to avoid stale state. - // Storage is mutable (unlike bytecode), so we must invalidate cached values - // when starting a new block to ensure we read the latest committed state. - // The cache will be re-populated during this block's execution. - self.pinned_storage.clear(); - - // Create a database from the state provider with bytecode and storage caching + // Create a database from the state provider with bytecode caching let inner_db = StateProviderDatabase::new(&state_provider); - let cached_db = CachedDatabase::with_pinned_storage( - inner_db, - Arc::clone(&self.bytecode_cache), - Arc::clone(&self.pinned_storage), - ); + let cached_db = CachedDatabase::new(inner_db, Arc::clone(&self.bytecode_cache)); let mut state_db = State::builder() .with_database(cached_db) .with_bundle_update() From 73a30d2a7e2ddf08fd552e12b21ff5029f591f48 Mon Sep 17 00:00:00 2001 From: tac0turtle Date: Mon, 8 Dec 2025 14:20:43 +0100 Subject: [PATCH 3/4] lint --- crates/ev-revm/benches/cache_benchmark.rs | 10 +++++----- crates/ev-revm/src/cache.rs | 14 +++++++------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/crates/ev-revm/benches/cache_benchmark.rs b/crates/ev-revm/benches/cache_benchmark.rs index f73b42e..0f79750 100644 --- a/crates/ev-revm/benches/cache_benchmark.rs +++ b/crates/ev-revm/benches/cache_benchmark.rs @@ -136,7 +136,7 @@ fn bench_bytecode_cache_miss_vs_hit(c: &mut Criterion) { b.iter(|| { for i in 0..num_contracts { let code_hash = B256::repeat_byte((i % 256) as u8); - let result = db.code_by_hash(code_hash); + let _result = db.code_by_hash(code_hash); } }) }); @@ -156,7 +156,7 @@ fn bench_bytecode_cache_miss_vs_hit(c: &mut Criterion) { b.iter(|| { for i in 0..num_contracts { let code_hash = B256::repeat_byte((i % 256) as u8); - let result = cached_db.code_by_hash(code_hash); + let _result = cached_db.code_by_hash(code_hash); } }) }); @@ -184,7 +184,7 @@ fn bench_bytecode_cache_eviction(c: &mut Criterion) { for _ in 0..num_contracts { let i = rng.gen_range(0..num_contracts); let code_hash = B256::repeat_byte((i % 256) as u8); - let result = cached_db.code_by_hash(code_hash); + let _result = cached_db.code_by_hash(code_hash); } }) }); @@ -198,7 +198,7 @@ fn bench_bytecode_cache_eviction(c: &mut Criterion) { b.iter(|| { for i in 0..num_contracts { let code_hash = B256::repeat_byte((i % 256) as u8); - let result = cached_db.code_by_hash(code_hash); + let _result = cached_db.code_by_hash(code_hash); } }) }); @@ -234,7 +234,7 @@ fn bench_realistic_workload(c: &mut Criterion) { rng.gen_range(10..num_contracts) // Cold contract }; let code_hash = B256::repeat_byte((i % 256) as u8); - let result = cached_db.code_by_hash(code_hash); + let _result = cached_db.code_by_hash(code_hash); } }) }); diff --git a/crates/ev-revm/src/cache.rs b/crates/ev-revm/src/cache.rs index 8b4d950..5cb9736 
100644 --- a/crates/ev-revm/src/cache.rs +++ b/crates/ev-revm/src/cache.rs @@ -20,7 +20,7 @@ use std::{ /// avoiding repeated database lookups for frequently-called contracts. #[derive(Debug)] pub struct BytecodeCache { - /// The actual cache storage, protected by a RwLock for thread-safety. + /// The actual cache storage, protected by a `RwLock`` for thread-safety. /// Values are Arc'd to allow cheap cloning when returning cached bytecode. cache: RwLock, /// Maximum number of entries before eviction @@ -30,7 +30,7 @@ pub struct BytecodeCache { /// Simple LRU cache implementation #[derive(Debug)] struct LruCache { - /// Map from code hash to (bytecode, access_order) + /// Map from code hash to (bytecode, `access_order``) entries: HashMap, u64)>, /// Counter for tracking access order access_counter: u64, @@ -195,7 +195,7 @@ impl CachedDatabase { } /// Returns a reference to the underlying database. - pub fn inner(&self) -> &DB { + pub const fn inner(&self) -> &DB { &self.inner } @@ -210,12 +210,12 @@ impl CachedDatabase { } /// Returns a reference to the bytecode cache. - pub fn bytecode_cache(&self) -> &Arc { + pub const fn bytecode_cache(&self) -> &Arc { &self.bytecode_cache } /// Returns a reference to the bytecode cache (alias for backwards compatibility). - pub fn cache(&self) -> &Arc { + pub const fn cache(&self) -> &Arc { &self.bytecode_cache } } @@ -262,7 +262,7 @@ mod tests { // Create a test bytecode let code_hash = B256::repeat_byte(0x42); - let bytecode = Bytecode::new_raw(bytes!("6080604052").into()); + let bytecode = Bytecode::new_raw(bytes!("6080604052")); // Initially not in cache assert!(cache.get(&code_hash).is_none()); @@ -389,7 +389,7 @@ mod tests { #[test] fn test_cached_database_cache_hit() { let code_hash = B256::repeat_byte(0x42); - let bytecode = Bytecode::new_raw(bytes!("6080604052").into()); + let bytecode = Bytecode::new_raw(bytes!("6080604052")); let mock_db = MockDatabase::new().with_bytecode(code_hash, bytecode.clone()); let cache = Arc::new(BytecodeCache::new(100)); From 95f50c80ee8d602711a852511b5011bed799510b Mon Sep 17 00:00:00 2001 From: tac0turtle Date: Mon, 8 Dec 2025 14:30:45 +0100 Subject: [PATCH 4/4] cleanup --- crates/ev-revm/benches/cache_benchmark.rs | 1 + crates/ev-revm/src/cache.rs | 6 +- crates/node/src/config.rs | 73 ----------------------- 3 files changed, 4 insertions(+), 76 deletions(-) diff --git a/crates/ev-revm/benches/cache_benchmark.rs b/crates/ev-revm/benches/cache_benchmark.rs index 0f79750..a8bb72d 100644 --- a/crates/ev-revm/benches/cache_benchmark.rs +++ b/crates/ev-revm/benches/cache_benchmark.rs @@ -242,6 +242,7 @@ fn bench_realistic_workload(c: &mut Criterion) { group.finish(); } +#[allow(missing_docs)] criterion_group!( benches, bench_bytecode_cache_hit, diff --git a/crates/ev-revm/src/cache.rs b/crates/ev-revm/src/cache.rs index 5cb9736..1a1485b 100644 --- a/crates/ev-revm/src/cache.rs +++ b/crates/ev-revm/src/cache.rs @@ -20,7 +20,7 @@ use std::{ /// avoiding repeated database lookups for frequently-called contracts. #[derive(Debug)] pub struct BytecodeCache { - /// The actual cache storage, protected by a `RwLock`` for thread-safety. + /// The actual cache storage, protected by a `RwLock` for thread-safety. /// Values are Arc'd to allow cheap cloning when returning cached bytecode. 
cache: RwLock, /// Maximum number of entries before eviction @@ -30,7 +30,7 @@ pub struct BytecodeCache { /// Simple LRU cache implementation #[derive(Debug)] struct LruCache { - /// Map from code hash to (bytecode, `access_order``) + /// Map from code hash to (bytecode, `access_order`) entries: HashMap, u64)>, /// Counter for tracking access order access_counter: u64, @@ -187,7 +187,7 @@ impl CachedDatabase { /// # Arguments /// * `inner` - The underlying database to wrap /// * `bytecode_cache` - Shared bytecode cache (can be shared across multiple databases) - pub fn new(inner: DB, bytecode_cache: Arc) -> Self { + pub const fn new(inner: DB, bytecode_cache: Arc) -> Self { Self { inner, bytecode_cache, diff --git a/crates/node/src/config.rs b/crates/node/src/config.rs index 9491964..e8bb18b 100644 --- a/crates/node/src/config.rs +++ b/crates/node/src/config.rs @@ -21,10 +21,6 @@ struct ChainspecEvolveConfig { /// Block height at which the custom contract size limit activates. #[serde(default, rename = "contractSizeLimitActivationHeight")] pub contract_size_limit_activation_height: Option, - /// Contract addresses whose storage should be pinned in RAM for fast access. - /// These are typically hot contracts like DEXes, bridges, or popular tokens. - #[serde(default, rename = "pinnedContracts")] - pub pinned_contracts: Vec
, } /// Configuration for the Evolve payload builder @@ -48,9 +44,6 @@ pub struct EvolvePayloadBuilderConfig { /// Block height at which the custom contract size limit activates. #[serde(default)] pub contract_size_limit_activation_height: Option, - /// Contract addresses whose storage should be pinned in RAM for fast access. - #[serde(default)] - pub pinned_contracts: Vec
, } impl EvolvePayloadBuilderConfig { @@ -63,7 +56,6 @@ impl EvolvePayloadBuilderConfig { mint_precompile_activation_height: None, contract_size_limit: None, contract_size_limit_activation_height: None, - pinned_contracts: Vec::new(), } } @@ -98,21 +90,10 @@ impl EvolvePayloadBuilderConfig { config.contract_size_limit = extras.contract_size_limit; config.contract_size_limit_activation_height = extras.contract_size_limit_activation_height; - config.pinned_contracts = extras.pinned_contracts; } Ok(config) } - /// Returns the list of contract addresses whose storage should be pinned in RAM. - pub fn pinned_contracts(&self) -> &[Address] { - &self.pinned_contracts - } - - /// Returns true if the given address is configured for storage pinning. - pub fn is_pinned_contract(&self, address: &Address) -> bool { - self.pinned_contracts.contains(address) - } - /// Returns the contract size limit settings (limit, `activation_height`) if configured. /// Returns None if no custom limit is set (uses EIP-170 default). pub fn contract_size_limit_settings(&self) -> Option<(usize, u64)> { @@ -351,7 +332,6 @@ mod tests { mint_precompile_activation_height: Some(0), contract_size_limit: None, contract_size_limit_activation_height: None, - pinned_contracts: vec![], }; assert!(config_with_sink.validate().is_ok()); } @@ -366,7 +346,6 @@ mod tests { mint_precompile_activation_height: None, contract_size_limit: None, contract_size_limit_activation_height: None, - pinned_contracts: vec![], }; assert_eq!(config.base_fee_sink_for_block(4), None); @@ -496,56 +475,4 @@ mod tests { DEFAULT_CONTRACT_SIZE_LIMIT ); } - - #[test] - fn test_pinned_contracts_empty_by_default() { - let config = EvolvePayloadBuilderConfig::new(); - assert!(config.pinned_contracts().is_empty()); - assert!(!config.is_pinned_contract(&address!("0000000000000000000000000000000000000001"))); - } - - #[test] - fn test_pinned_contracts_from_chainspec() { - let contract1 = address!("0000000000000000000000000000000000000001"); - let contract2 = address!("0000000000000000000000000000000000000002"); - let extras = json!({ - "pinnedContracts": [ - "0x0000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000002" - ] - }); - - let chainspec = create_test_chainspec_with_extras(Some(extras)); - let config = EvolvePayloadBuilderConfig::from_chain_spec(&chainspec).unwrap(); - - assert_eq!(config.pinned_contracts().len(), 2); - assert!(config.is_pinned_contract(&contract1)); - assert!(config.is_pinned_contract(&contract2)); - assert!(!config.is_pinned_contract(&address!("0000000000000000000000000000000000000003"))); - } - - #[test] - fn test_pinned_contracts_empty_array() { - let extras = json!({ - "pinnedContracts": [] - }); - - let chainspec = create_test_chainspec_with_extras(Some(extras)); - let config = EvolvePayloadBuilderConfig::from_chain_spec(&chainspec).unwrap(); - - assert!(config.pinned_contracts().is_empty()); - } - - #[test] - fn test_pinned_contracts_not_specified() { - // When pinnedContracts is not specified at all - let extras = json!({ - "baseFeeSink": "0x0000000000000000000000000000000000000001" - }); - - let chainspec = create_test_chainspec_with_extras(Some(extras)); - let config = EvolvePayloadBuilderConfig::from_chain_spec(&chainspec).unwrap(); - - assert!(config.pinned_contracts().is_empty()); - } }