From 304f41eaf663548ff77b689574af35d709fb6866 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Tue, 16 Dec 2025 11:57:01 -0500 Subject: [PATCH 01/54] initial definition of `Storage` trait --- crates/core/src/storage/mod.rs | 66 ++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100644 crates/core/src/storage/mod.rs diff --git a/crates/core/src/storage/mod.rs b/crates/core/src/storage/mod.rs new file mode 100644 index 000000000..b0ce61474 --- /dev/null +++ b/crates/core/src/storage/mod.rs @@ -0,0 +1,66 @@ +// mod hash_map; +// #[cfg(feature = "sqlite")] +// mod sqlite; +// pub use hash_map::HashMap as StorageHashMap; +// #[cfg(feature = "sqlite")] +// pub use sqlite::SqliteStorage; +use surfpool_db::diesel::ConnectionError; + +use crate::error::SurfpoolError; + +#[derive(Debug, thiserror::Error)] +pub enum StorageError { + #[error("Invalid storage configuration: {0}")] + InvalidConfiguration(String), + #[error("Failed to get pooled connection for '{0}' database: {1}")] + PooledConnectionError(String, #[source] surfpool_db::diesel::r2d2::PoolError), + #[error("Failed to connect to {0} database: {1}")] + ConnectionError(String, ConnectionError), + #[error("Failed to serialize key for {0} database: {1}")] + SerializeKeyError(String, serde_json::Error), + #[error("Failed to serialize value for {0} database: {1}")] + SerializeValueError(String, serde_json::Error), + #[error("Failed to deserialize value in {0} database: {1}")] + DeserializeValueError(String, serde_json::Error), + #[error("Failed to acquire lock for database")] + LockError, + #[error("Query failed for table '{0}': {1}")] + QueryError(String, #[source] surfpool_db::diesel::result::Error), +} + +pub type StorageResult = Result; + +impl From for jsonrpc_core::Error { + fn from(err: StorageError) -> Self { + SurfpoolError::from(err).into() + } +} + +pub trait Storage: Send + Sync { + fn store(&mut self, key: K, value: V) -> StorageResult<()>; + fn clear(&mut self) -> 
StorageResult<()>; + fn get(&self, key: &K) -> StorageResult>; + fn take(&mut self, key: &K) -> StorageResult>; + fn keys(&self) -> StorageResult>; + fn into_iter(&self) -> StorageResult + '_>>; + fn contains_key(&self, key: &K) -> StorageResult { + Ok(self.get(key)?.is_some()) + } + + // Enable cloning of boxed trait objects + fn clone_box(&self) -> Box>; +} + +// Implement Clone for Box> +impl Clone for Box> { + fn clone(&self) -> Self { + self.clone_box() + } +} + +// Separate trait for construction - this doesn't need to be dyn-compatible +pub trait StorageConstructor: Storage + Clone { + fn connect(database_url: Option<&str>, table_name: &str) -> StorageResult + where + Self: Sized; +} From 893c1f32eb2ca740a517d2a0bf246577e682f8a1 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Tue, 16 Dec 2025 11:57:36 -0500 Subject: [PATCH 02/54] add implementation of `Storage` crate for sqlite storage --- crates/core/src/storage/mod.rs | 8 +- crates/core/src/storage/sqlite.rs | 313 ++++++++++++++++++++++++++++++ 2 files changed, 317 insertions(+), 4 deletions(-) create mode 100644 crates/core/src/storage/sqlite.rs diff --git a/crates/core/src/storage/mod.rs b/crates/core/src/storage/mod.rs index b0ce61474..0c63ce277 100644 --- a/crates/core/src/storage/mod.rs +++ b/crates/core/src/storage/mod.rs @@ -1,9 +1,9 @@ // mod hash_map; -// #[cfg(feature = "sqlite")] -// mod sqlite; +#[cfg(feature = "sqlite")] +mod sqlite; // pub use hash_map::HashMap as StorageHashMap; -// #[cfg(feature = "sqlite")] -// pub use sqlite::SqliteStorage; +#[cfg(feature = "sqlite")] +pub use sqlite::SqliteStorage; use surfpool_db::diesel::ConnectionError; use crate::error::SurfpoolError; diff --git a/crates/core/src/storage/sqlite.rs b/crates/core/src/storage/sqlite.rs new file mode 100644 index 000000000..82f3ef7a7 --- /dev/null +++ b/crates/core/src/storage/sqlite.rs @@ -0,0 +1,313 @@ +use log::debug; +use serde::{Deserialize, Serialize}; +use surfpool_db::diesel::{ + self, QueryableByName, 
RunQueryDsl, + connection::SimpleConnection, + r2d2::{ConnectionManager, Pool}, + sql_query, + sql_types::Text, +}; + +use crate::storage::{Storage, StorageConstructor, StorageError, StorageResult}; + +#[derive(QueryableByName, Debug)] +struct KvRecord { + #[diesel(sql_type = Text)] + key: String, + #[diesel(sql_type = Text)] + value: String, +} + +#[derive(QueryableByName, Debug)] +struct ValueRecord { + #[diesel(sql_type = Text)] + value: String, +} + +#[derive(QueryableByName, Debug)] +struct KeyRecord { + #[diesel(sql_type = Text)] + key: String, +} + +#[derive(Clone)] +pub struct SqliteStorage { + pool: Pool>, + _phantom: std::marker::PhantomData<(K, V)>, + table_name: String, +} + +const NAME: &str = "SQLite"; + +impl SqliteStorage +where + K: Serialize + for<'de> Deserialize<'de>, + V: Serialize + for<'de> Deserialize<'de> + Clone, +{ + fn ensure_table_exists(&self) -> StorageResult<()> { + debug!("Ensuring table '{}' exists", self.table_name); + let create_table_sql = format!( + " + CREATE TABLE IF NOT EXISTS {} ( + key TEXT PRIMARY KEY, + value TEXT NOT NULL, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP + ) + ", + self.table_name + ); + + debug!("Getting connection from pool for table creation"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + conn.batch_execute(&create_table_sql) + .map_err(|e| StorageError::QueryError(self.table_name.clone(), e))?; + + debug!("Successfully ensured table '{}' exists", self.table_name); + Ok(()) + } + + fn serialize_key(&self, key: &K) -> StorageResult { + debug!("Serializing key for table '{}'", self.table_name); + let result = + serde_json::to_string(key).map_err(|e| StorageError::SerializeKeyError(NAME.into(), e)); + if let Ok(ref serialized) = result { + debug!("Key serialized successfully: {}", serialized); + } + result + } + + fn serialize_value(&self, value: &V) -> StorageResult { + debug!("Serializing value for table '{}'", 
self.table_name); + let result = serde_json::to_string(value) + .map_err(|e| StorageError::SerializeValueError(NAME.into(), e)); + if let Ok(ref serialized) = result { + debug!( + "Value serialized successfully, length: {} chars", + serialized.len() + ); + } + result + } + + fn deserialize_value(&self, value_str: &str) -> StorageResult { + debug!( + "Deserializing value from table '{}', input length: {} chars", + self.table_name, + value_str.len() + ); + let result = serde_json::from_str(value_str) + .map_err(|e| StorageError::DeserializeValueError(NAME.into(), e)); + if result.is_ok() { + debug!("Value deserialized successfully"); + } + result + } + + fn load_value_from_db(&self, key_str: &str) -> StorageResult> { + debug!("Loading value from DB for key: {}", key_str); + let query = sql_query(format!( + "SELECT value FROM {} WHERE key = ?", + self.table_name + )) + .bind::(key_str); + + debug!("Getting connection from pool for loading value"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + let records = query + .load::(&mut *conn) + .map_err(|e| StorageError::QueryError(self.table_name.clone(), e))?; + + if let Some(record) = records.into_iter().next() { + debug!("Found record for key: {}", key_str); + let value = self.deserialize_value(&record.value)?; + Ok(Some(value)) + } else { + debug!("No record found for key: {}", key_str); + Ok(None) + } + } +} + +impl Storage for SqliteStorage +where + K: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, + V: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, +{ + fn store(&mut self, key: K, value: V) -> StorageResult<()> { + debug!("Storing value in table '{}", self.table_name); + let key_str = self.serialize_key(&key)?; + let value_str = self.serialize_value(&value)?; + + // Use prepared statement with sql_query for better safety + let query = sql_query(format!( + "INSERT OR REPLACE INTO {} (key, value, updated_at) VALUES (?, ?, CURRENT_TIMESTAMP)", + 
self.table_name + )) + .bind::(&key_str) + .bind::(&value_str); + + debug!("Getting connection from pool for store operation"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + query + .execute(&mut *conn) + .map_err(|e| StorageError::QueryError(self.table_name.clone(), e))?; + + debug!("Value stored successfully in table '{}'", self.table_name); + Ok(()) + } + + fn get(&self, key: &K) -> StorageResult> { + debug!("Getting value from table '{}", self.table_name); + let key_str = self.serialize_key(key)?; + + self.load_value_from_db(&key_str) + } + + fn take(&mut self, key: &K) -> StorageResult> { + debug!("Taking value from table '{}'", self.table_name); + let key_str = self.serialize_key(key)?; + + // If not in cache, try to load from database + if let Some(value) = self.load_value_from_db(&key_str)? { + debug!("Value found, removing from database"); + // Remove from database + let delete_query = sql_query(format!("DELETE FROM {} WHERE key = ?", self.table_name)) + .bind::(&key_str); + + debug!("Getting connection from pool for delete operation"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + delete_query + .execute(&mut *conn) + .map_err(|e| StorageError::QueryError(self.table_name.clone(), e))?; + + debug!( + "Value taken and removed successfully from table '{}'", + self.table_name + ); + Ok(Some(value)) + } else { + debug!("No value found to take from table '{}'", self.table_name); + Ok(None) + } + } + + fn clear(&mut self) -> StorageResult<()> { + debug!("Clearing all data from table '{}'", self.table_name); + let delete_query = sql_query(format!("DELETE FROM {}", self.table_name)); + + debug!("Getting connection from pool for clear operation"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + delete_query + .execute(&mut *conn) + .map_err(|e| StorageError::QueryError(self.table_name.clone(), e))?; + + debug!("Table '{}' cleared successfully", self.table_name); + Ok(()) + } + + 
fn keys(&self) -> StorageResult> { + debug!("Fetching all keys from table '{}'", self.table_name); + let query = sql_query(format!("SELECT key FROM {}", self.table_name)); + + debug!("Getting connection from pool for keys operation"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + let records = query + .load::(&mut *conn) + .map_err(|e| StorageError::QueryError(self.table_name.clone(), e))?; + + let mut keys = Vec::new(); + for record in records { + let key: K = serde_json::from_str(&record.key) + .map_err(|e| StorageError::DeserializeValueError(NAME.into(), e))?; + keys.push(key); + } + + debug!( + "Retrieved {} keys from table '{}'", + keys.len(), + self.table_name + ); + Ok(keys) + } + + fn clone_box(&self) -> Box> { + Box::new(self.clone()) + } + + fn into_iter(&self) -> StorageResult + '_>> { + debug!( + "Creating iterator for all key-value pairs in table '{}'", + self.table_name + ); + let query = sql_query(format!("SELECT key, value FROM {}", self.table_name)); + + debug!("Getting connection from pool for into_iter operation"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + let records = query + .load::(&mut *conn) + .map_err(|e| StorageError::QueryError(self.table_name.clone(), e))?; + + let iter = records.into_iter().filter_map(move |record| { + let key: K = match serde_json::from_str(&record.key) { + Ok(k) => k, + Err(e) => { + debug!("Failed to deserialize key: {}", e); + return None; + } + }; + let value: V = match serde_json::from_str(&record.value) { + Ok(v) => v, + Err(e) => { + debug!("Failed to deserialize value: {}", e); + return None; + } + }; + Some((key, value)) + }); + + debug!( + "Iterator created successfully for table '{}'", + self.table_name + ); + Ok(Box::new(iter)) + } +} + +impl StorageConstructor for SqliteStorage +where + K: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, + V: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, +{ + fn 
connect(database_url: Option<&str>, table_name: &str) -> StorageResult { + let database_url = database_url.unwrap_or(":memory:"); + debug!( + "Connecting to SQLite database: {} with table: {}", + database_url, table_name + ); + let manager = ConnectionManager::::new(database_url); + debug!("Creating connection pool"); + let pool = + Pool::new(manager).map_err(|e| StorageError::PooledConnectionError(NAME.into(), e))?; + + let storage = SqliteStorage { + pool, + _phantom: std::marker::PhantomData, + table_name: table_name.to_string(), + }; + + storage.ensure_table_exists()?; + debug!( + "SQLite storage connected successfully for table: {}", + table_name + ); + Ok(storage) + } +} From 82c813f7c446f1c0394229f02c0b7f5067a3f4f3 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Tue, 16 Dec 2025 11:58:12 -0500 Subject: [PATCH 03/54] update dependencies to have sqlite/postgres features --- .cargo/config.toml | 4 ++-- Cargo.lock | 2 ++ Cargo.toml | 1 + crates/cli/Cargo.toml | 4 ++-- crates/core/Cargo.toml | 5 +++++ 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index 67febe85c..cd47f8586 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,5 +1,5 @@ [alias] -surfpool-install = "install --path crates/cli --locked --force --features supervisor_ui --features version_check" -surfpool-install-dev = "install --path crates/cli --locked --force --features supervisor_ui" +surfpool-install = "install --path crates/cli --locked --force --features supervisor_ui --features version_check --features sqlite" +surfpool-install-dev = "install --path crates/cli --locked --force --features supervisor_ui --features sqlite" # useful for local builds that point to local txtx crates - prevents conflicts with the supervisor_ui feature surfpool-install-minimal = "install --path crates/cli --locked --force" diff --git a/Cargo.lock b/Cargo.lock index 35aa5de94..b11a1be3d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12292,9 +12292,11 @@ 
dependencies = [ "spl-associated-token-account-interface", "spl-token-2022-interface", "spl-token-interface", + "surfpool-db", "surfpool-subgraph", "surfpool-types", "test-case", + "thiserror 2.0.16", "tokio", "txtx-addon-kit", "txtx-addon-network-svm", diff --git a/Cargo.toml b/Cargo.toml index c24135b45..518bd572b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -148,6 +148,7 @@ spl-associated-token-account-interface = { version = "2.0.0", default-features = spl-token-2022-interface = { version = "2.0.0", default-features = false } spl-token-interface = { version = "2.0.0", default-features = false } test-case = "^3.3.1" +thiserror = "2.0" tokio = { version = "1.43.0", default-features = false } tokio-tungstenite = { version = "=0.20.1", default-features = false } toml = { version = "0.8.23", default-features = false } diff --git a/crates/cli/Cargo.toml b/crates/cli/Cargo.toml index a93192ef0..76a1a2622 100644 --- a/crates/cli/Cargo.toml +++ b/crates/cli/Cargo.toml @@ -73,8 +73,8 @@ cli = ["clap/derive", "clap/env", "clap_complete", "toml", "ctrlc", "hiro-system supervisor_ui = ["txtx-supervisor-ui/crates_build"] explorer = [] geyser_plugin = ["surfpool-core/geyser_plugin"] -sqlite = ["surfpool-gql/sqlite"] -postgres = ["surfpool-gql/postgres"] +sqlite = ["surfpool-gql/sqlite", "surfpool-core/sqlite"] +postgres = ["surfpool-gql/postgres", "surfpool-core/postgres"] version_check = [] subgraph = ["surfpool-core/subgraph"] diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml index ca47710e3..ef8e61fd2 100644 --- a/crates/core/Cargo.toml +++ b/crates/core/Cargo.toml @@ -84,9 +84,11 @@ solana-version = { workspace = true } spl-associated-token-account-interface = { workspace = true } spl-token-interface = { workspace = true } spl-token-2022-interface = { workspace = true } +thiserror = { workspace = true } tokio = { workspace = true } uuid = { workspace = true } +surfpool-db = { workspace = true } surfpool-subgraph = { workspace = true, optional = true } 
surfpool-types = { workspace = true } @@ -101,6 +103,9 @@ test-case = { workspace = true } env_logger = "*" [features] +default = ["sqlite"] +sqlite = ["surfpool-db/sqlite"] +postgres = ["surfpool-db/postgres"] ignore_tests_ci = [] geyser_plugin = ["solana-geyser-plugin-manager"] subgraph = ["surfpool-subgraph"] From c7648c285a4184dab2bf0c55e40ee4fd5f28447f Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Tue, 16 Dec 2025 11:59:22 -0500 Subject: [PATCH 04/54] publicize Storage mod; add error conversion --- crates/core/src/error.rs | 10 ++++++++++ crates/core/src/lib.rs | 1 + 2 files changed, 11 insertions(+) diff --git a/crates/core/src/error.rs b/crates/core/src/error.rs index 370670ab3..97801c91b 100644 --- a/crates/core/src/error.rs +++ b/crates/core/src/error.rs @@ -9,6 +9,8 @@ use solana_clock::Slot; use solana_pubkey::Pubkey; use solana_transaction_status::EncodeError; +use crate::storage::StorageError; + pub type SurfpoolResult = std::result::Result; #[derive(Debug, Clone)] @@ -447,3 +449,11 @@ impl SurfpoolError { Self(error) } } + +impl From for SurfpoolError { + fn from(e: StorageError) -> Self { + let mut error = Error::internal_error(); + error.data = Some(json!(format!("Storage error: {}", e.to_string()))); + SurfpoolError(error) + } +} diff --git a/crates/core/src/lib.rs b/crates/core/src/lib.rs index dfc81b567..9f1d3ebd2 100644 --- a/crates/core/src/lib.rs +++ b/crates/core/src/lib.rs @@ -15,6 +15,7 @@ pub mod helpers; pub mod rpc; pub mod runloops; pub mod scenarios; +pub mod storage; pub mod surfnet; pub mod types; From ff4c82e16b9aeaafd3f933e7a354dc15de0c4d88 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Tue, 16 Dec 2025 12:00:00 -0500 Subject: [PATCH 05/54] replace `HashMap` with `dyn Storage` for `blocks` store in svm --- crates/core/src/rpc/accounts_data.rs | 36 ++++--- crates/core/src/rpc/bank_data.rs | 11 +- crates/core/src/rpc/full.rs | 145 +++++++++++++++------------ crates/core/src/rpc/minimal.rs | 63 ++++++------ 
crates/core/src/storage/hash_map.rs | 32 ++++++ crates/core/src/surfnet/locker.rs | 4 +- crates/core/src/surfnet/mod.rs | 2 +- crates/core/src/surfnet/svm.rs | 16 +-- 8 files changed, 188 insertions(+), 121 deletions(-) create mode 100644 crates/core/src/storage/hash_map.rs diff --git a/crates/core/src/rpc/accounts_data.rs b/crates/core/src/rpc/accounts_data.rs index 9d7cb88f0..fe5c682a6 100644 --- a/crates/core/src/rpc/accounts_data.rs +++ b/crates/core/src/rpc/accounts_data.rs @@ -473,12 +473,13 @@ impl AccountsData for SurfpoolAccountsDataRpc { // get the info we need and free up lock before validation let (current_slot, block_exists) = meta .with_svm_reader(|svm_reader| { - ( - svm_reader.get_latest_absolute_slot(), - svm_reader.blocks.contains_key(&block), - ) + svm_reader + .blocks + .contains_key(&block) + .map_err(SurfpoolError::from) + .map(|exists| (svm_reader.get_latest_absolute_slot(), exists)) }) - .map_err(Into::::into)?; + .map_err(Into::::into)??; // block is valid if it exists in our block history or it's not too far in the future if !block_exists && block > current_slot { @@ -816,17 +817,20 @@ mod tests { setup.context.svm_locker.with_svm_writer(|svm_writer| { use crate::surfnet::BlockHeader; - svm_writer.blocks.insert( - test_slot, - BlockHeader { - hash: SyntheticBlockhash::new(test_slot).to_string(), - previous_blockhash: SyntheticBlockhash::new(test_slot - 1).to_string(), - parent_slot: test_slot - 1, - block_time: chrono::Utc::now().timestamp_millis(), - block_height: test_slot, - signatures: vec![], - }, - ); + svm_writer + .blocks + .store( + test_slot, + BlockHeader { + hash: SyntheticBlockhash::new(test_slot).to_string(), + previous_blockhash: SyntheticBlockhash::new(test_slot - 1).to_string(), + parent_slot: test_slot - 1, + block_time: chrono::Utc::now().timestamp_millis(), + block_height: test_slot, + signatures: vec![], + }, + ) + .unwrap(); }); let result = setup diff --git a/crates/core/src/rpc/bank_data.rs 
b/crates/core/src/rpc/bank_data.rs index 14746c1d8..40205dfdd 100644 --- a/crates/core/src/rpc/bank_data.rs +++ b/crates/core/src/rpc/bank_data.rs @@ -422,9 +422,14 @@ impl BankData for SurfpoolBankDataRpc { } fn get_inflation_rate(&self, meta: Self::Metadata) -> Result { - meta.with_svm_reader(|svm_reader| { - let inflation_activation_slot = - svm_reader.blocks.keys().min().copied().unwrap_or_default(); + meta.with_svm_reader(|svm_reader| -> RpcInflationRate { + let inflation_activation_slot = svm_reader + .blocks + .keys() + .unwrap_or_default() + .into_iter() + .min() + .unwrap_or_default(); let epoch_schedule = svm_reader.inner.get_sysvar::(); let inflation_start_slot = epoch_schedule.get_first_slot_in_epoch( epoch_schedule diff --git a/crates/core/src/rpc/full.rs b/crates/core/src/rpc/full.rs index a5053b7f2..93b13fd78 100644 --- a/crates/core/src/rpc/full.rs +++ b/crates/core/src/rpc/full.rs @@ -1789,11 +1789,11 @@ impl Full for SurfpoolFullRpc { .await .map_err(|e| SurfpoolError::client_error(e).into()) } else { - let min_slot = svm_locker.with_svm_reader(|svm_reader| { - svm_reader.blocks.keys().min().copied().unwrap_or(0) - }); - - Ok(min_slot) + svm_locker.with_svm_reader(|svm_reader| { + Ok::<_, jsonrpc_core::Error>( + svm_reader.blocks.keys()?.into_iter().min().unwrap_or(0), + ) + }) } }) } @@ -1833,11 +1833,13 @@ impl Full for SurfpoolFullRpc { Box::pin(async move { let block_time = svm_locker.with_svm_reader(|svm_reader| { - svm_reader - .blocks - .get(&slot) - .map(|block| (block.block_time / 1_000) as UnixTimestamp) - }); + Ok::<_, jsonrpc_core::Error>( + svm_reader + .blocks + .get(&slot)? 
+ .map(|block| (block.block_time / 1_000) as UnixTimestamp), + ) + })?; Ok(block_time) }) } @@ -1892,27 +1894,28 @@ impl Full for SurfpoolFullRpc { .map(|end| end.min(committed_latest_slot)) .unwrap_or(committed_latest_slot); - let (local_min_slot, local_slots, effective_end_slot) = - if effective_end_slot < start_slot { - (None, vec![], effective_end_slot) - } else { - svm_locker.with_svm_reader(|svm_reader| { - let local_min_slot = svm_reader.blocks.keys().min().copied(); - - let local_slots: Vec = svm_reader - .blocks - .keys() - .filter(|&&slot| { - slot >= start_slot - && slot <= effective_end_slot - && slot <= committed_latest_slot - }) - .copied() - .collect(); - - (local_min_slot, local_slots, effective_end_slot) - }) - }; + let (local_min_slot, local_slots, effective_end_slot) = if effective_end_slot + < start_slot + { + (None, vec![], effective_end_slot) + } else { + svm_locker.with_svm_reader(|svm_reader| { + let local_min_slot = svm_reader.blocks.keys()?.into_iter().min(); + + let local_slots: Vec = svm_reader + .blocks + .keys()? + .into_iter() + .filter(|slot| { + *slot >= start_slot + && *slot <= effective_end_slot + && *slot <= committed_latest_slot + }) + .collect(); + + Ok::<_, jsonrpc_core::Error>((local_min_slot, local_slots, effective_end_slot)) + })? + }; if let Some(min_context_slot) = config.min_context_slot { if committed_latest_slot < min_context_slot { @@ -2014,17 +2017,17 @@ impl Full for SurfpoolFullRpc { Box::pin(async move { let committed_latest_slot = svm_locker.get_slot_for_commitment(&commitment); let (local_min_slot, local_slots) = svm_locker.with_svm_reader(|svm_reader| { - let local_min_slot = svm_reader.blocks.keys().min().copied(); + let local_min_slot = svm_reader.blocks.keys()?.into_iter().min(); let local_slots: Vec = svm_reader .blocks - .keys() - .filter(|&&slot| slot >= start_slot && slot <= committed_latest_slot) - .copied() + .keys()? 
+ .into_iter() + .filter(|slot| *slot >= start_slot && *slot <= committed_latest_slot) .collect(); - (local_min_slot, local_slots) - }); + Ok::<_, jsonrpc_core::Error>((local_min_slot, local_slots)) + })?; if let Some(min_context_slot) = config.min_context_slot { if committed_latest_slot < min_context_slot { @@ -2138,8 +2141,15 @@ impl Full for SurfpoolFullRpc { fn get_first_available_block(&self, meta: Self::Metadata) -> Result { meta.with_svm_reader(|svm_reader| { - svm_reader.blocks.keys().min().copied().unwrap_or_default() - }) + Ok::<_, jsonrpc_core::Error>( + svm_reader + .blocks + .keys()? + .into_iter() + .min() + .unwrap_or_default(), + ) + })? .map_err(Into::into) } @@ -2289,7 +2299,7 @@ impl Full for SurfpoolFullRpc { // Get MAX_PRIORITIZATION_FEE_BLOCKS_CACHE most recent blocks let recent_headers = blocks - .into_iter() + .into_iter()? .sorted_by_key(|(slot, _)| std::cmp::Reverse(*slot)) .take(MAX_PRIORITIZATION_FEE_BLOCKS_CACHE) .collect::>(); @@ -3230,17 +3240,20 @@ mod tests { let block_height = svm_writer.chain_tip.index; let parent_slot = svm_writer.get_latest_absolute_slot(); - svm_writer.blocks.insert( - parent_slot, - BlockHeader { - hash, - previous_blockhash: previous_chain_tip.hash.clone(), - block_time: chrono::Utc::now().timestamp_millis(), - block_height, + svm_writer + .blocks + .store( parent_slot, - signatures: Vec::new(), - }, - ); + BlockHeader { + hash, + previous_blockhash: previous_chain_tip.hash.clone(), + block_time: chrono::Utc::now().timestamp_millis(), + block_height, + parent_slot, + signatures: Vec::new(), + }, + ) + .unwrap(); } let res = setup @@ -3827,18 +3840,21 @@ mod tests { let slots: Vec = slots.into_iter().collect(); setup.context.svm_locker.with_svm_writer(|svm_writer| { for slot in slots.iter() { - svm_writer.blocks.insert( - *slot, - BlockHeader { - hash: SyntheticBlockhash::new(*slot).to_string(), - previous_blockhash: SyntheticBlockhash::new(slot.saturating_sub(1)) - .to_string(), - block_time: 
chrono::Utc::now().timestamp_millis(), - block_height: *slot, - parent_slot: slot.saturating_sub(1), - signatures: vec![], - }, - ); + svm_writer + .blocks + .store( + *slot, + BlockHeader { + hash: SyntheticBlockhash::new(*slot).to_string(), + previous_blockhash: SyntheticBlockhash::new(slot.saturating_sub(1)) + .to_string(), + block_time: chrono::Utc::now().timestamp_millis(), + block_height: *slot, + parent_slot: slot.saturating_sub(1), + signatures: vec![], + }, + ) + .unwrap(); } svm_writer.latest_epoch_info.absolute_slot = slots.into_iter().max().unwrap_or(0); }); @@ -3899,7 +3915,7 @@ mod tests { insert_test_blocks(&setup, local_slots); let local_min = setup.context.svm_locker.with_svm_reader(|svm_reader| { - let min = svm_reader.blocks.keys().min().copied(); + let min = svm_reader.blocks.keys().unwrap().into_iter().min(); min }); assert_eq!(local_min, Some(50), "Local minimum should be slot 50"); @@ -3989,9 +4005,8 @@ mod tests { }); let (local_min, latest_slot) = setup.context.svm_locker.with_svm_reader(|svm_reader| { - let min = svm_reader.blocks.keys().min().copied(); + let min = svm_reader.blocks.keys().unwrap().into_iter().min(); let latest = svm_reader.get_latest_absolute_slot(); - let _available: Vec<_> = svm_reader.blocks.keys().copied().collect(); (min, latest) }); assert_eq!(local_min, Some(100), "Local minimum should be 100"); diff --git a/crates/core/src/rpc/minimal.rs b/crates/core/src/rpc/minimal.rs index 8e67b7298..96e3706df 100644 --- a/crates/core/src/rpc/minimal.rs +++ b/crates/core/src/rpc/minimal.rs @@ -695,7 +695,7 @@ impl Minimal for SurfpoolMinimalRpc { if let Some(target_slot) = config.min_context_slot { let block_exists = meta .with_svm_reader(|svm_reader| svm_reader.blocks.contains_key(&target_slot)) - .map_err(Into::::into)?; + .map_err(Into::::into)??; if !block_exists { return Err(jsonrpc_core::Error::invalid_params(format!( @@ -707,22 +707,23 @@ impl Minimal for SurfpoolMinimalRpc { meta.with_svm_reader(|svm_reader| { if let 
Some(target_slot) = config.min_context_slot { - if let Some(block_header) = svm_reader.blocks.get(&target_slot) { - return block_header.block_height; + if let Some(block_header) = svm_reader.blocks.get(&target_slot)? { + return Ok(block_header.block_height); } } // default behavior: return the latest block height with commitment adjustments let latest_block_height = svm_reader.latest_epoch_info.block_height; - match config.commitment.unwrap_or_default().commitment { + let block_height = match config.commitment.unwrap_or_default().commitment { CommitmentLevel::Processed => latest_block_height, CommitmentLevel::Confirmed => latest_block_height.saturating_sub(1), CommitmentLevel::Finalized => { latest_block_height.saturating_sub(FINALIZATION_SLOT_THRESHOLD) } - } - }) + }; + Ok::(block_height) + })? .map_err(Into::into) } @@ -871,17 +872,20 @@ mod tests { { let mut svm_writer = setup.context.svm_locker.0.blocking_write(); for (slot, block_height) in &test_cases { - svm_writer.blocks.insert( - *slot, - crate::surfnet::BlockHeader { - hash: SyntheticBlockhash::new(*slot).to_string(), - previous_blockhash: SyntheticBlockhash::new(slot - 1).to_string(), - block_time: chrono::Utc::now().timestamp_millis(), - block_height: *block_height, - parent_slot: slot - 1, - signatures: Vec::new(), - }, - ); + svm_writer + .blocks + .store( + *slot, + crate::surfnet::BlockHeader { + hash: SyntheticBlockhash::new(*slot).to_string(), + previous_blockhash: SyntheticBlockhash::new(slot - 1).to_string(), + block_time: chrono::Utc::now().timestamp_millis(), + block_height: *block_height, + parent_slot: slot - 1, + signatures: Vec::new(), + }, + ) + .unwrap(); } } @@ -914,17 +918,20 @@ mod tests { { let mut svm_writer = setup.context.svm_locker.0.blocking_write(); - svm_writer.blocks.insert( - 100, - crate::surfnet::BlockHeader { - hash: SyntheticBlockhash::new(100).to_string(), - previous_blockhash: SyntheticBlockhash::new(99).to_string(), - block_time: 
chrono::Utc::now().timestamp_millis(), - block_height: 50, - parent_slot: 99, - signatures: Vec::new(), - }, - ); + svm_writer + .blocks + .store( + 100, + crate::surfnet::BlockHeader { + hash: SyntheticBlockhash::new(100).to_string(), + previous_blockhash: SyntheticBlockhash::new(99).to_string(), + block_time: chrono::Utc::now().timestamp_millis(), + block_height: 50, + parent_slot: 99, + signatures: Vec::new(), + }, + ) + .unwrap(); } // slot that definitely doesn't exist diff --git a/crates/core/src/storage/hash_map.rs b/crates/core/src/storage/hash_map.rs new file mode 100644 index 000000000..608579a68 --- /dev/null +++ b/crates/core/src/storage/hash_map.rs @@ -0,0 +1,32 @@ +// pub use std::collections::HashMap; +// use std::hash::Hash; + +// impl super::Storage for HashMap +// where +// K: Eq + Hash, +// { +// fn connect(database_url: Option<&str>) -> super::StorageResult +// where +// Self: Sized, +// { +// if database_url.is_some() { +// return Err(super::StorageError::InvalidConfiguration( +// "HashMap storage does not support database URLs".to_string(), +// )); +// } +// Ok(HashMap::new()) +// } + +// fn store(&mut self, key: K, value: V) -> super::StorageResult<()> { +// self.insert(key, value); +// Ok(()) +// } + +// fn get(&self, key: &K) -> super::StorageResult> { +// Ok(self.get(key)) +// } + +// fn take(&mut self, key: &K) -> super::StorageResult> { +// Ok(self.remove(key)) +// } +// } diff --git a/crates/core/src/surfnet/locker.rs b/crates/core/src/surfnet/locker.rs index 0dcc7d956..85f9ed0b4 100644 --- a/crates/core/src/surfnet/locker.rs +++ b/crates/core/src/surfnet/locker.rs @@ -760,7 +760,7 @@ impl SurfnetSvmLocker { let slot = transaction_with_status_meta.slot; let block_time = svm_reader .blocks - .get(&slot) + .get(&slot)? 
.map(|b| (b.block_time / 1_000) as UnixTimestamp) .unwrap_or(0); let encoded = transaction_with_status_meta.encode( @@ -2681,7 +2681,7 @@ impl SurfnetSvmLocker { impl SurfnetSvmLocker { pub fn get_first_local_slot(&self) -> Option { - self.with_svm_reader(|svm_reader| svm_reader.blocks.keys().min().copied()) + self.with_svm_reader(|svm_reader| svm_reader.blocks.keys().unwrap().into_iter().min()) } pub async fn get_block( diff --git a/crates/core/src/surfnet/mod.rs b/crates/core/src/surfnet/mod.rs index c66a199a9..6176034f1 100644 --- a/crates/core/src/surfnet/mod.rs +++ b/crates/core/src/surfnet/mod.rs @@ -59,7 +59,7 @@ impl BlockIdentifier { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct BlockHeader { pub hash: String, pub previous_blockhash: String, diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index fef8584ad..997c88a37 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -101,6 +101,7 @@ use crate::{ error::{SurfpoolError, SurfpoolResult}, rpc::utils::convert_transaction_metadata_from_canonical, scenarios::TemplateRegistry, + storage::{SqliteStorage, Storage, StorageConstructor}, surfnet::{LogsSubscriptionData, locker::is_supported_token_program}, types::{ GeyserAccountUpdate, MintAccount, SurfnetTransactionStatus, SyntheticBlockhash, @@ -218,7 +219,7 @@ pub struct SurfnetSvm { pub inner: LiteSVM, pub remote_rpc_url: Option, pub chain_tip: BlockIdentifier, - pub blocks: HashMap, + pub blocks: Box>, pub transactions: HashMap, pub transactions_queued_for_confirmation: VecDeque<( VersionedTransaction, @@ -316,11 +317,13 @@ impl SurfnetSvm { let token_mints = HashMap::from([(spl_token_interface::native_mint::ID, parsed_mint_account)]); + let blocks_db = Box::new(SqliteStorage::connect(Some("surfnet.sqlite"), "blocks").unwrap()); + let mut svm = Self { inner, remote_rpc_url: None, chain_tip: BlockIdentifier::zero(), - blocks: HashMap::new(), + blocks: blocks_db, 
transactions: HashMap::new(), perf_samples: VecDeque::new(), transactions_processed: 0, @@ -1022,7 +1025,7 @@ impl SurfnetSvm { HashMap::from([(spl_token_interface::native_mint::ID, parsed_mint_account)]); self.inner = inner; - self.blocks.clear(); + self.blocks.clear()?; self.transactions.clear(); self.transactions_queued_for_confirmation.clear(); self.transactions_queued_for_finalization.clear(); @@ -1476,7 +1479,7 @@ impl SurfnetSvm { let num_transactions = confirmed_signatures.len() as u64; self.updated_at += self.slot_time; - self.blocks.insert( + self.blocks.store( slot, BlockHeader { hash: self.chain_tip.hash.clone(), @@ -1486,7 +1489,7 @@ impl SurfnetSvm { parent_slot: slot, signatures: confirmed_signatures, }, - ); + )?; if self.perf_samples.len() > 30 { self.perf_samples.pop_back(); } @@ -1968,7 +1971,7 @@ impl SurfnetSvm { slot: Slot, config: &RpcBlockConfig, ) -> SurfpoolResult> { - let Some(block) = self.blocks.get(&slot) else { + let Some(block) = self.blocks.get(&slot)? else { return Ok(None); }; @@ -2042,6 +2045,7 @@ impl SurfnetSvm { pub fn blockhash_for_slot(&self, slot: Slot) -> Option { self.blocks .get(&slot) + .unwrap() .and_then(|header| header.hash.parse().ok()) } From ffe5c8c7dc10e73e67c6b092307d39bee73a58af Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Fri, 19 Dec 2025 10:36:31 -0500 Subject: [PATCH 06/54] implement conversion from LiteSVMError to SurfpoolError --- crates/core/src/error.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/crates/core/src/error.rs b/crates/core/src/error.rs index 97801c91b..60de40472 100644 --- a/crates/core/src/error.rs +++ b/crates/core/src/error.rs @@ -2,6 +2,7 @@ use std::{fmt::Display, future::Future, pin::Pin}; use crossbeam_channel::TrySendError; use jsonrpc_core::{Error, Result}; +use litesvm::error::LiteSVMError; use serde::Serialize; use serde_json::json; use solana_client::{client_error::ClientError, rpc_request::TokenAccountsFilter}; @@ -457,3 +458,11 @@ impl From for SurfpoolError { 
SurfpoolError(error) } } + +impl From for SurfpoolError { + fn from(e: LiteSVMError) -> Self { + let mut error = Error::internal_error(); + error.data = Some(json!(format!("LiteSVM error: {}", e.to_string()))); + SurfpoolError(error) + } +} From 4e6ca32476e37eb67937348572e477677d799faa Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Fri, 19 Dec 2025 10:40:43 -0500 Subject: [PATCH 07/54] refactor error handling in Storage and Sqlite modules; enhance error context for query operations --- crates/core/src/storage/mod.rs | 102 +++++++++++++++++++++++++++--- crates/core/src/storage/sqlite.rs | 14 ++-- 2 files changed, 100 insertions(+), 16 deletions(-) diff --git a/crates/core/src/storage/mod.rs b/crates/core/src/storage/mod.rs index 0c63ce277..1a0756af7 100644 --- a/crates/core/src/storage/mod.rs +++ b/crates/core/src/storage/mod.rs @@ -10,22 +10,106 @@ use crate::error::SurfpoolError; #[derive(Debug, thiserror::Error)] pub enum StorageError { - #[error("Invalid storage configuration: {0}")] - InvalidConfiguration(String), #[error("Failed to get pooled connection for '{0}' database: {1}")] PooledConnectionError(String, #[source] surfpool_db::diesel::r2d2::PoolError), - #[error("Failed to connect to {0} database: {1}")] - ConnectionError(String, ConnectionError), - #[error("Failed to serialize key for {0} database: {1}")] + #[error("Failed to serialize key for '{0}' database: {1}")] SerializeKeyError(String, serde_json::Error), - #[error("Failed to serialize value for {0} database: {1}")] + #[error("Failed to serialize value for '{0}' database: {1}")] SerializeValueError(String, serde_json::Error), - #[error("Failed to deserialize value in {0} database: {1}")] + #[error("Failed to deserialize value in '{0}' database: {1}")] DeserializeValueError(String, serde_json::Error), #[error("Failed to acquire lock for database")] LockError, - #[error("Query failed for table '{0}': {1}")] - QueryError(String, #[source] surfpool_db::diesel::result::Error), + #[error("Query failed 
for table '{0}' in '{1}' database: {2}")] + QueryError(String, String, #[source] QueryExecuteError), +} + +impl StorageError { + pub fn create_table( + table_name: &str, + db_type: &str, + e: surfpool_db::diesel::result::Error, + ) -> Self { + StorageError::QueryError( + table_name.to_string(), + db_type.to_string(), + QueryExecuteError::CreateTableError(e), + ) + } + pub fn store( + table_name: &str, + db_type: &str, + store_key: &str, + e: surfpool_db::diesel::result::Error, + ) -> Self { + StorageError::QueryError( + table_name.to_string(), + db_type.to_string(), + QueryExecuteError::StoreError(store_key.to_string(), e), + ) + } + pub fn get( + table_name: &str, + db_type: &str, + get_key: &str, + e: surfpool_db::diesel::result::Error, + ) -> Self { + StorageError::QueryError( + table_name.to_string(), + db_type.to_string(), + QueryExecuteError::GetError(get_key.to_string(), e), + ) + } + pub fn delete( + table_name: &str, + db_type: &str, + delete_key: &str, + e: surfpool_db::diesel::result::Error, + ) -> Self { + StorageError::QueryError( + table_name.to_string(), + db_type.to_string(), + QueryExecuteError::DeleteError(delete_key.to_string(), e), + ) + } + pub fn get_all_keys( + table_name: &str, + db_type: &str, + e: surfpool_db::diesel::result::Error, + ) -> Self { + StorageError::QueryError( + table_name.to_string(), + db_type.to_string(), + QueryExecuteError::GetAllKeysError(e), + ) + } + pub fn get_all_key_value_pairs( + table_name: &str, + db_type: &str, + e: surfpool_db::diesel::result::Error, + ) -> Self { + StorageError::QueryError( + table_name.to_string(), + db_type.to_string(), + QueryExecuteError::GetAllKeyValuePairsError(e), + ) + } +} + +#[derive(Debug, thiserror::Error)] +pub enum QueryExecuteError { + #[error("Failed to create table: {0}")] + CreateTableError(#[source] surfpool_db::diesel::result::Error), + #[error("Failed to store value for key '{0}': {1}")] + StoreError(String, #[source] surfpool_db::diesel::result::Error), + #[error("Failed 
to get value for key '{0}': {1}")] + GetError(String, #[source] surfpool_db::diesel::result::Error), + #[error("Failed to delete value for key '{0}': {1}")] + DeleteError(String, #[source] surfpool_db::diesel::result::Error), + #[error("Failed to get all keys: {0}")] + GetAllKeysError(#[source] surfpool_db::diesel::result::Error), + #[error("Failed to get all key-value pairs: {0}")] + GetAllKeyValuePairsError(#[source] surfpool_db::diesel::result::Error), } pub type StorageResult = Result; diff --git a/crates/core/src/storage/sqlite.rs b/crates/core/src/storage/sqlite.rs index 82f3ef7a7..e74f84c2d 100644 --- a/crates/core/src/storage/sqlite.rs +++ b/crates/core/src/storage/sqlite.rs @@ -62,7 +62,7 @@ where let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; conn.batch_execute(&create_table_sql) - .map_err(|e| StorageError::QueryError(self.table_name.clone(), e))?; + .map_err(|e| StorageError::create_table(&self.table_name, NAME, e))?; debug!("Successfully ensured table '{}' exists", self.table_name); Ok(()) @@ -118,7 +118,7 @@ where let records = query .load::(&mut *conn) - .map_err(|e| StorageError::QueryError(self.table_name.clone(), e))?; + .map_err(|e| StorageError::get(&self.table_name, NAME, key_str, e))?; if let Some(record) = records.into_iter().next() { debug!("Found record for key: {}", key_str); @@ -154,7 +154,7 @@ where query .execute(&mut *conn) - .map_err(|e| StorageError::QueryError(self.table_name.clone(), e))?; + .map_err(|e| StorageError::store(&self.table_name, NAME, &key_str, e))?; debug!("Value stored successfully in table '{}'", self.table_name); Ok(()) @@ -183,7 +183,7 @@ where delete_query .execute(&mut *conn) - .map_err(|e| StorageError::QueryError(self.table_name.clone(), e))?; + .map_err(|e| StorageError::delete(&self.table_name, NAME, &key_str, e))?; debug!( "Value taken and removed successfully from table '{}'", @@ -205,7 +205,7 @@ where delete_query .execute(&mut *conn) - .map_err(|e| 
StorageError::QueryError(self.table_name.clone(), e))?; + .map_err(|e| StorageError::delete(&self.table_name, NAME, "*all*", e))?; debug!("Table '{}' cleared successfully", self.table_name); Ok(()) @@ -220,7 +220,7 @@ where let records = query .load::(&mut *conn) - .map_err(|e| StorageError::QueryError(self.table_name.clone(), e))?; + .map_err(|e| StorageError::get_all_keys(&self.table_name, NAME, e))?; let mut keys = Vec::new(); for record in records { @@ -253,7 +253,7 @@ where let records = query .load::(&mut *conn) - .map_err(|e| StorageError::QueryError(self.table_name.clone(), e))?; + .map_err(|e| StorageError::get_all_key_value_pairs(&self.table_name, NAME, e))?; let iter = records.into_iter().filter_map(move |record| { let key: K = match serde_json::from_str(&record.key) { From f730cf19612dcb538aa258e51858303a247d32f0 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Fri, 19 Dec 2025 10:41:19 -0500 Subject: [PATCH 08/54] refactor logging level from debug to trace in SqliteStorage methods --- crates/core/src/storage/sqlite.rs | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/crates/core/src/storage/sqlite.rs b/crates/core/src/storage/sqlite.rs index e74f84c2d..8a427a032 100644 --- a/crates/core/src/storage/sqlite.rs +++ b/crates/core/src/storage/sqlite.rs @@ -69,21 +69,21 @@ where } fn serialize_key(&self, key: &K) -> StorageResult { - debug!("Serializing key for table '{}'", self.table_name); + trace!("Serializing key for table '{}'", self.table_name); let result = serde_json::to_string(key).map_err(|e| StorageError::SerializeKeyError(NAME.into(), e)); if let Ok(ref serialized) = result { - debug!("Key serialized successfully: {}", serialized); + trace!("Key serialized successfully: {}", serialized); } result } fn serialize_value(&self, value: &V) -> StorageResult { - debug!("Serializing value for table '{}'", self.table_name); + trace!("Serializing value for table '{}'", self.table_name); let result = 
serde_json::to_string(value) .map_err(|e| StorageError::SerializeValueError(NAME.into(), e)); if let Ok(ref serialized) = result { - debug!( + trace!( "Value serialized successfully, length: {} chars", serialized.len() ); @@ -92,7 +92,7 @@ where } fn deserialize_value(&self, value_str: &str) -> StorageResult { - debug!( + trace!( "Deserializing value from table '{}', input length: {} chars", self.table_name, value_str.len() @@ -100,7 +100,7 @@ where let result = serde_json::from_str(value_str) .map_err(|e| StorageError::DeserializeValueError(NAME.into(), e)); if result.is_ok() { - debug!("Value deserialized successfully"); + trace!("Value deserialized successfully"); } result } @@ -113,7 +113,7 @@ where )) .bind::(key_str); - debug!("Getting connection from pool for loading value"); + trace!("Getting connection from pool for loading value"); let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; let records = query @@ -149,7 +149,7 @@ where .bind::(&key_str) .bind::(&value_str); - debug!("Getting connection from pool for store operation"); + trace!("Getting connection from pool for store operation"); let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; query @@ -178,7 +178,7 @@ where let delete_query = sql_query(format!("DELETE FROM {} WHERE key = ?", self.table_name)) .bind::(&key_str); - debug!("Getting connection from pool for delete operation"); + trace!("Getting connection from pool for delete operation"); let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; delete_query @@ -200,7 +200,7 @@ where debug!("Clearing all data from table '{}'", self.table_name); let delete_query = sql_query(format!("DELETE FROM {}", self.table_name)); - debug!("Getting connection from pool for clear operation"); + trace!("Getting connection from pool for clear operation"); let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; delete_query @@ -215,7 +215,7 @@ where debug!("Fetching all keys from table '{}'", 
self.table_name); let query = sql_query(format!("SELECT key FROM {}", self.table_name)); - debug!("Getting connection from pool for keys operation"); + trace!("Getting connection from pool for keys operation"); let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; let records = query @@ -248,7 +248,7 @@ where ); let query = sql_query(format!("SELECT key, value FROM {}", self.table_name)); - debug!("Getting connection from pool for into_iter operation"); + trace!("Getting connection from pool for into_iter operation"); let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; let records = query @@ -293,7 +293,7 @@ where database_url, table_name ); let manager = ConnectionManager::::new(database_url); - debug!("Creating connection pool"); + trace!("Creating connection pool"); let pool = Pool::new(manager).map_err(|e| StorageError::PooledConnectionError(NAME.into(), e))?; From 0c9b647e47ac62e3027773e540806151dbe18959 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Fri, 19 Dec 2025 10:41:40 -0500 Subject: [PATCH 09/54] refactor StorageConstructor connect method to require a non-optional database URL --- crates/core/src/storage/mod.rs | 2 +- crates/core/src/storage/sqlite.rs | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/crates/core/src/storage/mod.rs b/crates/core/src/storage/mod.rs index 1a0756af7..729698d10 100644 --- a/crates/core/src/storage/mod.rs +++ b/crates/core/src/storage/mod.rs @@ -144,7 +144,7 @@ impl Clone for Box> { // Separate trait for construction - this doesn't need to be dyn-compatible pub trait StorageConstructor: Storage + Clone { - fn connect(database_url: Option<&str>, table_name: &str) -> StorageResult + fn connect(database_url: &str, table_name: &str) -> StorageResult where Self: Sized; } diff --git a/crates/core/src/storage/sqlite.rs b/crates/core/src/storage/sqlite.rs index 8a427a032..811ed6487 100644 --- a/crates/core/src/storage/sqlite.rs +++ b/crates/core/src/storage/sqlite.rs @@ -286,8 
+286,7 @@ where K: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, V: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, { - fn connect(database_url: Option<&str>, table_name: &str) -> StorageResult { - let database_url = database_url.unwrap_or(":memory:"); + fn connect(database_url: &str, table_name: &str) -> StorageResult { debug!( "Connecting to SQLite database: {} with table: {}", database_url, table_name From 1aa70b6bc3ea3732757cf4003024a5b0361cd833 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Fri, 19 Dec 2025 10:44:15 -0500 Subject: [PATCH 10/54] implement Storage trait for HashMap --- crates/core/src/storage/hash_map.rs | 75 +++++++++++++++++------------ crates/core/src/storage/mod.rs | 5 +- 2 files changed, 45 insertions(+), 35 deletions(-) diff --git a/crates/core/src/storage/hash_map.rs b/crates/core/src/storage/hash_map.rs index 608579a68..54b62ce32 100644 --- a/crates/core/src/storage/hash_map.rs +++ b/crates/core/src/storage/hash_map.rs @@ -1,32 +1,43 @@ -// pub use std::collections::HashMap; -// use std::hash::Hash; - -// impl super::Storage for HashMap -// where -// K: Eq + Hash, -// { -// fn connect(database_url: Option<&str>) -> super::StorageResult -// where -// Self: Sized, -// { -// if database_url.is_some() { -// return Err(super::StorageError::InvalidConfiguration( -// "HashMap storage does not support database URLs".to_string(), -// )); -// } -// Ok(HashMap::new()) -// } - -// fn store(&mut self, key: K, value: V) -> super::StorageResult<()> { -// self.insert(key, value); -// Ok(()) -// } - -// fn get(&self, key: &K) -> super::StorageResult> { -// Ok(self.get(key)) -// } - -// fn take(&mut self, key: &K) -> super::StorageResult> { -// Ok(self.remove(key)) -// } -// } +use serde::{Deserialize, Serialize}; +pub use std::collections::HashMap; +use std::hash::Hash; + +impl super::Storage for HashMap +where + K: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static + std::cmp::Eq + 
Hash, + V: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, +{ + fn store(&mut self, key: K, value: V) -> super::StorageResult<()> { + self.insert(key, value); + Ok(()) + } + + fn clear(&mut self) -> super::StorageResult<()> { + self.clear(); + Ok(()) + } + + fn get(&self, key: &K) -> super::StorageResult> { + Ok(self.get(key).cloned()) + } + + fn take(&mut self, key: &K) -> super::StorageResult> { + Ok(self.remove(key)) + } + + fn keys(&self) -> super::StorageResult> { + Ok(self.keys().cloned().collect()) + } + + fn into_iter(&self) -> super::StorageResult + '_>> { + Ok(Box::new(self.clone().into_iter())) + } + + fn clone_box(&self) -> Box> { + Box::new(self.clone()) + } + + fn contains_key(&self, key: &K) -> super::StorageResult { + Ok(self.contains_key(key)) + } +} diff --git a/crates/core/src/storage/mod.rs b/crates/core/src/storage/mod.rs index 729698d10..a6ed215b9 100644 --- a/crates/core/src/storage/mod.rs +++ b/crates/core/src/storage/mod.rs @@ -1,10 +1,9 @@ -// mod hash_map; +mod hash_map; #[cfg(feature = "sqlite")] mod sqlite; -// pub use hash_map::HashMap as StorageHashMap; +pub use hash_map::HashMap as StorageHashMap; #[cfg(feature = "sqlite")] pub use sqlite::SqliteStorage; -use surfpool_db::diesel::ConnectionError; use crate::error::SurfpoolError; From b834a8a0ad7f4f3bdc3a5b607576e63d758a815c Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Fri, 19 Dec 2025 10:44:47 -0500 Subject: [PATCH 11/54] create SurfnetLiteSvm struct with database integration --- Cargo.toml | 1 + crates/core/Cargo.toml | 1 + crates/core/src/surfnet/mod.rs | 1 + crates/core/src/surfnet/surfnet_lite_svm.rs | 257 ++++++++++++++++++++ 4 files changed, 260 insertions(+) create mode 100644 crates/core/src/surfnet/surfnet_lite_svm.rs diff --git a/Cargo.toml b/Cargo.toml index 518bd572b..baa08e60c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -139,6 +139,7 @@ solana-signer = { version = "3.0.0", default-features = false } solana-slot-hashes = { version = "3.0.0", 
default-features = false } solana-system-interface = { version = "2.0.0", default-features = false } solana-sysvar = { version = "3.0.0", default-features = false } +solana-sysvar-id = { version = "3.0.0", default-features = false } solana-transaction = { version = "3.0.0", default-features = false } solana-transaction-context = { version = "3.0.0", default-features = false } solana-transaction-error = { version = "3.0.0", default-features = false } diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml index ef8e61fd2..cfeea1735 100644 --- a/crates/core/Cargo.toml +++ b/crates/core/Cargo.toml @@ -77,6 +77,7 @@ solana-signer = { workspace = true } solana-slot-hashes = { workspace = true } solana-system-interface = { workspace = true } solana-sysvar = { workspace = true } +solana-sysvar-id = { workspace = true } solana-transaction = { workspace = true } solana-transaction-error = { workspace = true } solana-transaction-status = { workspace = true } diff --git a/crates/core/src/surfnet/mod.rs b/crates/core/src/surfnet/mod.rs index 6176034f1..215b8848d 100644 --- a/crates/core/src/surfnet/mod.rs +++ b/crates/core/src/surfnet/mod.rs @@ -23,6 +23,7 @@ use crate::{ pub mod locker; pub mod remote; +pub mod surfnet_lite_svm; pub mod svm; pub const FINALIZATION_SLOT_THRESHOLD: u64 = 31; diff --git a/crates/core/src/surfnet/surfnet_lite_svm.rs b/crates/core/src/surfnet/surfnet_lite_svm.rs new file mode 100644 index 000000000..c0c8e154b --- /dev/null +++ b/crates/core/src/surfnet/surfnet_lite_svm.rs @@ -0,0 +1,257 @@ +use std::collections::HashMap; + +use agave_feature_set::FeatureSet; +use litesvm::{ + LiteSVM, + types::{FailedTransactionMetadata, SimulatedTransactionInfo, TransactionResult}, +}; +use solana_account::{Account, AccountSharedData}; +use solana_loader_v3_interface::get_program_data_address; +use solana_program_option::COption; +use solana_pubkey::Pubkey; +use solana_transaction::versioned::VersionedTransaction; + +use crate::{ + error::SurfpoolResult, + 
storage::{SqliteStorage, Storage, StorageConstructor}, + surfnet::{GetAccountResult, locker::is_supported_token_program}, +}; + +#[derive(Clone)] +pub struct SurfnetLiteSvm { + pub svm: LiteSVM, + pub db: Option>>, +} + +impl SurfnetLiteSvm { + pub fn new() -> Self { + Self { + svm: LiteSVM::new(), + db: None, + } + } + + pub fn initialize( + mut self, + feature_set: FeatureSet, + database_url: Option<&str>, + ) -> SurfpoolResult { + self.svm = LiteSVM::new() + .with_blockhash_check(false) + .with_sigverify(false) + .with_feature_set(feature_set); + + create_native_mint(&mut self); + + if let Some(db_url) = database_url { + let db: Box> = + Box::new(SqliteStorage::connect(db_url, "accounts")?); + self.db = Some(db); + } + + Ok(self) + } + + pub fn reset(&mut self, feature_set: FeatureSet) -> SurfpoolResult<()> { + self.svm = LiteSVM::new() + .with_blockhash_check(false) + .with_sigverify(false) + .with_feature_set(feature_set); + + create_native_mint(self); + + if let Some(db) = &mut self.db { + db.clear()?; + } + Ok(()) + } + + /// Perform garbage collection by resetting the SVM state while retaining the database. + /// This is useful for cleaning up unused accounts and reducing memory usage. + /// If no database is configured, this function is a no-op. 
+ pub fn garbage_collect(&mut self, feature_set: FeatureSet) { + // If no DB is configured, skip garbage collection + if self.db.is_none() { + return; + } + // todo: this is also resetting the log bytes limit and airdrop keypair, would be nice to avoid + self.svm = LiteSVM::new() + .with_blockhash_check(false) + .with_sigverify(false) + .with_feature_set(feature_set); + + create_native_mint(self); + } + + pub fn apply_feature_config(&mut self, feature_set: FeatureSet) -> &mut Self { + self.svm = LiteSVM::new() + .with_blockhash_check(false) + .with_sigverify(false) + .with_feature_set(feature_set); + + create_native_mint(self); + self + } + + pub fn set_log_bytes_limit(&mut self, limit: Option) { + self.svm.set_log_bytes_limit(limit); + } + + pub fn set_sigverify(&mut self, sigverify: bool) { + self.svm.set_sigverify(sigverify); + } + + pub fn with_blockhash_check(mut self, check: bool) -> Self { + self.svm = self.svm.with_blockhash_check(check); + self + } + + pub fn get_sysvar(&self) -> T + where + T: solana_sysvar::Sysvar + solana_sysvar_id::SysvarId + serde::de::DeserializeOwned, + { + self.svm.get_sysvar() + } + + pub fn set_sysvar(&mut self, sysvar: &T) + where + T: solana_sysvar::Sysvar + solana_sysvar_id::SysvarId + solana_sysvar::SysvarSerialize, + { + self.svm.set_sysvar(sysvar); + } + + pub fn expire_blockhash(&mut self) { + self.svm.expire_blockhash(); + } + + pub fn send_transaction(&mut self, tx: impl Into) -> TransactionResult { + self.svm.send_transaction(tx) + } + + pub fn minimum_balance_for_rent_exemption(&self, data_len: usize) -> u64 { + self.svm.minimum_balance_for_rent_exemption(data_len) + } + + pub fn simulate_transaction( + &self, + tx: impl Into, + ) -> Result { + self.svm.simulate_transaction(tx) + } + + pub fn airdrop(&mut self, pubkey: &Pubkey, lamports: u64) -> TransactionResult { + self.svm.airdrop(pubkey, lamports) + } + + pub fn get_account(&self, pubkey: &Pubkey) -> SurfpoolResult> { + if let Some(account) = 
self.svm.get_account(pubkey) { + return Ok(Some(account)); + } else if let Some(db) = &self.db { + return Ok(db.get(&pubkey.to_string())?.map::(Into::into)); + } + Ok(None) + } + + pub fn get_account_result(&self, pubkey: &Pubkey) -> SurfpoolResult { + if let Some(account) = self.svm.get_account(pubkey) { + return Ok(GetAccountResult::FoundAccount( + *pubkey, account, + // mark as not an account that should be updated in the SVM, since this is a local read and it already exists + false, + )); + } else if let Some(db) = &self.db { + let mut result = None; + if let Some(account) = db.get(&pubkey.to_string())?.map::(Into::into) { + if is_supported_token_program(&account.owner) { + if let Ok(token_account) = crate::types::TokenAccount::unpack(&account.data) { + let mint = db.get(&token_account.mint().to_string())?.map(Into::into); + + result = Some(GetAccountResult::FoundTokenAccount( + (*pubkey, account.clone()), + (token_account.mint(), mint), + )); + }; + } else if account.executable { + let program_data_address = get_program_data_address(pubkey); + + let program_data = db.get(&program_data_address.to_string())?.map(Into::into); + + result = Some(GetAccountResult::FoundProgramAccount( + (*pubkey, account.clone()), + (program_data_address, program_data), + )); + } + + return Ok(result.unwrap_or(GetAccountResult::FoundAccount( + *pubkey, account, + // Mark this account as needing to be updated in the SVM, since we pulled it from the db + true, + ))); + } + } + Ok(GetAccountResult::None(*pubkey)) + } + + pub fn set_account(&mut self, pubkey: Pubkey, account: Account) -> SurfpoolResult<()> { + self.set_account_in_db(pubkey, account.clone().into())?; + + self.svm.set_account(pubkey, account)?; + Ok(()) + } + + pub fn set_account_in_db( + &mut self, + pubkey: Pubkey, + account: AccountSharedData, + ) -> SurfpoolResult<()> { + if let Some(db) = &mut self.db { + db.store(pubkey.to_string(), account)?; + } + Ok(()) + } + + pub fn get_all_accounts(&self) -> SurfpoolResult> { 
+ // In general, we trust the LiteSVM state as the most up-to-date source of truth for any given account, + // But there's a chance that the account was garbage collected, meaning it exists in the DB but not in the SVM. + // Therefore, we need to merge the two sources of accounts, prioritizing the SVM state. + let mut accounts = HashMap::new(); + if let Some(db) = &self.db { + let db_accounts = db.into_iter()?; + for (key, account) in db_accounts { + let pubkey = Pubkey::from_str_const(&key); + accounts.insert(pubkey, account); + } + } + for (pubkey, account) in self.svm.accounts_db().inner.iter() { + if !accounts.contains_key(pubkey) { + accounts.insert(*pubkey, account.clone()); + } + } + Ok(accounts.into_iter().collect()) + } +} + +fn create_native_mint(svm: &mut SurfnetLiteSvm) { + use solana_program_pack::Pack; + use solana_sysvar::rent::Rent; + use spl_token_interface::state::Mint; + + let mut data = vec![0; Mint::LEN]; + let mint = Mint { + mint_authority: COption::None, + supply: 0, + decimals: spl_token_interface::native_mint::DECIMALS, + is_initialized: true, + freeze_authority: COption::None, + }; + Mint::pack(mint, &mut data).unwrap(); + let account = Account { + lamports: svm.get_sysvar::().minimum_balance(data.len()), + data, + owner: spl_token_interface::ID, + executable: false, + rent_epoch: 0, + }; + svm.set_account(spl_token_interface::native_mint::ID, account) + .expect("Failed to create native mint account in SVM"); +} From 7971d87191d0afb751eed1265eca1a82cc7140d4 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Fri, 19 Dec 2025 10:48:07 -0500 Subject: [PATCH 12/54] add `db` flag to CLI and use to pass db url to surfnet instantiation --- crates/cli/src/cli/mod.rs | 3 +++ crates/cli/src/cli/simnet/mod.rs | 4 +++- crates/core/src/surfnet/svm.rs | 16 ++++++++++++++-- 3 files changed, 20 insertions(+), 3 deletions(-) diff --git a/crates/cli/src/cli/mod.rs b/crates/cli/src/cli/mod.rs index ca4573685..cacd393d3 100644 --- a/crates/cli/src/cli/mod.rs 
+++ b/crates/cli/src/cli/mod.rs @@ -245,6 +245,9 @@ pub struct StartSimnet { /// A set of inputs to use for the runbook (eg. surfpool start --runbook-input myInputs.json) #[arg(long = "runbook-input", short = 'i')] pub runbook_input: Vec, + /// Surfnet database connection URL for persistent Surfnets. For an in-memory sqlite database, use ":memory:". For an on-disk sqlite database, use a filename ending in '.sqlite'. + #[arg(long = "db")] + pub db: Option, } fn parse_svm_feature(s: &str) -> Result { diff --git a/crates/cli/src/cli/simnet/mod.rs b/crates/cli/src/cli/simnet/mod.rs index 2c6955258..c514414a3 100644 --- a/crates/cli/src/cli/simnet/mod.rs +++ b/crates/cli/src/cli/simnet/mod.rs @@ -60,7 +60,9 @@ pub async fn handle_start_local_surfnet_command( } // We start the simnet as soon as possible, as it needs to be ready for deployments - let (mut surfnet_svm, simnet_events_rx, geyser_events_rx) = SurfnetSvm::new(); + let (mut surfnet_svm, simnet_events_rx, geyser_events_rx) = + SurfnetSvm::new_with_db(cmd.db.as_deref()) + .map_err(|e| format!("Failed to initialize Surfnet SVM: {}", e))?; // Apply feature configuration from CLI flags let feature_config = cmd.feature_config(); diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index 997c88a37..32113f856 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -284,10 +284,22 @@ pub const FEATURE: Feature = Feature { }; impl SurfnetSvm { + pub fn new() -> (Self, Receiver, Receiver) { + Self::_new(None).unwrap() + } + + pub fn new_with_db( + database_url: Option<&str>, + ) -> SurfpoolResult<(Self, Receiver, Receiver)> { + Self::_new(database_url) + } + /// Creates a new instance of `SurfnetSvm`. /// /// Returns a tuple containing the SVM instance, a receiver for simulation events, and a receiver for Geyser plugin events. 
- pub fn new() -> (Self, Receiver, Receiver) { + fn _new( + database_url: Option<&str>, + ) -> SurfpoolResult<(Self, Receiver, Receiver)> { let (simnet_events_tx, simnet_events_rx) = crossbeam_channel::bounded(1024); let (geyser_events_tx, geyser_events_rx) = crossbeam_channel::bounded(1024); @@ -378,7 +390,7 @@ impl SurfnetSvm { // Generate the initial synthetic blockhash svm.chain_tip = svm.new_blockhash(); - (svm, simnet_events_rx, geyser_events_rx) + Ok((svm, simnet_events_rx, geyser_events_rx)) } /// Applies the SVM feature configuration to the internal feature set. From c0ea22e3d5c2e1ac958a33177d093f4d1148224d Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Fri, 19 Dec 2025 10:49:40 -0500 Subject: [PATCH 13/54] implement db storage for `blocks` + `chain_tip` --- crates/core/src/surfnet/svm.rs | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index 32113f856..76f458fc9 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -329,12 +329,30 @@ impl SurfnetSvm { let token_mints = HashMap::from([(spl_token_interface::native_mint::ID, parsed_mint_account)]); - let blocks_db = Box::new(SqliteStorage::connect(Some("surfnet.sqlite"), "blocks").unwrap()); + let blocks_db = if let Some(database_url) = database_url { + Box::new(SqliteStorage::connect(database_url, "blocks")?) 
+ as Box> + } else { + Box::new(crate::storage::StorageHashMap::new()) as Box> + }; + + let chain_tip = if let Some((_, block)) = blocks_db + .into_iter() + .unwrap() + .max_by_key(|(slot, _): &(u64, BlockHeader)| *slot) + { + BlockIdentifier { + index: block.block_height, + hash: block.hash, + } + } else { + BlockIdentifier::zero() + }; let mut svm = Self { inner, remote_rpc_url: None, - chain_tip: BlockIdentifier::zero(), + chain_tip, blocks: blocks_db, transactions: HashMap::new(), perf_samples: VecDeque::new(), From 6baf02713741e5681e0236c03964f99f756dcb91 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Fri, 19 Dec 2025 10:55:24 -0500 Subject: [PATCH 14/54] update `svm.rs` to use `SurfnetLiteSvm` for `inner` - update `inner` to use `SurfnetLiteSvm` - update all needed `SurfnetSvm` functions to now return results for potential db breakage - propagate result changes to necessary locker/rpc fns --- crates/core/src/rpc/full.rs | 2 +- crates/core/src/rpc/minimal.rs | 5 +- crates/core/src/surfnet/locker.rs | 194 ++++++++++++++++++------------ crates/core/src/surfnet/svm.rs | 178 ++++++++++++++++----------- 4 files changed, 228 insertions(+), 151 deletions(-) diff --git a/crates/core/src/rpc/full.rs b/crates/core/src/rpc/full.rs index 93b13fd78..438398875 100644 --- a/crates/core/src/rpc/full.rs +++ b/crates/core/src/rpc/full.rs @@ -1531,7 +1531,7 @@ impl Full for SurfpoolFullRpc { }; let svm_locker = ctx.svm_locker; let res = svm_locker - .airdrop(&pubkey, lamports) + .airdrop(&pubkey, lamports)? 
.map_err(|err| Error::invalid_params(format!("failed to send transaction: {err:?}")))?; let _ = ctx .simnet_commands_tx diff --git a/crates/core/src/rpc/minimal.rs b/crates/core/src/rpc/minimal.rs index 96e3706df..8bfab80ff 100644 --- a/crates/core/src/rpc/minimal.rs +++ b/crates/core/src/rpc/minimal.rs @@ -693,9 +693,8 @@ impl Minimal for SurfpoolMinimalRpc { let config = config.unwrap_or_default(); if let Some(target_slot) = config.min_context_slot { - let block_exists = meta - .with_svm_reader(|svm_reader| svm_reader.blocks.contains_key(&target_slot)) - .map_err(Into::::into)??; + let block_exists = + meta.with_svm_reader(|svm_reader| svm_reader.blocks.contains_key(&target_slot))??; if !block_exists { return Err(jsonrpc_core::Error::invalid_params(format!( diff --git a/crates/core/src/surfnet/locker.rs b/crates/core/src/surfnet/locker.rs index 85f9ed0b4..324709e78 100644 --- a/crates/core/src/surfnet/locker.rs +++ b/crates/core/src/surfnet/locker.rs @@ -237,20 +237,19 @@ impl SurfnetSvmLocker { /// Retrieves a local account from the SVM cache, returning a contextualized result. 
pub fn get_account_local(&self, pubkey: &Pubkey) -> SvmAccessContext { self.with_contextualized_svm_reader(|svm_reader| { - match svm_reader.inner.get_account(pubkey) { - Some(account) => GetAccountResult::FoundAccount( - *pubkey, account, - // mark as not an account that should be updated in the SVM, since this is a local read and it already exists - false, - ), - None => match svm_reader.get_account_from_feature_set(pubkey) { + let result = svm_reader.inner.get_account_result(pubkey).unwrap(); + + if result.is_none() { + return match svm_reader.get_account_from_feature_set(pubkey) { Some(account) => GetAccountResult::FoundAccount( *pubkey, account, // mark as not an account that should be updated in the SVM, since this is a local read and it already exists false, ), None => GetAccountResult::None(*pubkey), - }, + }; + } else { + return result; } }) } @@ -312,22 +311,18 @@ impl SurfnetSvmLocker { let mut accounts = vec![]; for pubkey in pubkeys { - let res = match svm_reader.inner.get_account(pubkey) { - Some(account) => GetAccountResult::FoundAccount( - *pubkey, account, - // mark as not an account that should be updated in the SVM, since this is a local read and it already exists - false, - ), - None => match svm_reader.get_account_from_feature_set(pubkey) { + let mut result = svm_reader.inner.get_account_result(pubkey).unwrap(); + if result.is_none() { + result = match svm_reader.get_account_from_feature_set(pubkey) { Some(account) => GetAccountResult::FoundAccount( *pubkey, account, // mark as not an account that should be updated in the SVM, since this is a local read and it already exists false, ), None => GetAccountResult::None(*pubkey), - }, + } }; - accounts.push(res); + accounts.push(result); } accounts }) @@ -999,7 +994,7 @@ impl SurfnetSvmLocker { let accounts_before = transaction_accounts .iter() .map(|p| svm_reader.inner.get_account(p)) - .collect::>>(); + .collect::>, SurfpoolError>>()?; let token_accounts_before = transaction_accounts .iter() @@ 
-1012,13 +1007,19 @@ impl SurfnetSvmLocker { .map(|(i, ta)| { svm_reader .get_account(&transaction_accounts[*i]) - .map(|a| a.owner) - .unwrap_or(ta.token_program_id()) + .map(|res| res.map(|a| a.owner).unwrap_or(ta.token_program_id())) }) - .collect::>() - .clone(); - (accounts_before, token_accounts_before, token_programs) - }); + .collect::, SurfpoolError>>()?; + + Ok::< + ( + Vec>, + Vec<(usize, TokenAccount)>, + Vec, + ), + SurfpoolError, + >((accounts_before, token_accounts_before, token_programs)) + })?; let loaded_addresses = tx_loaded_addresses.as_ref().map(|l| l.loaded_addresses()); @@ -1247,7 +1248,7 @@ impl SurfnetSvmLocker { pre_execution_capture: ExecutionCapture, status_tx: Sender, do_propagate: bool, - ) -> ProfileResult { + ) -> SurfpoolResult { let FailedTransactionMetadata { err, meta } = failed_transaction_metadata; let cus = meta.compute_units_consumed; @@ -1258,7 +1259,7 @@ impl SurfnetSvmLocker { let accounts_after = pubkeys_from_message .iter() .map(|p| self.with_svm_reader(|svm_reader| svm_reader.inner.get_account(p))) - .collect::>>(); + .collect::>>>()?; for (pubkey, (before, after)) in pubkeys_from_message .iter() @@ -1352,13 +1353,13 @@ impl SurfnetSvmLocker { )); }); } - ProfileResult::new( + Ok(ProfileResult::new( pre_execution_capture, BTreeMap::new(), cus, Some(log_messages), Some(err_string), - ) + )) } #[allow(clippy::too_many_arguments)] @@ -1384,7 +1385,7 @@ impl SurfnetSvmLocker { let accounts_after = pubkeys_from_message .iter() .map(|p| svm_writer.inner.get_account(p)) - .collect::>>(); + .collect::>>>()?; let (sanitized_transaction, versioned_transaction) = if do_propagate { ( @@ -1590,7 +1591,7 @@ impl SurfnetSvmLocker { pre_execution_capture, status_tx.clone(), do_propagate, - ), + )?, }; Ok(res) } @@ -1759,7 +1760,7 @@ impl SurfnetSvmLocker { self.get_token_accounts_by_owner_local_then_remote(owner, filter, remote_client, config) .await } else { - Ok(self.get_token_accounts_by_owner_local(owner, filter, config)) + 
self.get_token_accounts_by_owner_local(owner, filter, config) } } @@ -1768,29 +1769,50 @@ impl SurfnetSvmLocker { owner: Pubkey, filter: &TokenAccountsFilter, config: &RpcAccountInfoConfig, - ) -> SvmAccessContext> { - self.with_contextualized_svm_reader(|svm_reader| { + ) -> SurfpoolContextualizedResult> { + let result = self.with_contextualized_svm_reader(|svm_reader| { svm_reader .get_parsed_token_accounts_by_owner(&owner) .iter() .filter_map(|(pubkey, token_account)| { - let account = svm_reader.get_account(pubkey)?; - if match filter { - TokenAccountsFilter::Mint(mint) => token_account.mint().eq(mint), - TokenAccountsFilter::ProgramId(program_id) => account.owner.eq(program_id), - } { - Some(svm_reader.account_to_rpc_keyed_account( - pubkey, - &account, - config, - Some(token_account.mint()), - )) - } else { - None - } + svm_reader + .get_account(pubkey) + .map(|res| { + let Some(account) = res else { + return None; + }; + if match filter { + TokenAccountsFilter::Mint(mint) => token_account.mint().eq(mint), + TokenAccountsFilter::ProgramId(program_id) => { + account.owner.eq(program_id) + } + } { + Some(svm_reader.account_to_rpc_keyed_account( + pubkey, + &account, + config, + Some(token_account.mint()), + )) + } else { + None + } + }) + .transpose() }) - .collect::>() - }) + .collect::>>() + }); + let SvmAccessContext { + slot, + latest_epoch_info, + latest_blockhash, + inner: accounts, + } = result; + Ok(SvmAccessContext::new( + slot, + latest_epoch_info, + latest_blockhash, + accounts?, + )) } pub async fn get_token_accounts_by_owner_local_then_remote( @@ -1805,7 +1827,7 @@ impl SurfnetSvmLocker { latest_epoch_info, latest_blockhash, inner: local_accounts, - } = self.get_token_accounts_by_owner_local(owner, filter, config); + } = self.get_token_accounts_by_owner_local(owner, filter, config)?; let remote_accounts = remote_client .get_token_accounts_by_owner(owner, filter, config) @@ -1855,7 +1877,7 @@ impl SurfnetSvmLocker { ) .await } else { - 
Ok(self.get_token_accounts_by_delegate_local(delegate, filter, config)) + self.get_token_accounts_by_delegate_local(delegate, filter, config) } } } @@ -1867,33 +1889,53 @@ impl SurfnetSvmLocker { delegate: Pubkey, filter: &TokenAccountsFilter, config: &RpcAccountInfoConfig, - ) -> SvmAccessContext> { - self.with_contextualized_svm_reader(|svm_reader| { + ) -> SurfpoolContextualizedResult> { + let result = self.with_contextualized_svm_reader(|svm_reader| { svm_reader .get_token_accounts_by_delegate(&delegate) .iter() .filter_map(|(pubkey, token_account)| { - let account = svm_reader.get_account(pubkey)?; - let include = match filter { - TokenAccountsFilter::Mint(mint) => token_account.mint() == *mint, - TokenAccountsFilter::ProgramId(program_id) => { - account.owner == *program_id && is_supported_token_program(program_id) - } - }; - - if include { - Some(svm_reader.account_to_rpc_keyed_account( - pubkey, - &account, - config, - Some(token_account.mint()), - )) - } else { - None - } + svm_reader + .get_account(pubkey) + .map(|res| { + let Some(account) = res else { + return None; + }; + let include = match filter { + TokenAccountsFilter::Mint(mint) => token_account.mint() == *mint, + TokenAccountsFilter::ProgramId(program_id) => { + account.owner == *program_id + && is_supported_token_program(program_id) + } + }; + + if include { + Some(svm_reader.account_to_rpc_keyed_account( + pubkey, + &account, + config, + Some(token_account.mint()), + )) + } else { + None + } + }) + .transpose() }) - .collect::>() - }) + .collect::>>() + }); + let SvmAccessContext { + slot, + latest_epoch_info, + latest_blockhash, + inner: accounts, + } = result; + Ok(SvmAccessContext::new( + slot, + latest_epoch_info, + latest_blockhash, + accounts?, + )) } pub async fn get_token_accounts_by_delegate_local_then_remote( @@ -1908,7 +1950,7 @@ impl SurfnetSvmLocker { latest_epoch_info, latest_blockhash, inner: local_accounts, - } = self.get_token_accounts_by_delegate_local(delegate, filter, 
config); + } = self.get_token_accounts_by_delegate_local(delegate, filter, config)?; let remote_accounts = remote_client .get_token_accounts_by_delegate(delegate, filter, config) @@ -2581,7 +2623,7 @@ impl SurfnetSvmLocker { filters: Option>, ) -> SurfpoolContextualizedResult> { let res = self.with_svm_reader(|svm_reader| { - let res = svm_reader.get_account_owned_by(program_id); + let res = svm_reader.get_account_owned_by(program_id)?; let mut filtered = vec![]; for (pubkey, account) in &res { @@ -2824,7 +2866,7 @@ impl SurfnetSvmLocker { /// Executes an airdrop via the underlying SVM. #[allow(clippy::result_large_err)] - pub fn airdrop(&self, pubkey: &Pubkey, lamports: u64) -> TransactionResult { + pub fn airdrop(&self, pubkey: &Pubkey, lamports: u64) -> SurfpoolResult { self.with_svm_writer(|svm_writer| svm_writer.airdrop(pubkey, lamports)) } diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index 76f458fc9..710d873b4 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -27,13 +27,9 @@ use base64::{Engine, prelude::BASE64_STANDARD}; use chrono::Utc; use convert_case::Casing; use crossbeam_channel::{Receiver, Sender, unbounded}; -use litesvm::{ - LiteSVM, - types::{ - FailedTransactionMetadata, SimulatedTransactionInfo, TransactionMetadata, TransactionResult, - }, +use litesvm::types::{ + FailedTransactionMetadata, SimulatedTransactionInfo, TransactionMetadata, TransactionResult, }; -use litesvm_token::create_native_mint; use solana_account::{Account, AccountSharedData, ReadableAccount}; use solana_account_decoder::{ UiAccount, UiAccountData, UiAccountEncoding, UiDataSliceConfig, encode_ui_account, @@ -102,7 +98,9 @@ use crate::{ rpc::utils::convert_transaction_metadata_from_canonical, scenarios::TemplateRegistry, storage::{SqliteStorage, Storage, StorageConstructor}, - surfnet::{LogsSubscriptionData, locker::is_supported_token_program}, + surfnet::{ + LogsSubscriptionData, 
locker::is_supported_token_program, surfnet_lite_svm::SurfnetLiteSvm, + }, types::{ GeyserAccountUpdate, MintAccount, SurfnetTransactionStatus, SyntheticBlockhash, TokenAccount, TransactionWithStatusMeta, @@ -216,7 +214,7 @@ pub fn get_txtx_value_json_converters() -> Vec> { /// It also exposes channels to listen for simulation events (`SimnetEvent`) and Geyser plugin events (`GeyserEvent`). #[derive(Clone)] pub struct SurfnetSvm { - pub inner: LiteSVM, + pub inner: SurfnetLiteSvm, pub remote_rpc_url: Option, pub chain_tip: BlockIdentifier, pub blocks: Box>, @@ -309,15 +307,10 @@ impl SurfnetSvm { // todo: consider making this configurable via config feature_set.deactivate(&enable_extend_program_checked::id()); - let mut inner = LiteSVM::new() - .with_feature_set(feature_set.clone()) - .with_blockhash_check(false) - .with_sigverify(false); + let inner = SurfnetLiteSvm::new().initialize(feature_set.clone(), database_url)?; - // Add the native mint (SOL) to the SVM - create_native_mint(&mut inner); let native_mint_account = inner - .get_account(&spl_token_interface::native_mint::ID) + .get_account(&spl_token_interface::native_mint::ID)? .unwrap(); let parsed_mint_account = MintAccount::unpack(&native_mint_account.data).unwrap(); @@ -433,14 +426,8 @@ impl SurfnetSvm { } } - // Rebuild LiteSVM with updated feature set - self.inner = LiteSVM::new() - .with_feature_set(self.feature_set.clone()) - .with_blockhash_check(false) - .with_sigverify(false); - - // Re-add the native mint - create_native_mint(&mut self.inner); + // Rebuild inner VM with updated feature set + self.inner.apply_feature_config(self.feature_set.clone()); } /// Maps an SvmFeature enum variant to its corresponding feature ID (Pubkey). @@ -592,13 +579,13 @@ impl SurfnetSvm { /// # Returns /// A `TransactionResult` indicating success or failure. 
#[allow(clippy::result_large_err)] - pub fn airdrop(&mut self, pubkey: &Pubkey, lamports: u64) -> TransactionResult { + pub fn airdrop(&mut self, pubkey: &Pubkey, lamports: u64) -> SurfpoolResult { let res = self.inner.airdrop(pubkey, lamports); let (status_tx, _rx) = unbounded(); if let Ok(ref tx_result) = res { let airdrop_keypair = Keypair::new(); let slot = self.latest_epoch_info.absolute_slot; - let account = self.get_account(pubkey).unwrap(); + let account = self.get_account(pubkey)?.unwrap(); let mut tx = VersionedTransaction::try_new( VersionedMessage::Legacy(Message::new( @@ -619,7 +606,7 @@ impl SurfnetSvm { tx.signatures[0] = tx_result.signature; let system_lamports = self - .get_account(&system_program::id()) + .get_account(&system_program::id())? .map(|a| a.lamports()) .unwrap_or(1); self.transactions.insert( @@ -669,10 +656,10 @@ impl SurfnetSvm { ); self.transactions_queued_for_confirmation .push_back((tx, status_tx.clone(), None)); - let account = self.get_account(pubkey).unwrap(); - let _ = self.set_account(pubkey, account); + let account = self.get_account(pubkey)?.unwrap(); + self.set_account(pubkey, account)?; } - res + Ok(res) } /// Airdrops a specified amount of lamports to a list of public keys. @@ -682,11 +669,20 @@ impl SurfnetSvm { /// * `addresses` - Slice of recipient public keys. 
pub fn airdrop_pubkeys(&mut self, lamports: u64, addresses: &[Pubkey]) { for recipient in addresses { - let _ = self.airdrop(recipient, lamports); - let _ = self.simnet_events_tx.send(SimnetEvent::info(format!( - "Genesis airdrop successful {}: {}", - recipient, lamports - ))); + match self.airdrop(recipient, lamports) { + Ok(_) => { + let _ = self.simnet_events_tx.send(SimnetEvent::info(format!( + "Genesis airdrop successful {}: {}", + recipient, lamports + ))); + } + Err(e) => { + let _ = self.simnet_events_tx.send(SimnetEvent::error(format!( + "Genesis airdrop failed {}: {}", + recipient, e + ))); + } + }; } } @@ -904,16 +900,19 @@ impl SurfnetSvm { pubkey: &Pubkey, account: &Account, ) -> SurfpoolResult<()> { + self.inner + .set_account_in_db(*pubkey, account.clone().into())?; + if account == &Account::default() { self.closed_accounts.insert(*pubkey); - if let Some(old_account) = self.get_account(pubkey) { + if let Some(old_account) = self.get_account(pubkey)? { self.remove_from_indexes(pubkey, &old_account); } return Ok(()); } // only update our indexes if the account exists in the svm accounts db - if let Some(old_account) = self.get_account(pubkey) { + if let Some(old_account) = self.get_account(pubkey)? { self.remove_from_indexes(pubkey, &old_account); } // add to owner index (check for duplicates) @@ -1033,16 +1032,11 @@ impl SurfnetSvm { } pub fn reset_network(&mut self) -> SurfpoolResult<()> { - // pub inner: LiteSVM, - let mut inner = LiteSVM::new() - .with_feature_set(self.feature_set.clone()) - .with_blockhash_check(false) - .with_sigverify(false); - - // Add the native mint (SOL) to the SVM - create_native_mint(&mut inner); - let native_mint_account = inner - .get_account(&spl_token_interface::native_mint::ID) + self.inner.reset(self.feature_set.clone())?; + + let native_mint_account = self + .inner + .get_account(&spl_token_interface::native_mint::ID)? 
.unwrap(); let parsed_mint_account = MintAccount::unpack(&native_mint_account.data).unwrap(); @@ -1054,7 +1048,6 @@ impl SurfnetSvm { let token_mints = HashMap::from([(spl_token_interface::native_mint::ID, parsed_mint_account)]); - self.inner = inner; self.blocks.clear()?; self.transactions.clear(); self.transactions_queued_for_confirmation.clear(); @@ -1083,7 +1076,7 @@ impl SurfnetSvm { pubkey: &Pubkey, include_owned_accounts: bool, ) -> SurfpoolResult<()> { - let Some(account) = self.get_account(pubkey) else { + let Some(account) = self.get_account(pubkey)? else { return Ok(()); }; @@ -1098,7 +1091,7 @@ impl SurfnetSvm { } } if include_owned_accounts { - let owned_accounts = self.get_account_owned_by(pubkey); + let owned_accounts = self.get_account_owned_by(pubkey)?; for (owned_pubkey, _) in owned_accounts { // Avoid infinite recursion by not cascading further self.purge_account_from_cache(&account, &owned_pubkey)?; @@ -1419,10 +1412,18 @@ impl SurfnetSvm { if let Some((programdata_address, programdata_account)) = init_programdata_account(&account) { - if self.get_account(&programdata_address).is_none() { - if let Err(e) = - self.set_account(&programdata_address, programdata_account) - { + match self.get_account(&programdata_address) { + Ok(None) => { + if let Err(e) = + self.set_account(&programdata_address, programdata_account) + { + let _ = self + .simnet_events_tx + .send(SimnetEvent::error(e.to_string())); + } + } + Ok(Some(_)) => {} + Err(e) => { let _ = self .simnet_events_tx .send(SimnetEvent::error(e.to_string())); @@ -1440,9 +1441,18 @@ impl SurfnetSvm { if let Some((programdata_address, programdata_account)) = init_programdata_account(&account) { - if self.get_account(&programdata_address).is_none() { - if let Err(e) = self.set_account(&programdata_address, programdata_account) - { + match self.get_account(&programdata_address) { + Ok(None) => { + if let Err(e) = + self.set_account(&programdata_address, programdata_account) + { + let _ = self + 
.simnet_events_tx + .send(SimnetEvent::error(e.to_string())); + } + } + Ok(Some(_)) => {} + Err(e) => { let _ = self .simnet_events_tx .send(SimnetEvent::error(e.to_string())); @@ -1486,9 +1496,13 @@ impl SurfnetSvm { } } - pub fn confirm_current_block(&mut self) -> Result<(), SurfpoolError> { + pub fn confirm_current_block(&mut self) -> SurfpoolResult<()> { let slot = self.get_latest_absolute_slot(); let previous_chain_tip = self.chain_tip.clone(); + if slot % 100 == 0 { + debug!("Clearing liteSVM cache at slot {}", slot); + self.inner.garbage_collect(self.feature_set.clone()); + } self.chain_tip = self.new_blockhash(); // Confirm processed transactions let (confirmed_signatures, all_mutated_account_keys) = self.confirm_transactions()?; @@ -1496,7 +1510,7 @@ impl SurfnetSvm { // Notify Geyser plugin of account updates for pubkey in all_mutated_account_keys { - let Some(account) = self.inner.get_account(&pubkey) else { + let Some(account) = self.inner.get_account(&pubkey)? else { continue; }; self.geyser_events_tx @@ -1686,7 +1700,7 @@ impl SurfnetSvm { ); // Get the account from the SVM - let Some(account) = self.inner.get_account(&account_pubkey) else { + let Some(account) = self.inner.get_account(&account_pubkey)? else { warn!( "Account {} not found in SVM for override {}, skipping modifications", account_pubkey, override_instance.id @@ -2088,18 +2102,23 @@ impl SurfnetSvm { /// # Returns /// /// * A vector of (account_pubkey, account) tuples for all accounts owned by the program. 
- pub fn get_account_owned_by(&self, program_id: &Pubkey) -> Vec<(Pubkey, Account)> { - if let Some(account_pubkeys) = self.accounts_by_owner.get(program_id) { + pub fn get_account_owned_by( + &self, + program_id: &Pubkey, + ) -> SurfpoolResult> { + let res = if let Some(account_pubkeys) = self.accounts_by_owner.get(program_id) { account_pubkeys .iter() .filter_map(|pubkey| { self.get_account(pubkey) - .map(|account| (*pubkey, account.clone())) + .map(|res| res.map(|account| (*pubkey, account.clone()))) + .transpose() }) - .collect() + .collect::, SurfpoolError>>()? } else { Vec::new() - } + }; + Ok(res) } fn get_additional_data( @@ -2180,16 +2199,25 @@ impl SurfnetSvm { } } - pub fn get_token_accounts_by_owner(&self, owner: &Pubkey) -> Vec<(Pubkey, Account)> { - self.token_accounts_by_owner + pub fn get_token_accounts_by_owner( + &self, + owner: &Pubkey, + ) -> SurfpoolResult> { + Ok(self + .token_accounts_by_owner .get(owner) .map(|account_pubkeys| { account_pubkeys .iter() - .filter_map(|pk| self.get_account(pk).map(|account| (*pk, account.clone()))) - .collect() + .filter_map(|pk| { + self.get_account(pk) + .map(|res| res.map(|account| (*pk, account.clone()))) + .transpose() + }) + .collect::, SurfpoolError>>() }) - .unwrap_or_default() + .transpose()? + .unwrap_or_default()) } /// Gets all token accounts for a specific mint (token type). 
@@ -2565,12 +2593,20 @@ impl SurfnetSvm { ) } - pub fn get_account(&self, pubkey: &Pubkey) -> Option { + pub fn get_account(&self, pubkey: &Pubkey) -> SurfpoolResult> { self.inner.get_account(pubkey) } pub fn iter_accounts(&self) -> std::collections::hash_map::Iter<'_, Pubkey, AccountSharedData> { - self.inner.accounts_db().inner.iter() + todo!() + // self.inner.accounts_db().inner.iter() + } + + pub fn get_transaction( + &self, + signature: &Signature, + ) -> SurfpoolResult> { + Ok(self.transactions.get(signature)) } pub fn start_runbook_execution(&mut self, runbook_id: String) { From 3b200caa6ed0d696ccd62371e019787b78b0c71b Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Fri, 19 Dec 2025 10:57:39 -0500 Subject: [PATCH 15/54] propagate changes in `inner` in `svm.rs` to all tests by adding unwraps on account fetching --- crates/core/src/rpc/accounts_data.rs | 4 + crates/core/src/rpc/full.rs | 9 +- crates/core/src/rpc/surfnet_cheatcodes.rs | 115 +++++++++++++++++----- crates/core/src/surfnet/svm.rs | 45 +++++---- crates/core/src/tests/helpers.rs | 7 -- crates/core/src/tests/integration.rs | 82 ++++++++++++--- 6 files changed, 197 insertions(+), 65 deletions(-) diff --git a/crates/core/src/rpc/accounts_data.rs b/crates/core/src/rpc/accounts_data.rs index fe5c682a6..eafb6451e 100644 --- a/crates/core/src/rpc/accounts_data.rs +++ b/crates/core/src/rpc/accounts_data.rs @@ -1082,6 +1082,7 @@ mod tests { .context .svm_locker .airdrop(&fee_payer.pubkey(), 1_000_000_000) + .unwrap() .unwrap(); // Airdrop 1 SOL to recipient for rent exemption @@ -1089,6 +1090,7 @@ mod tests { .context .svm_locker .airdrop(&recipient.pubkey(), 1_000_000_000) + .unwrap() .unwrap(); // Generate keypair to use as address of mint @@ -1295,6 +1297,7 @@ mod tests { .context .svm_locker .airdrop(&fee_payer.pubkey(), 1_000_000_000) + .unwrap() .unwrap(); // Airdrop 1 SOL to recipient for rent exemption @@ -1302,6 +1305,7 @@ mod tests { .context .svm_locker .airdrop(&recipient.pubkey(), 
1_000_000_000) + .unwrap() .unwrap(); // Generate keypair to use as address of mint diff --git a/crates/core/src/rpc/full.rs b/crates/core/src/rpc/full.rs index 438398875..aa711522e 100644 --- a/crates/core/src/rpc/full.rs +++ b/crates/core/src/rpc/full.rs @@ -2710,12 +2710,17 @@ mod tests { let sig = Signature::from_str(res.as_str()).unwrap(); let state_reader = setup.context.svm_locker.0.blocking_read(); assert_eq!( - state_reader.inner.get_account(&pk).unwrap().lamports, + state_reader + .inner + .get_account(&pk) + .unwrap() + .unwrap() + .lamports, lamports, "airdropped amount is incorrect" ); assert!( - state_reader.inner.get_transaction(&sig).is_some(), + state_reader.get_transaction(&sig).unwrap().is_some(), "transaction is not found in the SVM" ); assert!( diff --git a/crates/core/src/rpc/surfnet_cheatcodes.rs b/crates/core/src/rpc/surfnet_cheatcodes.rs index 16eca3027..47d32bc95 100644 --- a/crates/core/src/rpc/surfnet_cheatcodes.rs +++ b/crates/core/src/rpc/surfnet_cheatcodes.rs @@ -1866,6 +1866,7 @@ mod tests { .context .svm_locker .airdrop(&payer.pubkey(), 1_000_000_000) + .unwrap() .unwrap(); // Airdrop 1 SOL to recipient for rent exemption @@ -1873,6 +1874,7 @@ mod tests { .context .svm_locker .airdrop(&recipient.pubkey(), 1_000_000_000) + .unwrap() .unwrap(); // Generate keypair to use as address of mint @@ -3056,10 +3058,9 @@ mod tests { let program_data_address = solana_loader_v3_interface::get_program_data_address(&program_id.pubkey()); - let program_account_before = client - .context - .svm_locker - .with_svm_reader(|svm_reader| svm_reader.inner.get_account(&program_id.pubkey())); + let program_account_before = client.context.svm_locker.with_svm_reader(|svm_reader| { + svm_reader.inner.get_account(&program_id.pubkey()).unwrap() + }); assert!( program_account_before.is_none(), "Program account should not exist initially" @@ -3085,10 +3086,9 @@ mod tests { ); // Verify program account was created - let program_account = client - .context - 
.svm_locker - .with_svm_reader(|svm_reader| svm_reader.inner.get_account(&program_id.pubkey())); + let program_account = client.context.svm_locker.with_svm_reader(|svm_reader| { + svm_reader.inner.get_account(&program_id.pubkey()).unwrap() + }); assert!( program_account.is_some(), "Program account should be created" @@ -3105,10 +3105,9 @@ mod tests { ); // Verify program data account was created - let program_data_account = client - .context - .svm_locker - .with_svm_reader(|svm_reader| svm_reader.inner.get_account(&program_data_address)); + let program_data_account = client.context.svm_locker.with_svm_reader(|svm_reader| { + svm_reader.inner.get_account(&program_data_address).unwrap() + }); assert!( program_data_account.is_some(), "Program data account should be created" @@ -3157,7 +3156,11 @@ mod tests { let program_data_address = solana_loader_v3_interface::get_program_data_address(&program_id.pubkey()); let account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); let metadata_size = @@ -3202,7 +3205,11 @@ mod tests { let program_data_address = solana_loader_v3_interface::get_program_data_address(&program_id.pubkey()); let account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); let metadata_size = @@ -3255,7 +3262,11 @@ mod tests { let program_data_address = solana_loader_v3_interface::get_program_data_address(&program_id.pubkey()); let account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); let metadata_size = @@ -3314,7 +3325,11 @@ mod tests { let program_data_address = 
solana_loader_v3_interface::get_program_data_address(&program_id.pubkey()); let account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); let metadata_size = @@ -3378,7 +3393,11 @@ mod tests { let program_data_address = solana_loader_v3_interface::get_program_data_address(&program_id.pubkey()); let account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); let metadata_size = @@ -3427,7 +3446,11 @@ mod tests { let program_data_address = solana_loader_v3_interface::get_program_data_address(&program_id.pubkey()); let account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); let metadata_size = @@ -3463,7 +3486,11 @@ mod tests { let program_data_address = solana_loader_v3_interface::get_program_data_address(&program_id.pubkey()); let account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); let metadata_size = @@ -3528,7 +3555,11 @@ mod tests { let program_data_address = solana_loader_v3_interface::get_program_data_address(&program_id.pubkey()); let account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); let metadata_size = @@ -3631,6 +3662,7 @@ mod tests { .inner .get_account(&program_data_address) .unwrap() + .unwrap() .lamports }); @@ -3653,6 +3685,7 @@ mod tests { .inner 
.get_account(&program_data_address) .unwrap() + .unwrap() .lamports }); @@ -3663,7 +3696,11 @@ mod tests { // Verify rent exemption let account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); let required_lamports = client.context.svm_locker.with_svm_reader(|svm_reader| { @@ -3705,7 +3742,11 @@ mod tests { // Check program account ownership let program_account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_id.pubkey()).unwrap() + svm_reader + .inner + .get_account(&program_id.pubkey()) + .unwrap() + .unwrap() }); assert_eq!( program_account.owner, @@ -3719,7 +3760,11 @@ mod tests { // Check program data account ownership let program_data_account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); assert_eq!( program_data_account.owner, @@ -3776,7 +3821,11 @@ mod tests { // Get initial metadata let initial_account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); let metadata_size = @@ -3799,7 +3848,11 @@ mod tests { // Verify metadata is preserved let final_account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); let final_metadata = final_account.data[..metadata_size].to_vec(); @@ -3837,7 +3890,11 @@ mod tests { solana_loader_v3_interface::get_program_data_address(&program_id.pubkey()); let first_account = client.context.svm_locker.with_svm_reader(|svm_reader| { - 
svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); // Second write (same data, same offset) @@ -3854,7 +3911,11 @@ mod tests { .unwrap(); let second_account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); assert_eq!( diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index 710d873b4..0dee6e59e 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -3034,7 +3034,10 @@ mod tests { Ok(event) => match event { SimnetEvent::AccountUpdate(_, account_pubkey) => { assert_eq!(pubkey, &account_pubkey); - assert_eq!(svm.get_account(&pubkey).as_ref(), Some(expected_account)); + assert_eq!( + svm.get_account(&pubkey).unwrap().as_ref(), + Some(expected_account) + ); true } event => { @@ -3118,27 +3121,27 @@ mod tests { // GetAccountResult::None should be a noop when writing account updates { - let index_before = svm.inner.accounts_db().clone().inner; + let index_before = svm.inner.get_all_accounts().unwrap(); let empty_update = GetAccountResult::None(pubkey); svm.write_account_update(empty_update); - assert_eq!(svm.inner.accounts_db().clone().inner, index_before); + assert_eq!(svm.inner.get_all_accounts().unwrap(), index_before); } // GetAccountResult::FoundAccount with `DoUpdateSvm` flag to false should be a noop { - let index_before = svm.inner.accounts_db().clone().inner; + let index_before = svm.inner.get_all_accounts().unwrap(); let found_update = GetAccountResult::FoundAccount(pubkey, account.clone(), false); svm.write_account_update(found_update); - assert_eq!(svm.inner.accounts_db().clone().inner, index_before); + assert_eq!(svm.inner.get_all_accounts().unwrap(), index_before); } // GetAccountResult::FoundAccount with `DoUpdateSvm` flag to true should update the 
account { - let index_before = svm.inner.accounts_db().clone().inner; + let index_before = svm.inner.get_all_accounts().unwrap(); let found_update = GetAccountResult::FoundAccount(pubkey, account.clone(), true); svm.write_account_update(found_update); assert_eq!( - svm.inner.accounts_db().clone().inner.len(), + svm.inner.get_all_accounts().unwrap().len(), index_before.len() + 1 ); if !expect_account_update_event(&events_rx, &svm, &pubkey, &account) { @@ -3172,7 +3175,7 @@ mod tests { rent_epoch: 0, }; - let index_before = svm.inner.accounts_db().clone().inner; + let index_before = svm.inner.get_all_accounts().unwrap(); let found_program_account_update = GetAccountResult::FoundProgramAccount( (program_address, program_account.clone()), (program_data_address, None), @@ -3196,7 +3199,7 @@ mod tests { ); } assert_eq!( - svm.inner.accounts_db().clone().inner.len(), + svm.inner.get_all_accounts().unwrap().len(), index_before.len() + 2 ); } @@ -3206,14 +3209,14 @@ mod tests { let (program_address, program_account, program_data_address, program_data_account) = create_program_accounts(); - let index_before = svm.inner.accounts_db().clone().inner; + let index_before = svm.inner.get_all_accounts().unwrap(); let found_program_account_update = GetAccountResult::FoundProgramAccount( (program_address, program_account.clone()), (program_data_address, Some(program_data_account.clone())), ); svm.write_account_update(found_program_account_update); assert_eq!( - svm.inner.accounts_db().clone().inner.len(), + svm.inner.get_all_accounts().unwrap().len(), index_before.len() + 2 ); if !expect_account_update_event( @@ -3240,7 +3243,7 @@ mod tests { let (program_address, program_account, program_data_address, program_data_account) = create_program_accounts(); - let index_before = svm.inner.accounts_db().clone().inner; + let index_before = svm.inner.get_all_accounts().unwrap(); let found_update = GetAccountResult::FoundAccount( program_data_address, program_data_account.clone(), @@ -3248,7 
+3251,7 @@ mod tests { ); svm.write_account_update(found_update); assert_eq!( - svm.inner.accounts_db().clone().inner.len(), + svm.inner.get_all_accounts().unwrap().len(), index_before.len() + 1 ); if !expect_account_update_event( @@ -3262,14 +3265,14 @@ mod tests { ); } - let index_before = svm.inner.accounts_db().clone().inner; + let index_before = svm.inner.get_all_accounts().unwrap(); let program_account_found_update = GetAccountResult::FoundProgramAccount( (program_address, program_account.clone()), (program_data_address, None), ); svm.write_account_update(program_account_found_update); assert_eq!( - svm.inner.accounts_db().clone().inner.len(), + svm.inner.get_all_accounts().unwrap().len(), index_before.len() + 1 ); if !expect_account_update_event(&events_rx, &svm, &program_address, &program_account) { @@ -3688,6 +3691,7 @@ mod tests { assert!( svm.inner .get_account(&spl_token_interface::native_mint::ID) + .unwrap() .is_some() ); @@ -3698,6 +3702,7 @@ mod tests { assert!( svm.inner .get_account(&spl_token_interface::native_mint::ID) + .unwrap() .is_some() ); } @@ -3786,7 +3791,10 @@ mod tests { svm.set_account(&token_account_pubkey, account).unwrap(); - assert_eq!(svm.get_token_accounts_by_owner(&token_owner).len(), 1); + assert_eq!( + svm.get_token_accounts_by_owner(&token_owner).unwrap().len(), + 1 + ); assert_eq!(svm.get_token_accounts_by_delegate(&delegate).len(), 1); assert!(!svm.closed_accounts.contains(&token_account_pubkey)); @@ -3796,7 +3804,10 @@ mod tests { assert!(svm.closed_accounts.contains(&token_account_pubkey)); - assert_eq!(svm.get_token_accounts_by_owner(&token_owner).len(), 0); + assert_eq!( + svm.get_token_accounts_by_owner(&token_owner).unwrap().len(), + 0 + ); assert_eq!(svm.get_token_accounts_by_delegate(&delegate).len(), 0); assert!(svm.token_accounts.get(&token_account_pubkey).is_none()); } diff --git a/crates/core/src/tests/helpers.rs b/crates/core/src/tests/helpers.rs index a07226847..b048dfc41 100644 --- 
a/crates/core/src/tests/helpers.rs +++ b/crates/core/src/tests/helpers.rs @@ -2,7 +2,6 @@ use std::net::TcpListener; use crossbeam_channel::Sender; -use litesvm::LiteSVM; use solana_clock::Clock; use solana_epoch_info::EpochInfo; use solana_transaction::versioned::VersionedTransaction; @@ -83,12 +82,6 @@ where setup } - pub fn new_with_svm(rpc: T, svm: LiteSVM) -> Self { - let setup = TestSetup::new(rpc); - setup.context.svm_locker.0.blocking_write().inner = svm; - setup - } - pub fn new_with_mempool(rpc: T, simnet_commands_tx: Sender) -> Self { let mut setup = TestSetup::new(rpc); setup.context.simnet_commands_tx = simnet_commands_tx; diff --git a/crates/core/src/tests/integration.rs b/crates/core/src/tests/integration.rs index 41cc8b3cb..47ff149ba 100644 --- a/crates/core/src/tests/integration.rs +++ b/crates/core/src/tests/integration.rs @@ -702,6 +702,7 @@ async fn test_surfnet_estimate_compute_units() { svm_instance .airdrop(&payer.pubkey(), lamports_to_send * 2) + .unwrap() .unwrap(); let instruction = transfer(&payer.pubkey(), &recipient, lamports_to_send); @@ -913,7 +914,7 @@ async fn test_surfnet_estimate_compute_units() { response_no_tag_again.is_ok(), "RPC call with None tag (again) failed" ); - let rpc_response_no_tag_again_value = response_no_tag_again.unwrap().value; + let _rpc_response_no_tag_again_value = response_no_tag_again.unwrap().value; println!("Retrieving profile results for tag: {} again", tag1); let results_response_tag1_again = @@ -946,6 +947,7 @@ async fn test_surfnet_estimate_compute_units() { let (mut svm_for_send, simnet_rx_for_send, _geyser_rx_for_send) = SurfnetSvm::new(); svm_for_send .airdrop(&payer.pubkey(), lamports_to_send * 2) + .unwrap() .unwrap(); let latest_blockhash_for_send = svm_for_send.latest_blockhash(); @@ -988,6 +990,7 @@ async fn test_get_transaction_profile() { svm_instance .airdrop(&payer.pubkey(), lamports_to_send * 2) + .unwrap() .unwrap(); // Create a transaction to profile @@ -1466,6 +1469,7 @@ async fn 
test_profile_transaction_basic() { // Airdrop SOL to payer svm_locker .with_svm_writer(|svm| svm.airdrop(&payer.pubkey(), lamports_to_send * 2)) + .unwrap() .unwrap(); // Create a simple transfer transaction @@ -1545,6 +1549,7 @@ async fn test_profile_transaction_multi_instruction_basic() { svm_locker .with_svm_writer(|svm| svm.airdrop(&payer.pubkey(), lamports_to_send * 4)) + .unwrap() .unwrap(); // Create a multi-instruction transaction: 3 transfers to different recipients @@ -1941,6 +1946,7 @@ async fn test_profile_transaction_with_tag() { // Airdrop SOL to payer svm_locker .with_svm_writer(|svm| svm.airdrop(&payer.pubkey(), lamports_to_send * 3)) + .unwrap() .unwrap(); // Create a simple transfer transaction @@ -2100,6 +2106,7 @@ async fn test_profile_transaction_token_transfer() { // Airdrop SOL to payer svm_locker .airdrop(&payer.pubkey(), lamports_to_send) + .unwrap() .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -2132,7 +2139,7 @@ async fn test_profile_transaction_token_transfer() { &spl_token_2022_interface::id(), ); println!("Source ATA: {}", source_ata); - let dest_ata = spl_associated_token_account_interface::address::get_associated_token_address_with_program_id( + let _dest_ata = spl_associated_token_account_interface::address::get_associated_token_address_with_program_id( &recipient, &mint.pubkey(), &spl_token_2022_interface::id(), @@ -2146,7 +2153,7 @@ async fn test_profile_transaction_token_transfer() { &spl_token_2022_interface::id(), ); - let create_dest_ata_ix = + let _create_dest_ata_ix = spl_associated_token_account_interface::instruction::create_associated_token_account( &payer.pubkey(), &recipient, @@ -2156,7 +2163,7 @@ async fn test_profile_transaction_token_transfer() { // Mint tokens let mint_amount = 100_00; // 100 tokens with 2 decimals - let mint_to_ix = spl_token_2022_interface::instruction::mint_to( + let _mint_to_ix = spl_token_2022_interface::instruction::mint_to( 
&spl_token_2022_interface::id(), &mint.pubkey(), &source_ata, @@ -2531,6 +2538,7 @@ async fn test_profile_transaction_insufficient_funds() { svm_locker .airdrop(&payer.pubkey(), insufficient_funds) + .unwrap() .unwrap(); // Create a transfer transaction that will fail due to insufficient funds @@ -2597,6 +2605,7 @@ async fn test_profile_transaction_multi_instruction_failure() { // Airdrop SOL to payer svm_locker .airdrop(&payer.pubkey(), lamports_to_send * 3) + .unwrap() .unwrap(); // Create a multi-instruction transaction where the second instruction will fail @@ -2675,6 +2684,7 @@ async fn test_profile_transaction_with_encoding() { // Airdrop SOL to payer svm_locker .with_svm_writer(|svm| svm.airdrop(&payer.pubkey(), lamports_to_send * 2)) + .unwrap() .unwrap(); // Create a simple transfer transaction @@ -2742,6 +2752,7 @@ async fn test_profile_transaction_with_tag_and_retrieval() { // Airdrop SOL to payer svm_locker .with_svm_writer(|svm| svm.airdrop(&payer.pubkey(), lamports_to_send * 3)) + .unwrap() .unwrap(); // Create a simple transfer transaction @@ -2840,6 +2851,7 @@ async fn test_profile_transaction_empty_instruction() { // Airdrop SOL to payer svm_locker .airdrop(&payer.pubkey(), lamports_to_send) + .unwrap() .unwrap(); // Create a transaction with no instructions @@ -2896,6 +2908,7 @@ async fn test_profile_transaction_versioned_message() { // Airdrop SOL to payer svm_locker .airdrop(&payer.pubkey(), 2 * lamports_to_send) + .unwrap() .unwrap(); svm_locker.confirm_current_block(&None).await.unwrap(); @@ -2967,6 +2980,7 @@ async fn test_get_local_signatures_without_limit() { svm_locker_for_context .airdrop(&payer.pubkey(), lamports_to_send * 2) + .unwrap() .unwrap(); svm_locker_for_context @@ -3066,6 +3080,7 @@ async fn test_get_local_signatures_with_limit() { svm_locker_for_context .airdrop(&payer.pubkey(), lamports_to_send * 10) + .unwrap() .unwrap(); svm_locker_for_context @@ -3546,8 +3561,14 @@ async fn test_ix_profiling_with_alt_tx() { let p1 = 
Keypair::new(); let p2 = Keypair::new(); - svm_locker.airdrop(&p1.pubkey(), LAMPORTS_PER_SOL).unwrap(); - svm_locker.airdrop(&p2.pubkey(), LAMPORTS_PER_SOL).unwrap(); + svm_locker + .airdrop(&p1.pubkey(), LAMPORTS_PER_SOL) + .unwrap() + .unwrap(); + svm_locker + .airdrop(&p2.pubkey(), LAMPORTS_PER_SOL) + .unwrap() + .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -3825,7 +3846,10 @@ async fn it_should_delete_accounts_with_no_lamports() { let p1 = Keypair::new(); let p2 = Keypair::new(); - svm_locker.airdrop(&p1.pubkey(), LAMPORTS_PER_SOL).unwrap(); + svm_locker + .airdrop(&p1.pubkey(), LAMPORTS_PER_SOL) + .unwrap() + .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -3871,7 +3895,10 @@ async fn test_compute_budget_profiling() { let p1 = Keypair::new(); let p2 = Keypair::new(); - svm_locker.airdrop(&p1.pubkey(), LAMPORTS_PER_SOL).unwrap(); + svm_locker + .airdrop(&p1.pubkey(), LAMPORTS_PER_SOL) + .unwrap() + .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -3933,7 +3960,10 @@ fn test_reset_account() { let svm_locker = SurfnetSvmLocker::new(svm_instance); let p1 = Keypair::new(); println!("P1 pubkey: {}", p1.pubkey()); - svm_locker.airdrop(&p1.pubkey(), LAMPORTS_PER_SOL).unwrap(); // account is created in the SVM + svm_locker + .airdrop(&p1.pubkey(), LAMPORTS_PER_SOL) + .unwrap() + .unwrap(); // account is created in the SVM.unwrap() println!("Airdropped SOL to p1"); println!( @@ -4011,7 +4041,10 @@ async fn test_reset_streamed_account() { let svm_locker = SurfnetSvmLocker::new(svm_instance); let p1 = Keypair::new(); println!("P1 pubkey: {}", p1.pubkey()); - svm_locker.airdrop(&p1.pubkey(), LAMPORTS_PER_SOL).unwrap(); // account is created in the SVM + svm_locker + .airdrop(&p1.pubkey(), LAMPORTS_PER_SOL) + .unwrap() + .unwrap(); // account is created in the SVM.unwrap() println!("Airdropped SOL to p1"); let _ = 
svm_locker.confirm_current_block(&None).await; @@ -4335,6 +4368,7 @@ async fn test_ws_signature_subscribe(subscription_type: SignatureSubscriptionTyp let lamports_to_send = 100_000; svm_locker .airdrop(&payer.pubkey(), LAMPORTS_PER_SOL) + .unwrap() .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -4401,7 +4435,10 @@ async fn test_ws_signature_subscribe_failed_transaction() { // create a test transaction that will fail (insufficient funds) let payer = Keypair::new(); let recipient = Pubkey::new_unique(); - svm_locker.airdrop(&payer.pubkey(), 10_000).unwrap(); // airdrop a very small amount + svm_locker + .airdrop(&payer.pubkey(), 10_000) + .unwrap() + .unwrap(); // airdrop a very small amount.unwrap() let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); let transfer_ix = system_instruction::transfer(&payer.pubkey(), &recipient, LAMPORTS_PER_SOL); // Try to send more than we have @@ -4459,6 +4496,7 @@ async fn test_ws_signature_subscribe_multiple_subscribers() { let recipient = Pubkey::new_unique(); svm_locker .airdrop(&payer.pubkey(), LAMPORTS_PER_SOL) + .unwrap() .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -4532,6 +4570,7 @@ async fn test_ws_signature_subscribe_before_transaction_exists() { let recipient = Pubkey::new_unique(); svm_locker .airdrop(&payer.pubkey(), LAMPORTS_PER_SOL) + .unwrap() .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -4592,6 +4631,7 @@ async fn test_ws_account_subscribe_balance_change() { let recipient = Pubkey::new_unique(); svm_locker .airdrop(&payer.pubkey(), LAMPORTS_PER_SOL) + .unwrap() .unwrap(); // subscribe to payer account updates @@ -4649,6 +4689,7 @@ async fn test_ws_account_subscribe_multiple_changes() { let recipient = Pubkey::new_unique(); svm_locker .airdrop(&payer.pubkey(), 10 * LAMPORTS_PER_SOL) + .unwrap() .unwrap(); // subscribe to payer account updates 
@@ -4710,9 +4751,11 @@ async fn test_ws_account_subscribe_multiple_subscribers() { let sender = Keypair::new(); svm_locker .airdrop(&payer.pubkey(), LAMPORTS_PER_SOL) + .unwrap() .unwrap(); svm_locker .airdrop(&sender.pubkey(), LAMPORTS_PER_SOL) + .unwrap() .unwrap(); // create multiple subscriptions to the same account @@ -4774,6 +4817,7 @@ async fn test_ws_account_subscribe_new_account_creation() { let new_account = Pubkey::new_unique(); svm_locker .airdrop(&payer.pubkey(), LAMPORTS_PER_SOL) + .unwrap() .unwrap(); // subscribe to an account that doesn't exist yet @@ -4835,6 +4879,7 @@ async fn test_ws_account_subscribe_account_closure() { // give the account some funds svm_locker .airdrop(&account_to_close.pubkey(), 10_000) + .unwrap() .unwrap(); // subscribe to the account @@ -5005,6 +5050,7 @@ async fn test_ws_logs_subscribe_all_transactions() { let recipient = Pubkey::new_unique(); svm_locker .airdrop(&payer.pubkey(), LAMPORTS_PER_SOL) + .unwrap() .unwrap(); // subscribe to all transaction logs @@ -5069,6 +5115,7 @@ async fn test_ws_logs_subscribe_mentions_account() { let recipient = Pubkey::new_unique(); svm_locker .airdrop(&payer.pubkey(), LAMPORTS_PER_SOL) + .unwrap() .unwrap(); // subscribe to logs mentioning the system program @@ -5148,6 +5195,7 @@ async fn test_ws_logs_subscribe_confirmed_commitment() { let recipient = Pubkey::new_unique(); svm_locker .airdrop(&payer.pubkey(), LAMPORTS_PER_SOL) + .unwrap() .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -5210,6 +5258,7 @@ async fn test_ws_logs_subscribe_finalized_commitment() { let recipient = Pubkey::new_unique(); svm_locker .airdrop(&payer.pubkey(), LAMPORTS_PER_SOL) + .unwrap() .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -5269,7 +5318,7 @@ async fn test_ws_logs_subscribe_failed_transaction() { // create test accounts let payer = Keypair::new(); let recipient = Pubkey::new_unique(); - 
svm_locker.airdrop(&payer.pubkey(), 5_000).unwrap(); + svm_locker.airdrop(&payer.pubkey(), 5_000).unwrap().unwrap(); // subscribe to all logs let logs_rx = svm_locker @@ -5341,6 +5390,7 @@ async fn test_ws_logs_subscribe_multiple_subscribers() { let recipient = Pubkey::new_unique(); svm_locker .airdrop(&payer.pubkey(), LAMPORTS_PER_SOL) + .unwrap() .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -5399,6 +5449,7 @@ async fn test_ws_logs_subscribe_logs_content() { let recipient = Pubkey::new_unique(); svm_locker .airdrop(&payer.pubkey(), LAMPORTS_PER_SOL) + .unwrap() .unwrap(); // subscribe to all logs @@ -5471,9 +5522,11 @@ async fn test_token2022_full_lifecycle() { svm_locker .airdrop(&payer.pubkey(), 10 * LAMPORTS_PER_SOL) + .unwrap() .unwrap(); svm_locker .airdrop(&recipient.pubkey(), 1 * LAMPORTS_PER_SOL) + .unwrap() .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -5681,9 +5734,11 @@ async fn test_token2022_error_cases() { svm_locker .airdrop(&payer.pubkey(), 10 * LAMPORTS_PER_SOL) + .unwrap() .unwrap(); svm_locker .airdrop(&recipient.pubkey(), 1 * LAMPORTS_PER_SOL) + .unwrap() .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -5859,9 +5914,11 @@ async fn test_token2022_delegate_operations() { svm_locker .airdrop(&owner.pubkey(), 10 * LAMPORTS_PER_SOL) + .unwrap() .unwrap(); svm_locker .airdrop(&delegate.pubkey(), 1 * LAMPORTS_PER_SOL) + .unwrap() .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -6074,6 +6131,7 @@ async fn test_token2022_freeze_thaw() { svm_locker .airdrop(&owner.pubkey(), 10 * LAMPORTS_PER_SOL) + .unwrap() .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); From 38cf26a638cf4d5cf0f0867056eddbda2e4af8b8 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Fri, 19 Dec 2025 10:58:19 -0500 Subject: [PATCH 16/54] add test helpers to 
storage mod to assist in augmenting existing tests to include db --- Cargo.lock | 2 ++ Cargo.toml | 1 + crates/core/Cargo.toml | 1 + crates/core/src/storage/mod.rs | 48 ++++++++++++++++++++++++++++++++++ 4 files changed, 52 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index b11a1be3d..666b02d52 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12285,6 +12285,7 @@ dependencies = [ "solana-slot-hashes 3.0.0", "solana-system-interface 2.0.0", "solana-sysvar 3.0.0", + "solana-sysvar-id 3.0.0", "solana-transaction", "solana-transaction-error 3.0.0", "solana-transaction-status", @@ -12295,6 +12296,7 @@ dependencies = [ "surfpool-db", "surfpool-subgraph", "surfpool-types", + "tempfile", "test-case", "thiserror 2.0.16", "tokio", diff --git a/Cargo.toml b/Cargo.toml index baa08e60c..9dec37c5a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -148,6 +148,7 @@ solana-version = { version = "3.0.0", default-features = false } spl-associated-token-account-interface = { version = "2.0.0", default-features = false } spl-token-2022-interface = { version = "2.0.0", default-features = false } spl-token-interface = { version = "2.0.0", default-features = false } +tempfile = "3.23.0" test-case = "^3.3.1" thiserror = "2.0" tokio = { version = "1.43.0", default-features = false } diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml index cfeea1735..b3ec7a7d5 100644 --- a/crates/core/Cargo.toml +++ b/crates/core/Cargo.toml @@ -102,6 +102,7 @@ txtx-addon-network-svm = { workspace = true } [dev-dependencies] test-case = { workspace = true } env_logger = "*" +tempfile = { workspace = true } [features] default = ["sqlite"] diff --git a/crates/core/src/storage/mod.rs b/crates/core/src/storage/mod.rs index a6ed215b9..a030eed93 100644 --- a/crates/core/src/storage/mod.rs +++ b/crates/core/src/storage/mod.rs @@ -147,3 +147,51 @@ pub trait StorageConstructor: Storage + Clone { where Self: Sized; } + +#[cfg(test)] +pub mod tests { + use std::os::unix::fs::PermissionsExt; + + pub enum TestType { 
+ NoDb, + InMemorySqlite, + OnDiskSqlite(String), // Include TempDir to keep it alive + } + + impl TestType { + pub fn sqlite() -> Self { + let database_url = crate::storage::tests::create_tmp_sqlite_storage(); + TestType::OnDiskSqlite(database_url) + } + pub fn no_db() -> Self { + TestType::NoDb + } + pub fn in_memory() -> Self { + TestType::InMemorySqlite + } + + pub fn dispose(self) { + if let TestType::OnDiskSqlite(db_path) = self { + // Delete file at db_path + let _ = std::fs::remove_file(db_path); + } + } + } + + pub fn create_tmp_sqlite_storage() -> String { + // let temp_dir = tempfile::tempdir().expect("Failed to create temp dir for SqliteStorage"); + let write_permissions = std::fs::Permissions::from_mode(0o600); + let file = tempfile::Builder::new() + .permissions(write_permissions) + .suffix(".sqlite") + .tempfile() + .expect("Failed to create temp file for SqliteStorage"); + let database_url = file.path().to_path_buf(); + + // Use a simple path without creating the file beforehand + // Let SQLite create the database file itself + let database_url = database_url.to_str().unwrap().to_string(); + println!("Created temporary Sqlite database at: {}", database_url); + database_url + } +} From 059d8b2f82244027b7eee77d26d658d509641b26 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Fri, 2 Jan 2026 15:07:17 -0500 Subject: [PATCH 17/54] implement svm `get_all_accounts` to get vec of all accounts from db --- crates/core/src/rpc/surfnet_cheatcodes.rs | 2 +- crates/core/src/surfnet/locker.rs | 40 ++++++++++++----------- crates/core/src/surfnet/svm.rs | 11 +++---- 3 files changed, 27 insertions(+), 26 deletions(-) diff --git a/crates/core/src/rpc/surfnet_cheatcodes.rs b/crates/core/src/rpc/surfnet_cheatcodes.rs index 47d32bc95..a413f20d5 100644 --- a/crates/core/src/rpc/surfnet_cheatcodes.rs +++ b/crates/core/src/rpc/surfnet_cheatcodes.rs @@ -1797,7 +1797,7 @@ impl SurfnetCheatcodes for SurfnetCheatcodesRpc { ) -> Result>> { let config = 
config.unwrap_or_default(); let svm_locker = meta.get_svm_locker()?; - let snapshot = svm_locker.export_snapshot(config); + let snapshot = svm_locker.export_snapshot(config)?; Ok(RpcResponse { context: RpcResponseContext::new(svm_locker.get_latest_absolute_slot()), value: snapshot, diff --git a/crates/core/src/surfnet/locker.rs b/crates/core/src/surfnet/locker.rs index 324709e78..b3bd4766a 100644 --- a/crates/core/src/surfnet/locker.rs +++ b/crates/core/src/surfnet/locker.rs @@ -458,8 +458,8 @@ impl SurfnetSvmLocker { pub fn get_largest_accounts_local( &self, config: RpcLargestAccountsConfig, - ) -> SvmAccessContext> { - self.with_contextualized_svm_reader(|svm_reader| { + ) -> SurfpoolContextualizedResult> { + let res: Vec = self.with_svm_reader(|svm_reader| { let non_circulating_accounts: Vec<_> = svm_reader .non_circulating_accounts .iter() @@ -467,7 +467,8 @@ impl SurfnetSvmLocker { .collect(); let ordered_accounts = svm_reader - .iter_accounts() + .get_all_accounts()? + .into_iter() .sorted_by(|a, b| b.1.lamports().cmp(&a.1.lamports())) .collect::>(); let ordered_filtered_accounts = match config.filter { @@ -482,15 +483,18 @@ impl SurfnetSvmLocker { None => ordered_accounts, }; - ordered_filtered_accounts - .iter() - .take(20) - .map(|(pubkey, account)| RpcAccountBalance { - address: pubkey.to_string(), - lamports: account.lamports(), - }) - .collect() - }) + Ok::, SurfpoolError>( + ordered_filtered_accounts + .iter() + .take(20) + .map(|(pubkey, account)| RpcAccountBalance { + address: pubkey.to_string(), + lamports: account.lamports(), + }) + .collect(), + ) + })?; + Ok(self.with_contextualized_svm_reader(|_| res.to_owned())) } pub async fn get_largest_accounts_local_then_remote( @@ -559,7 +563,7 @@ impl SurfnetSvmLocker { // now that our local cache is aware of all large remote accounts, we can get the largest accounts locally // and filter according to the config - Ok(self.get_largest_accounts_local(config)) + self.get_largest_accounts_local(config) } pub 
async fn get_largest_accounts( @@ -567,14 +571,12 @@ impl SurfnetSvmLocker { remote_ctx: &Option<(SurfnetRemoteClient, CommitmentConfig)>, config: RpcLargestAccountsConfig, ) -> SurfpoolContextualizedResult> { - let results = if let Some((remote_client, commitment_config)) = remote_ctx { + if let Some((remote_client, commitment_config)) = remote_ctx { self.get_largest_accounts_local_then_remote(remote_client, config, *commitment_config) - .await? + .await } else { self.get_largest_accounts_local(config) - }; - - Ok(results) + } } pub fn account_to_rpc_keyed_account( @@ -2953,7 +2955,7 @@ impl SurfnetSvmLocker { pub fn export_snapshot( &self, config: ExportSnapshotConfig, - ) -> BTreeMap { + ) -> SurfpoolResult> { self.with_svm_reader(|svm_reader| svm_reader.export_snapshot(config)) } diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index 0dee6e59e..65abca553 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -2597,9 +2597,8 @@ impl SurfnetSvm { self.inner.get_account(pubkey) } - pub fn iter_accounts(&self) -> std::collections::hash_map::Iter<'_, Pubkey, AccountSharedData> { - todo!() - // self.inner.accounts_db().inner.iter() + pub fn get_all_accounts(&self) -> SurfpoolResult> { + self.inner.get_all_accounts() } pub fn get_transaction( @@ -2634,7 +2633,7 @@ impl SurfnetSvm { pub fn export_snapshot( &self, config: ExportSnapshotConfig, - ) -> BTreeMap { + ) -> SurfpoolResult> { let mut fixtures = BTreeMap::new(); let encoding = if config.include_parsed_accounts.unwrap_or_default() { UiAccountEncoding::JsonParsed @@ -2704,7 +2703,7 @@ impl SurfnetSvm { match &config.scope { ExportSnapshotScope::Network => { // Export all network accounts (current behavior) - for (pubkey, account_shared_data) in self.iter_accounts() { + for (pubkey, account_shared_data) in self.get_all_accounts()? 
{ let account = Account::from(account_shared_data.clone()); process_account(&pubkey, &account); } @@ -2732,7 +2731,7 @@ impl SurfnetSvm { } } - fixtures + Ok(fixtures) } /// Registers a scenario for execution by scheduling its overrides From 9ee501742e497ba39a20d3cdd3505626a1cf5f6d Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Fri, 2 Jan 2026 15:07:43 -0500 Subject: [PATCH 18/54] sort results for and document `get_all_accounts` method --- crates/core/src/surfnet/surfnet_lite_svm.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/crates/core/src/surfnet/surfnet_lite_svm.rs b/crates/core/src/surfnet/surfnet_lite_svm.rs index c0c8e154b..d07d1b4e1 100644 --- a/crates/core/src/surfnet/surfnet_lite_svm.rs +++ b/crates/core/src/surfnet/surfnet_lite_svm.rs @@ -1,6 +1,7 @@ use std::collections::HashMap; use agave_feature_set::FeatureSet; +use itertools::Itertools; use litesvm::{ LiteSVM, types::{FailedTransactionMetadata, SimulatedTransactionInfo, TransactionResult}, @@ -210,6 +211,9 @@ impl SurfnetLiteSvm { Ok(()) } + /// Get all accounts from both the LiteSVM state and the database, merging them together. + /// Accounts in the LiteSVM state take precedence over those in the database. + /// The resulting accounts are sorted by Pubkey. pub fn get_all_accounts(&self) -> SurfpoolResult> { // In general, we trust the LiteSVM state as the most up-to-date source of truth for any given account, // But there's a chance that the account was garbage collected, meaning it exists in the DB but not in the SVM. 
@@ -227,7 +231,10 @@ impl SurfnetLiteSvm { accounts.insert(*pubkey, account.clone()); } } - Ok(accounts.into_iter().collect()) + Ok(accounts + .into_iter() + .sorted_by(|a, b| a.0.cmp(&b.0)) + .collect()) } } From 801013bbbc2b05f2339c65fcfd12b464be3c12c0 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Fri, 2 Jan 2026 15:08:11 -0500 Subject: [PATCH 19/54] clean up tests --- crates/core/src/surfnet/svm.rs | 40 +++++++++++++++------------- crates/core/src/tests/integration.rs | 3 ++- 2 files changed, 23 insertions(+), 20 deletions(-) diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index 65abca553..61cc7e386 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -844,7 +844,9 @@ impl SurfnetSvm { trace!("Nonce account pubkey: {:?}", nonce_account_pubkey,); - let Some(nonce_account) = self.get_account(nonce_account_pubkey) else { + // Here we're swallowing errors in the storage - if we fail to fetch the account because of a storage error, + // we're just considering the blockhash to be invalid. 
+ let Ok(Some(nonce_account)) = self.get_account(nonce_account_pubkey) else { return false; }; trace!("Nonce account: {:?}", nonce_account); @@ -3120,27 +3122,27 @@ mod tests { // GetAccountResult::None should be a noop when writing account updates { - let index_before = svm.inner.get_all_accounts().unwrap(); + let index_before = svm.get_all_accounts().unwrap(); let empty_update = GetAccountResult::None(pubkey); svm.write_account_update(empty_update); - assert_eq!(svm.inner.get_all_accounts().unwrap(), index_before); + assert_eq!(svm.get_all_accounts().unwrap(), index_before); } // GetAccountResult::FoundAccount with `DoUpdateSvm` flag to false should be a noop { - let index_before = svm.inner.get_all_accounts().unwrap(); + let index_before = svm.get_all_accounts().unwrap(); let found_update = GetAccountResult::FoundAccount(pubkey, account.clone(), false); svm.write_account_update(found_update); - assert_eq!(svm.inner.get_all_accounts().unwrap(), index_before); + assert_eq!(svm.get_all_accounts().unwrap(), index_before); } // GetAccountResult::FoundAccount with `DoUpdateSvm` flag to true should update the account { - let index_before = svm.inner.get_all_accounts().unwrap(); + let index_before = svm.get_all_accounts().unwrap(); let found_update = GetAccountResult::FoundAccount(pubkey, account.clone(), true); svm.write_account_update(found_update); assert_eq!( - svm.inner.get_all_accounts().unwrap().len(), + svm.get_all_accounts().unwrap().len(), index_before.len() + 1 ); if !expect_account_update_event(&events_rx, &svm, &pubkey, &account) { @@ -3174,7 +3176,7 @@ mod tests { rent_epoch: 0, }; - let index_before = svm.inner.get_all_accounts().unwrap(); + let index_before = svm.get_all_accounts().unwrap(); let found_program_account_update = GetAccountResult::FoundProgramAccount( (program_address, program_account.clone()), (program_data_address, None), @@ -3198,7 +3200,7 @@ mod tests { ); } assert_eq!( - svm.inner.get_all_accounts().unwrap().len(), + 
svm.get_all_accounts().unwrap().len(), index_before.len() + 2 ); } @@ -3208,14 +3210,14 @@ mod tests { let (program_address, program_account, program_data_address, program_data_account) = create_program_accounts(); - let index_before = svm.inner.get_all_accounts().unwrap(); + let index_before = svm.get_all_accounts().unwrap(); let found_program_account_update = GetAccountResult::FoundProgramAccount( (program_address, program_account.clone()), (program_data_address, Some(program_data_account.clone())), ); svm.write_account_update(found_program_account_update); assert_eq!( - svm.inner.get_all_accounts().unwrap().len(), + svm.get_all_accounts().unwrap().len(), index_before.len() + 2 ); if !expect_account_update_event( @@ -3242,7 +3244,7 @@ mod tests { let (program_address, program_account, program_data_address, program_data_account) = create_program_accounts(); - let index_before = svm.inner.get_all_accounts().unwrap(); + let index_before = svm.get_all_accounts().unwrap(); let found_update = GetAccountResult::FoundAccount( program_data_address, program_data_account.clone(), @@ -3250,7 +3252,7 @@ mod tests { ); svm.write_account_update(found_update); assert_eq!( - svm.inner.get_all_accounts().unwrap().len(), + svm.get_all_accounts().unwrap().len(), index_before.len() + 1 ); if !expect_account_update_event( @@ -3264,14 +3266,14 @@ mod tests { ); } - let index_before = svm.inner.get_all_accounts().unwrap(); + let index_before = svm.get_all_accounts().unwrap(); let program_account_found_update = GetAccountResult::FoundProgramAccount( (program_address, program_account.clone()), (program_data_address, None), ); svm.write_account_update(program_account_found_update); assert_eq!( - svm.inner.get_all_accounts().unwrap().len(), + svm.get_all_accounts().unwrap().len(), index_before.len() + 1 ); if !expect_account_update_event(&events_rx, &svm, &program_address, &program_account) { @@ -3742,9 +3744,9 @@ mod tests { svm.set_account(&account_pubkey, account.clone()).unwrap(); - 
assert!(svm.get_account(&account_pubkey).is_some()); + assert!(svm.get_account(&account_pubkey).unwrap().is_some()); assert!(!svm.closed_accounts.contains(&account_pubkey)); - assert_eq!(svm.get_account_owned_by(&owner).len(), 1); + assert_eq!(svm.get_account_owned_by(&owner).unwrap().len(), 1); let empty_account = Account::default(); svm.update_account_registries(&account_pubkey, &empty_account) @@ -3752,9 +3754,9 @@ mod tests { assert!(svm.closed_accounts.contains(&account_pubkey)); - assert_eq!(svm.get_account_owned_by(&owner).len(), 0); + assert_eq!(svm.get_account_owned_by(&owner).unwrap().len(), 0); - let owned_accounts = svm.get_account_owned_by(&owner); + let owned_accounts = svm.get_account_owned_by(&owner).unwrap(); assert!(!owned_accounts.iter().any(|(pk, _)| *pk == account_pubkey)); } diff --git a/crates/core/src/tests/integration.rs b/crates/core/src/tests/integration.rs index 47ff149ba..d0e9c50cd 100644 --- a/crates/core/src/tests/integration.rs +++ b/crates/core/src/tests/integration.rs @@ -1,4 +1,4 @@ -use std::{str::FromStr, sync::Arc, thread::sleep, time::Duration}; +use std::{str::FromStr, sync::Arc, time::Duration}; use base64::Engine; use crossbeam_channel::{unbounded, unbounded as crossbeam_unbounded}; @@ -6354,6 +6354,7 @@ fn test_nonce_accounts() { svm_locker .airdrop(&payer.pubkey(), 5 * LAMPORTS_PER_SOL) + .unwrap() .unwrap(); let nonce_rent = svm_locker.with_svm_reader(|svm_reader| { From 17ddb31ae7560622e2f3e871477f526c91d17dad Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Fri, 2 Jan 2026 15:52:56 -0500 Subject: [PATCH 20/54] upgrade svm/integration tests to use all db types for tests --- crates/core/src/storage/mod.rs | 23 +- crates/core/src/surfnet/svm.rs | 155 +++++---- crates/core/src/tests/integration.rs | 449 +++++++++++++++++++-------- 3 files changed, 432 insertions(+), 195 deletions(-) diff --git a/crates/core/src/storage/mod.rs b/crates/core/src/storage/mod.rs index a030eed93..768397cbd 100644 --- 
a/crates/core/src/storage/mod.rs +++ b/crates/core/src/storage/mod.rs @@ -152,13 +152,28 @@ pub trait StorageConstructor: Storage + Clone { pub mod tests { use std::os::unix::fs::PermissionsExt; + use crossbeam_channel::Receiver; + use surfpool_types::SimnetEvent; + + use crate::surfnet::{GeyserEvent, svm::SurfnetSvm}; + pub enum TestType { NoDb, InMemorySqlite, - OnDiskSqlite(String), // Include TempDir to keep it alive + OnDiskSqlite(String), } impl TestType { + pub fn initialize_svm(&self) -> (SurfnetSvm, Receiver, Receiver) { + match &self { + TestType::NoDb => SurfnetSvm::new(), + TestType::InMemorySqlite => SurfnetSvm::new_with_db(Some(":memory:")).unwrap(), + TestType::OnDiskSqlite(db_path) => { + SurfnetSvm::new_with_db(Some(db_path.as_ref())).unwrap() + } + } + } + pub fn sqlite() -> Self { let database_url = crate::storage::tests::create_tmp_sqlite_storage(); TestType::OnDiskSqlite(database_url) @@ -169,10 +184,12 @@ pub mod tests { pub fn in_memory() -> Self { TestType::InMemorySqlite } + } - pub fn dispose(self) { + impl Drop for TestType { + fn drop(&mut self) { if let TestType::OnDiskSqlite(db_path) = self { - // Delete file at db_path + // Delete file at db_path when TestType goes out of scope let _ = std::fs::remove_file(db_path); } } diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index 61cc7e386..5bd4f8667 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -2785,12 +2785,17 @@ mod tests { use solana_loader_v3_interface::get_program_data_address; use solana_program_pack::Pack; use spl_token_interface::state::{Account as TokenAccount, AccountState}; + use test_case::test_case; + + use crate::storage::tests::TestType; use super::*; - #[test] - fn test_synthetic_blockhash_generation() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + 
#[test_case(TestType::no_db(); "with no db")] + fn test_synthetic_blockhash_generation(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); // Test with different chain tip indices let test_cases = vec![0, 1, 42, 255, 1000, 0x12345678]; @@ -2849,9 +2854,11 @@ mod tests { println!("Generated hash: {}", hash_str); } - #[test] - fn test_blockhash_consistency_across_calls() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + fn test_blockhash_consistency_across_calls(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); // Set a specific chain tip svm.chain_tip = BlockIdentifier::new(123, "initial_hash"); @@ -2881,9 +2888,11 @@ mod tests { } } - #[test] - fn test_token_account_indexing() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + fn test_token_account_indexing(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); let owner = Pubkey::new_unique(); let delegate = Pubkey::new_unique(); @@ -2933,9 +2942,11 @@ mod tests { assert_eq!(mint_accounts[0].0, token_account_pubkey); } - #[test] - fn test_account_update_removes_old_indexes() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + fn test_account_update_removes_old_indexes(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); let owner = Pubkey::new_unique(); let old_delegate = Pubkey::new_unique(); @@ 
-3003,9 +3014,11 @@ mod tests { assert_eq!(svm.get_parsed_token_accounts_by_owner(&owner).len(), 1); } - #[test] - fn test_non_token_accounts_not_indexed() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + fn test_non_token_accounts_not_indexed(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); let system_account_pubkey = Pubkey::new_unique(); let account = Account { @@ -3107,9 +3120,11 @@ mod tests { ) } - #[test] - fn test_inserting_account_updates() { - let (mut svm, events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + fn test_inserting_account_updates(test_type: TestType) { + let (mut svm, events_rx, _geyser_rx) = test_type.initialize_svm(); let pubkey = Pubkey::new_unique(); let account = Account { @@ -3284,9 +3299,11 @@ mod tests { } } - #[test] - fn test_encode_ui_account() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + fn test_encode_ui_account(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); let idl_v1: Idl = serde_json::from_slice(&include_bytes!("../tests/assets/idl_v1.json").to_vec()) @@ -3512,18 +3529,22 @@ mod tests { } } - #[test] - fn test_profiling_map_capacity_default() { - let (svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + fn 
test_profiling_map_capacity_default(test_type: TestType) { + let (svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); assert_eq!( svm.executed_transaction_profiles.capacity(), DEFAULT_PROFILING_MAP_CAPACITY ); } - #[test] - fn test_profiling_map_capacity_set() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + fn test_profiling_map_capacity_set(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); svm.set_profiling_map_capacity(10); assert_eq!(svm.executed_transaction_profiles.capacity(), 10); } @@ -3558,18 +3579,22 @@ mod tests { assert_ne!(loader_v4_id, disable_fees_id); } - #[test] - fn test_apply_feature_config_empty() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + fn test_apply_feature_config_empty(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); let config = SvmFeatureConfig::new(); // Should not panic with empty config svm.apply_feature_config(&config); } - #[test] - fn test_apply_feature_config_enable_feature() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + fn test_apply_feature_config_enable_feature(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); // Disable a feature first let feature_id = enable_loader_v4::id(); @@ -3583,9 +3608,11 @@ mod tests { assert!(svm.feature_set.is_active(&feature_id)); } - #[test] - fn test_apply_feature_config_disable_feature() { - 
let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + fn test_apply_feature_config_disable_feature(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); // Feature should be active by default (all_enabled) let feature_id = disable_fees_sysvar::id(); @@ -3598,9 +3625,11 @@ mod tests { assert!(!svm.feature_set.is_active(&feature_id)); } - #[test] - fn test_apply_feature_config_mainnet_defaults() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + fn test_apply_feature_config_mainnet_defaults(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); let config = SvmFeatureConfig::default_mainnet_features(); svm.apply_feature_config(&config); @@ -3642,9 +3671,11 @@ mod tests { ); } - #[test] - fn test_apply_feature_config_mainnet_with_override() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + fn test_apply_feature_config_mainnet_with_override(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); // Start with mainnet defaults, but enable loader v4 let config = @@ -3663,9 +3694,11 @@ mod tests { ); } - #[test] - fn test_apply_feature_config_multiple_changes() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + fn 
test_apply_feature_config_multiple_changes(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); let config = SvmFeatureConfig::new() .enable(SvmFeature::EnableLoaderV4) @@ -3684,9 +3717,11 @@ mod tests { assert!(!svm.feature_set.is_active(&blake3_syscall_enabled::id())); } - #[test] - fn test_apply_feature_config_preserves_native_mint() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + fn test_apply_feature_config_preserves_native_mint(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); // Native mint should exist before assert!( @@ -3708,9 +3743,11 @@ mod tests { ); } - #[test] - fn test_apply_feature_config_idempotent() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + fn test_apply_feature_config_idempotent(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); let config = SvmFeatureConfig::new() .enable(SvmFeature::EnableLoaderV4) @@ -3727,9 +3764,11 @@ mod tests { // Garbage collection tests - #[test] - fn test_garbage_collected_account_tracking() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + fn test_garbage_collected_account_tracking(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); let owner = Pubkey::new_unique(); let account_pubkey = Pubkey::new_unique(); @@ -3760,9 +3799,11 @@ mod tests { assert!(!owned_accounts.iter().any(|(pk, _)| 
*pk == account_pubkey)); } - #[test] - fn test_garbage_collected_token_account_cleanup() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + fn test_garbage_collected_token_account_cleanup(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); let token_owner = Pubkey::new_unique(); let delegate = Pubkey::new_unique(); diff --git a/crates/core/src/tests/integration.rs b/crates/core/src/tests/integration.rs index d0e9c50cd..ecb7dcabe 100644 --- a/crates/core/src/tests/integration.rs +++ b/crates/core/src/tests/integration.rs @@ -53,6 +53,7 @@ use crate::{ surfnet_cheatcodes::{SurfnetCheatcodes, SurfnetCheatcodesRpc}, }, runloops::start_local_surfnet_runloop, + storage::tests::TestType, surfnet::{SignatureSubscriptionType, locker::SurfnetSvmLocker, svm::SurfnetSvm}, tests::helpers::get_free_port, types::{TimeTravelConfig, TransactionLoadedAddresses}, @@ -78,8 +79,11 @@ fn wait_for_ready_and_connected(simnet_events_rx: &crossbeam_channel::Receiver>(); let airdrop_addresses: Vec = airdrop_keypairs.iter().map(|kp| kp.pubkey()).collect(); @@ -195,7 +205,7 @@ async fn test_simnet_some_sol_transfers() { ..SurfpoolConfig::default() }; - let (surfnet_svm, simnet_events_rx, geyser_events_rx) = SurfnetSvm::new(); + let (surfnet_svm, simnet_events_rx, geyser_events_rx) = test_type.initialize_svm(); let (simnet_commands_tx, simnet_commands_rx) = unbounded(); let (subgraph_commands_tx, _subgraph_commands_rx) = unbounded(); let svm_locker = SurfnetSvmLocker::new(surfnet_svm); @@ -324,8 +334,11 @@ async fn test_simnet_some_sol_transfers() { // However, we are not actually setting up a tx that will use the lookup table internally, // we are kind of just trusting that LiteSVM will do its job here. 
#[cfg_attr(feature = "ignore_tests_ci", ignore = "flaky CI tests")] +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_add_alt_entries_fetching() { +async fn test_add_alt_entries_fetching(test_type: TestType) { let payer = Keypair::new(); let pk = payer.pubkey(); @@ -347,7 +360,7 @@ async fn test_add_alt_entries_fetching() { ..SurfpoolConfig::default() }; - let (surfnet_svm, simnet_events_rx, geyser_events_rx) = SurfnetSvm::new(); + let (surfnet_svm, simnet_events_rx, geyser_events_rx) = test_type.initialize_svm(); let (simnet_commands_tx, simnet_commands_rx) = unbounded(); let (subgraph_commands_tx, _subgraph_commands_rx) = unbounded(); let svm_locker = Arc::new(RwLock::new(surfnet_svm)); @@ -491,8 +504,11 @@ async fn test_add_alt_entries_fetching() { // However, we are not actually setting up a tx that will use the lookup table internally, // we are kind of just trusting that LiteSVM will do its job here. 
#[cfg_attr(feature = "ignore_tests_ci", ignore = "flaky CI tests")] +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_simulate_add_alt_entries_fetching() { +async fn test_simulate_add_alt_entries_fetching(test_type: TestType) { let payer = Keypair::new(); let pk = payer.pubkey(); @@ -514,7 +530,7 @@ async fn test_simulate_add_alt_entries_fetching() { ..SurfpoolConfig::default() }; - let (surfnet_svm, simnet_events_rx, geyser_events_rx) = SurfnetSvm::new(); + let (surfnet_svm, simnet_events_rx, geyser_events_rx) = test_type.initialize_svm(); let (simnet_commands_tx, simnet_commands_rx) = unbounded(); let (subgraph_commands_tx, _subgraph_commands_rx) = unbounded(); let svm_locker = Arc::new(RwLock::new(surfnet_svm)); @@ -601,9 +617,13 @@ async fn test_simulate_add_alt_entries_fetching() { "Unexpected simulation error" ); } + #[cfg_attr(feature = "ignore_tests_ci", ignore = "flaky CI tests")] +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_simulate_transaction_no_signers() { +async fn test_simulate_transaction_no_signers(test_type: TestType) { let payer = Keypair::new(); let pk = payer.pubkey(); let lamports = LAMPORTS_PER_SOL; @@ -626,7 +646,7 @@ async fn test_simulate_transaction_no_signers() { ..SurfpoolConfig::default() }; - let (surfnet_svm, simnet_events_rx, geyser_events_rx) = SurfnetSvm::new(); + let (surfnet_svm, simnet_events_rx, geyser_events_rx) = test_type.initialize_svm(); let (simnet_commands_tx, simnet_commands_rx) = unbounded(); let (subgraph_commands_tx, _subgraph_commands_rx) = unbounded(); let svm_locker = Arc::new(RwLock::new(surfnet_svm)); @@ -691,9 +711,12 @@ async fn 
test_simulate_transaction_no_signers() { } #[cfg_attr(feature = "ignore_tests_ci", ignore = "flaky CI tests")] +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_surfnet_estimate_compute_units() { - let (mut svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_surfnet_estimate_compute_units(test_type: TestType) { + let (mut svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let rpc_server = crate::rpc::surfnet_cheatcodes::SurfnetCheatcodesRpc; let payer = Keypair::new(); @@ -944,7 +967,7 @@ async fn test_surfnet_estimate_compute_units() { // Test send_transaction with cu_analysis_enabled = true // Create a new SVM instance - let (mut svm_for_send, simnet_rx_for_send, _geyser_rx_for_send) = SurfnetSvm::new(); + let (mut svm_for_send, simnet_rx_for_send, _geyser_rx_for_send) = test_type.initialize_svm(); svm_for_send .airdrop(&payer.pubkey(), lamports_to_send * 2) .unwrap() @@ -978,10 +1001,13 @@ async fn test_surfnet_estimate_compute_units() { assert!(found_cu_event, "Did not find CU estimation SimnetEvent"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_get_transaction_profile() { +async fn test_get_transaction_profile(test_type: TestType) { let rpc_server = SurfnetCheatcodesRpc; - let (mut svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (mut svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); // Set up test accounts let payer = Keypair::new(); @@ -1194,11 +1220,13 @@ async fn test_get_transaction_profile() { println!("All get_transaction_profile tests passed successfully!"); } -#[test] 
-fn test_register_and_get_idl_without_slot() { +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +fn test_register_and_get_idl_without_slot(test_type: TestType) { let idl: Idl = serde_json::from_slice(include_bytes!("./assets/idl_v1.json")).unwrap(); let rpc_server = SurfnetCheatcodesRpc; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker_for_context = SurfnetSvmLocker::new(svm_instance); let (simnet_cmd_tx, _simnet_cmd_rx) = crossbeam_unbounded::(); @@ -1245,11 +1273,13 @@ fn test_register_and_get_idl_without_slot() { println!("All IDL registration and retrieval tests passed successfully!"); } -#[test] -fn test_register_and_get_idl_with_slot() { +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +fn test_register_and_get_idl_with_slot(test_type: TestType) { let idl: Idl = serde_json::from_slice(include_bytes!("./assets/idl_v1.json")).unwrap(); let rpc_server = SurfnetCheatcodesRpc; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker_for_context = SurfnetSvmLocker::new(svm_instance); let (simnet_cmd_tx, _simnet_cmd_rx) = crossbeam_unbounded::(); @@ -1306,13 +1336,16 @@ fn test_register_and_get_idl_with_slot() { println!("All IDL registration and retrieval tests passed successfully!"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_register_and_get_same_idl_with_different_slots() { +async 
fn test_register_and_get_same_idl_with_different_slots(test_type: TestType) { let idl_v1: Idl = serde_json::from_slice(include_bytes!("./assets/idl_v1.json")).unwrap(); let idl_v2: Idl = serde_json::from_slice(include_bytes!("./assets/idl_v2.json")).unwrap(); let idl_v3: Idl = serde_json::from_slice(include_bytes!("./assets/idl_v3.json")).unwrap(); let rpc_server = SurfnetCheatcodesRpc; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker_for_context = SurfnetSvmLocker::new(svm_instance); @@ -1455,10 +1488,13 @@ async fn test_register_and_get_same_idl_with_different_slots() { println!("All IDL registration and retrieval tests at different slots passed successfully!"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_profile_transaction_basic() { +async fn test_profile_transaction_basic(test_type: TestType) { // Set up test environment - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // Set up test accounts @@ -1539,9 +1575,12 @@ async fn test_profile_transaction_basic() { println!("Basic transaction profiling test passed successfully!"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_profile_transaction_multi_instruction_basic() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_profile_transaction_multi_instruction_basic(test_type: TestType) { + let (svm_instance, 
_simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); let payer = Keypair::new(); @@ -1932,10 +1971,13 @@ async fn test_profile_transaction_multi_instruction_basic() { } } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_profile_transaction_with_tag() { +async fn test_profile_transaction_with_tag(test_type: TestType) { // Set up test environment - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // Set up test accounts @@ -2089,9 +2131,12 @@ async fn test_profile_transaction_with_tag() { println!("Tag-based transaction profiling test passed successfully!"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_profile_transaction_token_transfer() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_profile_transaction_token_transfer(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // Set up test accounts @@ -2525,9 +2570,12 @@ async fn test_profile_transaction_token_transfer() { // println!("Token transfer profiling test passed successfully!"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_profile_transaction_insufficient_funds() { - let 
(svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_profile_transaction_insufficient_funds(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // Set up test accounts with insufficient funds @@ -2591,9 +2639,12 @@ async fn test_profile_transaction_insufficient_funds() { println!("Insufficient funds profiling test passed successfully!"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_profile_transaction_multi_instruction_failure() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_profile_transaction_multi_instruction_failure(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // Set up test accounts @@ -2671,9 +2722,12 @@ async fn test_profile_transaction_multi_instruction_failure() { println!("Multi-instruction failure profiling test passed successfully!"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_profile_transaction_with_encoding() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_profile_transaction_with_encoding(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // Set up test accounts @@ -2739,9 +2793,12 @@ async fn test_profile_transaction_with_encoding() { println!("Encoding profiling test passed successfully!"); } 
+#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_profile_transaction_with_tag_and_retrieval() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_profile_transaction_with_tag_and_retrieval(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // Set up test accounts @@ -2839,9 +2896,12 @@ async fn test_profile_transaction_with_tag_and_retrieval() { println!("Tag and retrieval profiling test passed successfully!"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_profile_transaction_empty_instruction() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_profile_transaction_empty_instruction(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // Set up test accounts @@ -2895,9 +2955,12 @@ async fn test_profile_transaction_empty_instruction() { ); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_profile_transaction_versioned_message() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_profile_transaction_versioned_message(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); 
// Set up test accounts @@ -2956,10 +3019,13 @@ async fn test_profile_transaction_versioned_message() { println!("Versioned message profiling test passed successfully!"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_get_local_signatures_without_limit() { +async fn test_get_local_signatures_without_limit(test_type: TestType) { let rpc_server = SurfnetCheatcodesRpc; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker_for_context = SurfnetSvmLocker::new(svm_instance.clone()); @@ -3056,11 +3122,13 @@ async fn test_get_local_signatures_without_limit() { assert!(local_signatures.len() > 0); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_get_local_signatures_with_limit() { +async fn test_get_local_signatures_with_limit(test_type: TestType) { let rpc_server = SurfnetCheatcodesRpc; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); - + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker_for_context = SurfnetSvmLocker::new(svm_instance.clone()); let (simnet_cmd_tx, _simnet_cmd_rx) = crossbeam_unbounded::(); @@ -3203,6 +3271,7 @@ async fn test_get_local_signatures_with_limit() { fn boot_simnet( block_production_mode: BlockProductionMode, slot_time: Option, + test_type: TestType, ) -> ( SurfnetSvmLocker, crossbeam_channel::Sender, @@ -3225,7 +3294,7 @@ fn boot_simnet( ..SurfpoolConfig::default() }; - let (surfnet_svm, simnet_events_rx, geyser_events_rx) = SurfnetSvm::new(); + let (surfnet_svm, simnet_events_rx, 
geyser_events_rx) = test_type.initialize_svm(); let (simnet_commands_tx, simnet_commands_rx) = unbounded(); let (subgraph_commands_tx, _subgraph_commands_rx) = unbounded(); let svm_locker = SurfnetSvmLocker::new(surfnet_svm); @@ -3255,10 +3324,13 @@ fn boot_simnet( (svm_locker, simnet_commands_tx, simnet_events_rx) } -#[test] -fn test_time_travel_resume_paused_clock() { +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +fn test_time_travel_resume_paused_clock(test_type: TestType) { let rpc_server = SurfnetCheatcodesRpc; - let (svm_locker, simnet_cmd_tx, _) = boot_simnet(BlockProductionMode::Clock, Some(100)); + let (svm_locker, simnet_cmd_tx, _) = + boot_simnet(BlockProductionMode::Clock, Some(100), test_type); let (plugin_cmd_tx, _plugin_cmd_rx) = crossbeam_unbounded::(); let runloop_context = RunloopContext { @@ -3327,12 +3399,17 @@ fn test_time_travel_resume_paused_clock() { println!("Resume clock test passed successfully!"); } -#[test] -fn test_time_travel_absolute_timestamp() { +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +fn test_time_travel_absolute_timestamp(test_type: TestType) { let rpc_server = SurfnetCheatcodesRpc; let slot_time = 100; - let (svm_locker, simnet_cmd_tx, simnet_events_rx) = - boot_simnet(BlockProductionMode::Clock, Some(slot_time.clone())); + let (svm_locker, simnet_cmd_tx, simnet_events_rx) = boot_simnet( + BlockProductionMode::Clock, + Some(slot_time.clone()), + test_type, + ); let (plugin_cmd_tx, _plugin_cmd_rx) = crossbeam_unbounded::(); let runloop_context = RunloopContext { @@ -3406,11 +3483,13 @@ fn test_time_travel_absolute_timestamp() { println!("Time travel to absolute timestamp test passed successfully!"); } -#[test] -fn test_time_travel_absolute_slot() { 
+#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +fn test_time_travel_absolute_slot(test_type: TestType) { let rpc_server = SurfnetCheatcodesRpc; let (svm_locker, simnet_cmd_tx, simnet_events_rx) = - boot_simnet(BlockProductionMode::Clock, Some(400)); + boot_simnet(BlockProductionMode::Clock, Some(400), test_type); let (plugin_cmd_tx, _plugin_cmd_rx) = crossbeam_unbounded::(); let runloop_context = RunloopContext { @@ -3478,11 +3557,13 @@ fn test_time_travel_absolute_slot() { println!("Time travel to absolute slot test passed successfully!"); } -#[test] -fn test_time_travel_absolute_epoch() { +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +fn test_time_travel_absolute_epoch(test_type: TestType) { let rpc_server = SurfnetCheatcodesRpc; let (svm_locker, simnet_cmd_tx, simnet_events_rx) = - boot_simnet(BlockProductionMode::Clock, Some(400)); + boot_simnet(BlockProductionMode::Clock, Some(400), test_type); let (plugin_cmd_tx, _plugin_cmd_rx) = crossbeam_unbounded::(); let runloop_context = RunloopContext { @@ -3553,10 +3634,13 @@ fn test_time_travel_absolute_epoch() { println!("Time travel to absolute epoch test passed successfully!"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_ix_profiling_with_alt_tx() { +async fn test_ix_profiling_with_alt_tx(test_type: TestType) { let (svm_locker, _simnet_cmd_tx, _simnet_events_rx) = - boot_simnet(BlockProductionMode::Clock, Some(400)); + boot_simnet(BlockProductionMode::Clock, Some(400), test_type); let p1 = Keypair::new(); let p2 = Keypair::new(); @@ -3838,11 +3922,13 @@ async fn 
test_ix_profiling_with_alt_tx() { } } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn it_should_delete_accounts_with_no_lamports() { +async fn it_should_delete_accounts_with_no_lamports(test_type: TestType) { let (svm_locker, _simnet_cmd_tx, _simnet_events_rx) = - boot_simnet(BlockProductionMode::Clock, Some(400)); - + boot_simnet(BlockProductionMode::Clock, Some(400), test_type); let p1 = Keypair::new(); let p2 = Keypair::new(); @@ -3887,11 +3973,13 @@ async fn it_should_delete_accounts_with_no_lamports() { ); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_compute_budget_profiling() { +async fn test_compute_budget_profiling(test_type: TestType) { let (svm_locker, _simnet_cmd_tx, _simnet_events_rx) = - boot_simnet(BlockProductionMode::Clock, Some(400)); - + boot_simnet(BlockProductionMode::Clock, Some(400), test_type); let p1 = Keypair::new(); let p2 = Keypair::new(); @@ -3954,9 +4042,11 @@ async fn test_compute_budget_profiling() { assert_eq!(ix.compute_units_consumed, 150); } -#[test] -fn test_reset_account() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +fn test_reset_account(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); let p1 = Keypair::new(); println!("P1 pubkey: {}", p1.pubkey()); @@ -3986,9 +4076,11 @@ fn test_reset_account() { ); } -#[test] -fn test_reset_account_cascade() { - let 
(svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +fn test_reset_account_cascade(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // Create owner account and owned account @@ -4035,9 +4127,12 @@ fn test_reset_account_cascade() { svm_locker.reset_account(owned, false).unwrap(); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_reset_streamed_account() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_reset_streamed_account(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); let p1 = Keypair::new(); println!("P1 pubkey: {}", p1.pubkey()); @@ -4061,9 +4156,12 @@ async fn test_reset_streamed_account() { ); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_reset_streamed_account_cascade() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_reset_streamed_account_cascade(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // Create owner account and owned account @@ -4112,9 +4210,11 @@ async fn test_reset_streamed_account_cascade() { assert!(svm_locker.get_account_local(&owned).inner.is_none()); } 
-#[test] -fn test_reset_network() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +fn test_reset_network(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // Create owner account and owned account @@ -4164,6 +4264,7 @@ fn test_reset_network() { fn start_surfnet( airdrop_addresses: Vec, datasource_rpc_url: Option, + test_type: TestType, ) -> Result<(String, SurfnetSvmLocker), String> { let bind_host = "127.0.0.1"; let bind_port = get_free_port().unwrap(); @@ -4186,7 +4287,7 @@ fn start_surfnet( ..SurfpoolConfig::default() }; - let (surfnet_svm, simnet_events_rx, geyser_events_rx) = SurfnetSvm::new(); + let (surfnet_svm, simnet_events_rx, geyser_events_rx) = test_type.initialize_svm(); let (simnet_commands_tx, simnet_commands_rx) = unbounded(); let (subgraph_commands_tx, _subgraph_commands_rx) = unbounded(); let svm_locker = SurfnetSvmLocker::new(surfnet_svm); @@ -4227,19 +4328,28 @@ fn start_surfnet( Ok((format!("http://{}:{}", bind_host, bind_port), svm_locker)) } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[cfg_attr(feature = "ignore_tests_ci", ignore = "flaky CI tests")] #[tokio::test(flavor = "multi_thread")] -async fn test_closed_accounts() { +async fn test_closed_accounts(test_type: TestType) { let keypair = Keypair::new(); let pubkey = keypair.pubkey(); + let another_test_type = match &test_type { + TestType::OnDiskSqlite(_) => TestType::sqlite(), + TestType::InMemorySqlite => TestType::in_memory(), + TestType::NoDb => TestType::no_db(), + }; // Start datasource surfnet first, which will only have accounts we airdrop 
to let (datasource_surfnet_url, _datasource_svm_locker) = - start_surfnet(vec![pubkey], None).expect("Failed to start datasource surfnet"); + start_surfnet(vec![pubkey], None, test_type).expect("Failed to start datasource surfnet"); println!("Datasource surfnet started at {}", datasource_surfnet_url); // Now start the test surfnet which forks the datasource surfnet let (surfnet_url, surfnet_svm_locker) = - start_surfnet(vec![], Some(datasource_surfnet_url)).expect("Failed to start surfnet"); + start_surfnet(vec![], Some(datasource_surfnet_url), another_test_type) + .expect("Failed to start surfnet"); println!("Surfnet started at {}", surfnet_url); let rpc_client = RpcClient::new(surfnet_url); @@ -4422,14 +4532,17 @@ async fn test_ws_signature_subscribe(subscription_type: SignatureSubscriptionTyp ); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_signature_subscribe_failed_transaction() { +async fn test_ws_signature_subscribe_failed_transaction(test_type: TestType) { use crossbeam_channel::unbounded; use solana_system_interface::instruction as system_instruction; use crate::surfnet::SignatureSubscriptionType; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // create a test transaction that will fail (insufficient funds) @@ -4481,14 +4594,17 @@ async fn test_ws_signature_subscribe_failed_transaction() { ); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_signature_subscribe_multiple_subscribers() { +async fn 
test_ws_signature_subscribe_multiple_subscribers(test_type: TestType) { use crossbeam_channel::unbounded; use solana_system_interface::instruction as system_instruction; use crate::surfnet::SignatureSubscriptionType; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // create a test transaction @@ -4556,14 +4672,17 @@ async fn test_ws_signature_subscribe_multiple_subscribers() { println!("✓ Multiple subscribers all received notifications correctly"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_signature_subscribe_before_transaction_exists() { +async fn test_ws_signature_subscribe_before_transaction_exists(test_type: TestType) { use crossbeam_channel::unbounded; use solana_system_interface::instruction as system_instruction; use crate::surfnet::SignatureSubscriptionType; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); let payer = Keypair::new(); @@ -4618,12 +4737,15 @@ async fn test_ws_signature_subscribe_before_transaction_exists() { ); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_account_subscribe_balance_change() { +async fn test_ws_account_subscribe_balance_change(test_type: TestType) { use crossbeam_channel::unbounded; use solana_system_interface::instruction as system_instruction; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = 
SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // create and fund a new account @@ -4675,13 +4797,16 @@ async fn test_ws_account_subscribe_balance_change() { ); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_account_subscribe_multiple_changes() { +async fn test_ws_account_subscribe_multiple_changes(test_type: TestType) { use crossbeam_channel::unbounded; use solana_account_decoder::UiAccountEncoding; use solana_system_interface::instruction as system_instruction; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // create and fund a new account @@ -4738,13 +4863,16 @@ async fn test_ws_account_subscribe_multiple_changes() { } } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_account_subscribe_multiple_subscribers() { +async fn test_ws_account_subscribe_multiple_subscribers(test_type: TestType) { use crossbeam_channel::unbounded; use solana_account_decoder::UiAccountEncoding; use solana_system_interface::instruction as system_instruction; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); let payer = Keypair::new(); @@ -4804,13 +4932,16 @@ async fn test_ws_account_subscribe_multiple_subscribers() { println!("✓ All 3 subscribers received notifications for 
account change"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_account_subscribe_new_account_creation() { +async fn test_ws_account_subscribe_new_account_creation(test_type: TestType) { use crossbeam_channel::unbounded; use solana_account_decoder::UiAccountEncoding; use solana_system_interface::instruction as system_instruction; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); let payer = Keypair::new(); @@ -4864,13 +4995,16 @@ async fn test_ws_account_subscribe_new_account_creation() { ); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_account_subscribe_account_closure() { +async fn test_ws_account_subscribe_account_closure(test_type: TestType) { use crossbeam_channel::unbounded; use solana_account_decoder::UiAccountEncoding; use solana_system_interface::instruction as system_instruction; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); let account_to_close = Keypair::new(); @@ -4918,12 +5052,15 @@ async fn test_ws_account_subscribe_account_closure() { println!("✓ Received notification for account closure"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_slot_subscribe_basic() 
{ +async fn test_ws_slot_subscribe_basic(test_type: TestType) { use surfpool_types::types::BlockProductionMode; let (svm_locker, _simnet_commands_tx, _simnet_events_rx) = - boot_simnet(BlockProductionMode::Clock, Some(100)); + boot_simnet(BlockProductionMode::Clock, Some(100), test_type); // subscribe to slot updates let slot_rx = svm_locker.subscribe_for_slot_updates(); @@ -4949,9 +5086,12 @@ async fn test_ws_slot_subscribe_basic() { println!("✓ Slot updates are progressing correctly"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_slot_subscribe_manual_advancement() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_ws_slot_subscribe_manual_advancement(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // subscribe to slot updates @@ -4980,9 +5120,12 @@ async fn test_ws_slot_subscribe_manual_advancement() { ); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_slot_subscribe_multiple_subscribers() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_ws_slot_subscribe_multiple_subscribers(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // create multiple subscriptions @@ -5010,9 +5153,12 @@ async fn test_ws_slot_subscribe_multiple_subscribers() { println!("✓ All 3 subscribers received slot update notifications"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] 
+#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_slot_subscribe_multiple_slot_changes() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_ws_slot_subscribe_multiple_slot_changes(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); let slot_rx = svm_locker.subscribe_for_slot_updates(); @@ -5037,13 +5183,16 @@ async fn test_ws_slot_subscribe_multiple_slot_changes() { } } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_logs_subscribe_all_transactions() { +async fn test_ws_logs_subscribe_all_transactions(test_type: TestType) { use crossbeam_channel::unbounded; use solana_client::rpc_config::RpcTransactionLogsFilter; use solana_system_interface::instruction as system_instruction; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); let payer = Keypair::new(); @@ -5101,14 +5250,17 @@ async fn test_ws_logs_subscribe_all_transactions() { println!("✓ Received logs update for transaction: {}", signature); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_logs_subscribe_mentions_account() { +async fn test_ws_logs_subscribe_mentions_account(test_type: TestType) { use crossbeam_channel::unbounded; use solana_client::rpc_config::RpcTransactionLogsFilter; use 
solana_commitment_config::CommitmentLevel; use solana_system_interface::instruction as system_instruction; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); let payer = Keypair::new(); @@ -5176,14 +5328,17 @@ async fn test_ws_logs_subscribe_mentions_account() { println!("✓ Did not receive logs notification for transaction not mentioning token program"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_logs_subscribe_confirmed_commitment() { +async fn test_ws_logs_subscribe_confirmed_commitment(test_type: TestType) { use crossbeam_channel::unbounded; use solana_client::rpc_config::RpcTransactionLogsFilter; use solana_commitment_config::CommitmentLevel; use solana_system_interface::instruction as system_instruction; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // subscribe to confirmed logs @@ -5237,8 +5392,11 @@ async fn test_ws_logs_subscribe_confirmed_commitment() { println!("✓ Received confirmed logs notification at slot {}", slot); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_logs_subscribe_finalized_commitment() { +async fn test_ws_logs_subscribe_finalized_commitment(test_type: TestType) { use crossbeam_channel::unbounded; use solana_client::rpc_config::RpcTransactionLogsFilter; use solana_commitment_config::CommitmentLevel; @@ -5246,7 +5404,7 @@ async 
fn test_ws_logs_subscribe_finalized_commitment() { use crate::surfnet::FINALIZATION_SLOT_THRESHOLD; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // subscribe to finalized logs @@ -5305,14 +5463,17 @@ async fn test_ws_logs_subscribe_finalized_commitment() { println!("✓ Received finalized logs notification at slot {}", slot); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_logs_subscribe_failed_transaction() { +async fn test_ws_logs_subscribe_failed_transaction(test_type: TestType) { use crossbeam_channel::unbounded; use solana_client::rpc_config::RpcTransactionLogsFilter; use solana_commitment_config::CommitmentLevel; use solana_system_interface::instruction as system_instruction; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // create test accounts @@ -5367,14 +5528,17 @@ async fn test_ws_logs_subscribe_failed_transaction() { ); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_logs_subscribe_multiple_subscribers() { +async fn test_ws_logs_subscribe_multiple_subscribers(test_type: TestType) { use crossbeam_channel::unbounded; use solana_client::rpc_config::RpcTransactionLogsFilter; use solana_commitment_config::CommitmentLevel; use solana_system_interface::instruction as system_instruction; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = 
SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // create multiple subscriptions with different commitment levels @@ -5434,14 +5598,17 @@ async fn test_ws_logs_subscribe_multiple_subscribers() { println!("✓ All subscribers received logs notifications at their respective commitment levels"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_logs_subscribe_logs_content() { +async fn test_ws_logs_subscribe_logs_content(test_type: TestType) { use crossbeam_channel::unbounded; use solana_client::rpc_config::RpcTransactionLogsFilter; use solana_commitment_config::CommitmentLevel; use solana_system_interface::instruction as system_instruction; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // create test accounts @@ -5505,14 +5672,17 @@ async fn test_ws_logs_subscribe_logs_content() { /// Token-2022 lifecycle: /// create mint → initialize → create ATA → mint → transfer → burn → close account +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_token2022_full_lifecycle() { +async fn test_token2022_full_lifecycle(test_type: TestType) { use solana_system_interface::instruction as system_instruction; use spl_token_2022_interface::instruction::{ burn, close_account, initialize_mint2, mint_to, transfer_checked, }; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, 
_geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); let payer = Keypair::new(); @@ -5717,14 +5887,17 @@ async fn test_token2022_full_lifecycle() { } /// Token-2022 error cases: transfer/burn > balance and close with balance. +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_token2022_error_cases() { +async fn test_token2022_error_cases(test_type: TestType) { use solana_system_interface::instruction as system_instruction; use spl_token_2022_interface::instruction::{ burn, close_account, initialize_mint2, mint_to, transfer_checked, }; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); let payer = Keypair::new(); @@ -5896,14 +6069,17 @@ async fn test_token2022_error_cases() { } /// Token-2022 delegate operations: approve, delegated transfer, revoke. 
+#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_token2022_delegate_operations() { +async fn test_token2022_delegate_operations(test_type: TestType) { use solana_system_interface::instruction as system_instruction; use spl_token_2022_interface::instruction::{ approve, initialize_mint2, mint_to, revoke, transfer_checked, }; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); let owner = Keypair::new(); @@ -6114,14 +6290,17 @@ async fn test_token2022_delegate_operations() { } /// Token-2022 freeze/thaw operations. +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] #[tokio::test(flavor = "multi_thread")] -async fn test_token2022_freeze_thaw() { +async fn test_token2022_freeze_thaw(test_type: TestType) { use solana_system_interface::instruction as system_instruction; use spl_token_2022_interface::instruction::{ freeze_account, initialize_mint2, mint_to, thaw_account, transfer_checked, }; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); let owner = Keypair::new(); From 129338babc5eb09c6034cd47eb21e70d65e50a4c Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Mon, 5 Jan 2026 20:52:11 -0500 Subject: [PATCH 21/54] chore: make `remove_from_indexes` return result --- crates/core/src/surfnet/svm.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index 
5bd4f8667..cb1976175 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -908,14 +908,14 @@ impl SurfnetSvm { if account == &Account::default() { self.closed_accounts.insert(*pubkey); if let Some(old_account) = self.get_account(pubkey)? { - self.remove_from_indexes(pubkey, &old_account); + self.remove_from_indexes(pubkey, &old_account)?; } return Ok(()); } // only update our indexes if the account exists in the svm accounts db if let Some(old_account) = self.get_account(pubkey)? { - self.remove_from_indexes(pubkey, &old_account); + self.remove_from_indexes(pubkey, &old_account)?; } // add to owner index (check for duplicates) let owner_accounts = self.accounts_by_owner.entry(account.owner).or_default(); @@ -988,7 +988,11 @@ impl SurfnetSvm { Ok(()) } - fn remove_from_indexes(&mut self, pubkey: &Pubkey, old_account: &Account) { + fn remove_from_indexes( + &mut self, + pubkey: &Pubkey, + old_account: &Account, + ) -> SurfpoolResult<()> { if let Some(accounts) = self.accounts_by_owner.get_mut(&old_account.owner) { accounts.retain(|pk| pk != pubkey); if accounts.is_empty() { @@ -1031,6 +1035,7 @@ impl SurfnetSvm { } } } + Ok(()) } pub fn reset_network(&mut self) -> SurfpoolResult<()> { @@ -1109,7 +1114,7 @@ impl SurfnetSvm { account: &Account, pubkey: &Pubkey, ) -> SurfpoolResult<()> { - self.remove_from_indexes(pubkey, account); + self.remove_from_indexes(pubkey, account)?; // Set the empty account self.inner From 284025c945f8ba16411c1bf025cfd4134f270a56 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Mon, 5 Jan 2026 20:54:00 -0500 Subject: [PATCH 22/54] fix account registry update by propagating deletions to database --- crates/core/src/surfnet/surfnet_lite_svm.rs | 9 ++++++++- crates/core/src/surfnet/svm.rs | 16 +++++++++++++--- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/crates/core/src/surfnet/surfnet_lite_svm.rs b/crates/core/src/surfnet/surfnet_lite_svm.rs index d07d1b4e1..d54820a39 100644 --- 
a/crates/core/src/surfnet/surfnet_lite_svm.rs +++ b/crates/core/src/surfnet/surfnet_lite_svm.rs @@ -13,7 +13,7 @@ use solana_pubkey::Pubkey; use solana_transaction::versioned::VersionedTransaction; use crate::{ - error::SurfpoolResult, + error::{SurfpoolError, SurfpoolResult}, storage::{SqliteStorage, Storage, StorageConstructor}, surfnet::{GetAccountResult, locker::is_supported_token_program}, }; @@ -211,6 +211,13 @@ impl SurfnetLiteSvm { Ok(()) } + pub fn delete_account_in_db(&mut self, pubkey: &Pubkey) -> SurfpoolResult<()> { + if let Some(db) = &mut self.db { + db.take(&pubkey.to_string())?; + } + Ok(()) + } + /// Get all accounts from both the LiteSVM state and the database, merging them together. /// Accounts in the LiteSVM state take precedence over those in the database. /// The resulting accounts are sorted by Pubkey. diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index cb1976175..ac2b270c2 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -902,10 +902,20 @@ impl SurfnetSvm { pubkey: &Pubkey, account: &Account, ) -> SurfpoolResult<()> { - self.inner - .set_account_in_db(*pubkey, account.clone().into())?; + let is_deleted_account = account == &Account::default(); + + // When this function is called after processing a transaction, the account is already updated + // in the inner SVM. However, the database hasn't been updated yet, so we need to manually update the db. + if is_deleted_account { + // This amounts to deleting the account from the db if the account is deleted in the SVM + self.inner.delete_account_in_db(pubkey)?; + } else { + // Or updating the db account to match the SVM account if not deleted + self.inner + .set_account_in_db(*pubkey, account.clone().into())?; + } - if account == &Account::default() { + if is_deleted_account { self.closed_accounts.insert(*pubkey); if let Some(old_account) = self.get_account(pubkey)? 
{ self.remove_from_indexes(pubkey, &old_account)?; From 1ff7911126fb8d40e6d6e108e3af2adf850b585c Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Mon, 5 Jan 2026 20:54:41 -0500 Subject: [PATCH 23/54] add delete_account fn to SurfnetLiteSvm; propagate account purging to delete db accounts --- crates/core/src/surfnet/surfnet_lite_svm.rs | 11 +++++++++++ crates/core/src/surfnet/svm.rs | 5 +---- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/crates/core/src/surfnet/surfnet_lite_svm.rs b/crates/core/src/surfnet/surfnet_lite_svm.rs index d54820a39..6aa62129f 100644 --- a/crates/core/src/surfnet/surfnet_lite_svm.rs +++ b/crates/core/src/surfnet/surfnet_lite_svm.rs @@ -200,6 +200,17 @@ impl SurfnetLiteSvm { Ok(()) } + pub fn delete_account(&mut self, pubkey: &Pubkey) -> SurfpoolResult<()> { + self.delete_account_in_db(pubkey)?; + + // You can't delete an account using the LiteSvm, so we set it to an empty account + // so it can be garbage collected later + self.svm + .set_account(*pubkey, Account::default()) + .map_err(|e| SurfpoolError::set_account(*pubkey, e))?; + Ok(()) + } + pub fn set_account_in_db( &mut self, pubkey: Pubkey, diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index ac2b270c2..c55da3bf8 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -1126,10 +1126,7 @@ impl SurfnetSvm { ) -> SurfpoolResult<()> { self.remove_from_indexes(pubkey, account)?; - // Set the empty account - self.inner - .set_account(*pubkey, Account::default()) - .map_err(|e| SurfpoolError::set_account(*pubkey, e))?; + self.inner.delete_account(pubkey)?; Ok(()) } From 2ca8793542fe791ec9c47e54a7ca2e71bb695d18 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Mon, 5 Jan 2026 20:56:47 -0500 Subject: [PATCH 24/54] fix: skip db when getting post-execution accounts --- crates/core/src/surfnet/locker.rs | 5 ++--- crates/core/src/surfnet/surfnet_lite_svm.rs | 4 ++++ 2 files changed, 6 insertions(+), 3 deletions(-) diff 
--git a/crates/core/src/surfnet/locker.rs b/crates/core/src/surfnet/locker.rs index b3bd4766a..a5ede68bf 100644 --- a/crates/core/src/surfnet/locker.rs +++ b/crates/core/src/surfnet/locker.rs @@ -1386,9 +1386,8 @@ impl SurfnetSvmLocker { let post_execution_capture = self.with_svm_writer(|svm_writer| { let accounts_after = pubkeys_from_message .iter() - .map(|p| svm_writer.inner.get_account(p)) - .collect::>>>()?; - + .map(|p| svm_writer.inner.get_account_no_db(p)) + .collect::>>(); let (sanitized_transaction, versioned_transaction) = if do_propagate { ( SanitizedTransaction::try_create( diff --git a/crates/core/src/surfnet/surfnet_lite_svm.rs b/crates/core/src/surfnet/surfnet_lite_svm.rs index 6aa62129f..6ad794e58 100644 --- a/crates/core/src/surfnet/surfnet_lite_svm.rs +++ b/crates/core/src/surfnet/surfnet_lite_svm.rs @@ -144,6 +144,10 @@ impl SurfnetLiteSvm { self.svm.airdrop(pubkey, lamports) } + pub fn get_account_no_db(&self, pubkey: &Pubkey) -> Option { + self.svm.get_account(pubkey) + } + pub fn get_account(&self, pubkey: &Pubkey) -> SurfpoolResult> { if let Some(account) = self.svm.get_account(pubkey) { return Ok(Some(account)); From af932683bd46e8c8f6f4985d53eb21b5a7d3b2c2 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Mon, 5 Jan 2026 20:57:12 -0500 Subject: [PATCH 25/54] fix: sqlite connection strings to add read-write-create mode --- crates/core/src/storage/sqlite.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/crates/core/src/storage/sqlite.rs b/crates/core/src/storage/sqlite.rs index 811ed6487..b6b5315bd 100644 --- a/crates/core/src/storage/sqlite.rs +++ b/crates/core/src/storage/sqlite.rs @@ -291,7 +291,19 @@ where "Connecting to SQLite database: {} with table: {}", database_url, table_name ); - let manager = ConnectionManager::::new(database_url); + + let connection_string = if database_url != ":memory:" { + // Add connection string parameters to avoid readonly issues + if database_url.contains('?') { + 
format!("{}&mode=rwc", database_url) + } else { + format!("{}?mode=rwc", database_url) + } + } else { + database_url.to_string() + }; + + let manager = ConnectionManager::::new(connection_string); trace!("Creating connection pool"); let pool = Pool::new(manager).map_err(|e| StorageError::PooledConnectionError(NAME.into(), e))?; From 54000cd376866c324df0ec68d51620aeae4aaa91 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Tue, 6 Jan 2026 12:08:22 -0500 Subject: [PATCH 26/54] add initial postgres storage impl --- crates/core/src/storage/mod.rs | 4 + crates/core/src/storage/postgres.rs | 316 ++++++++++++++++++++++++++++ 2 files changed, 320 insertions(+) create mode 100644 crates/core/src/storage/postgres.rs diff --git a/crates/core/src/storage/mod.rs b/crates/core/src/storage/mod.rs index 768397cbd..0be8dd6eb 100644 --- a/crates/core/src/storage/mod.rs +++ b/crates/core/src/storage/mod.rs @@ -1,7 +1,11 @@ mod hash_map; +#[cfg(feature = "postgres")] +mod postgres; #[cfg(feature = "sqlite")] mod sqlite; pub use hash_map::HashMap as StorageHashMap; +#[cfg(feature = "postgres")] +pub use postgres::PostgresStorage; #[cfg(feature = "sqlite")] pub use sqlite::SqliteStorage; diff --git a/crates/core/src/storage/postgres.rs b/crates/core/src/storage/postgres.rs new file mode 100644 index 000000000..23e788747 --- /dev/null +++ b/crates/core/src/storage/postgres.rs @@ -0,0 +1,316 @@ +use log::debug; +use serde::{Deserialize, Serialize}; +use surfpool_db::diesel::{ + self, QueryableByName, RunQueryDsl, + connection::SimpleConnection, + r2d2::{ConnectionManager, Pool}, + sql_query, + sql_types::Text, +}; + +use crate::storage::{Storage, StorageConstructor, StorageError, StorageResult}; + +#[derive(QueryableByName, Debug)] +struct KvRecord { + #[diesel(sql_type = Text)] + key: String, + #[diesel(sql_type = Text)] + value: String, +} + +#[derive(QueryableByName, Debug)] +struct ValueRecord { + #[diesel(sql_type = Text)] + value: String, +} + +#[derive(QueryableByName, Debug)] 
+struct KeyRecord { + #[diesel(sql_type = Text)] + key: String, +} + +#[derive(Clone)] +pub struct PostgresStorage { + pool: Pool>, + _phantom: std::marker::PhantomData<(K, V)>, + table_name: String, +} + +const NAME: &str = "PostgreSQL"; + +impl PostgresStorage +where + K: Serialize + for<'de> Deserialize<'de>, + V: Serialize + for<'de> Deserialize<'de> + Clone, +{ + fn ensure_table_exists(&self) -> StorageResult<()> { + debug!("Ensuring table '{}' exists", self.table_name); + let create_table_sql = format!( + " + CREATE TABLE IF NOT EXISTS {} ( + key TEXT PRIMARY KEY, + value TEXT NOT NULL, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ) + ", + self.table_name + ); + + debug!("Getting connection from pool for table creation"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + conn.batch_execute(&create_table_sql) + .map_err(|e| StorageError::create_table(&self.table_name, NAME, e))?; + + debug!("Successfully ensured table '{}' exists", self.table_name); + Ok(()) + } + + fn serialize_key(&self, key: &K) -> StorageResult { + trace!("Serializing key for table '{}'", self.table_name); + let result = + serde_json::to_string(key).map_err(|e| StorageError::SerializeKeyError(NAME.into(), e)); + if let Ok(ref serialized) = result { + trace!("Key serialized successfully: {}", serialized); + } + result + } + + fn serialize_value(&self, value: &V) -> StorageResult { + trace!("Serializing value for table '{}'", self.table_name); + let result = serde_json::to_string(value) + .map_err(|e| StorageError::SerializeValueError(NAME.into(), e)); + if let Ok(ref serialized) = result { + trace!( + "Value serialized successfully, length: {} chars", + serialized.len() + ); + } + result + } + + fn deserialize_value(&self, value_str: &str) -> StorageResult { + trace!( + "Deserializing value from table '{}', input length: {} chars", + self.table_name, + value_str.len() + ); + let result = 
serde_json::from_str(value_str) + .map_err(|e| StorageError::DeserializeValueError(NAME.into(), e)); + if result.is_ok() { + trace!("Value deserialized successfully"); + } + result + } + + fn load_value_from_db(&self, key_str: &str) -> StorageResult> { + debug!("Loading value from DB for key: {}", key_str); + let query = sql_query(format!( + "SELECT value FROM {} WHERE key = $1", + self.table_name + )) + .bind::(key_str); + + trace!("Getting connection from pool for loading value"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + let records = query + .load::(&mut *conn) + .map_err(|e| StorageError::get(&self.table_name, NAME, key_str, e))?; + + if let Some(record) = records.into_iter().next() { + debug!("Found record for key: {}", key_str); + let value = self.deserialize_value(&record.value)?; + Ok(Some(value)) + } else { + debug!("No record found for key: {}", key_str); + Ok(None) + } + } +} + +impl Storage for PostgresStorage +where + K: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, + V: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, +{ + fn store(&mut self, key: K, value: V) -> StorageResult<()> { + debug!("Storing value in table '{}'", self.table_name); + let key_str = self.serialize_key(&key)?; + let value_str = self.serialize_value(&value)?; + + // Use PostgreSQL UPSERT syntax with ON CONFLICT + let query = sql_query(format!( + "INSERT INTO {} (key, value, updated_at) VALUES ($1, $2, CURRENT_TIMESTAMP) + ON CONFLICT (key) DO UPDATE SET + value = EXCLUDED.value, + updated_at = CURRENT_TIMESTAMP", + self.table_name + )) + .bind::(&key_str) + .bind::(&value_str); + + trace!("Getting connection from pool for store operation"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + query + .execute(&mut *conn) + .map_err(|e| StorageError::store(&self.table_name, NAME, &key_str, e))?; + + debug!("Value stored successfully in table '{}'", self.table_name); + Ok(()) + } 
+ + fn get(&self, key: &K) -> StorageResult> { + debug!("Getting value from table '{}'", self.table_name); + let key_str = self.serialize_key(key)?; + + self.load_value_from_db(&key_str) + } + + fn take(&mut self, key: &K) -> StorageResult> { + debug!("Taking value from table '{}'", self.table_name); + let key_str = self.serialize_key(key)?; + + // Load the value from the database before removing it + if let Some(value) = self.load_value_from_db(&key_str)? { + debug!("Value found, removing from database"); + // Remove from database + let delete_query = sql_query(format!("DELETE FROM {} WHERE key = $1", self.table_name)) + .bind::(&key_str); + + trace!("Getting connection from pool for delete operation"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + delete_query + .execute(&mut *conn) + .map_err(|e| StorageError::delete(&self.table_name, NAME, &key_str, e))?; + + debug!( + "Value taken and removed successfully from table '{}'", + self.table_name + ); + Ok(Some(value)) + } else { + debug!("No value found to take from table '{}'", self.table_name); + Ok(None) + } + } + + fn clear(&mut self) -> StorageResult<()> { + debug!("Clearing all data from table '{}'", self.table_name); + let delete_query = sql_query(format!("DELETE FROM {}", self.table_name)); + + trace!("Getting connection from pool for clear operation"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + delete_query + .execute(&mut *conn) + .map_err(|e| StorageError::delete(&self.table_name, NAME, "*all*", e))?; + + debug!("Table '{}' cleared successfully", self.table_name); + Ok(()) + } + + fn keys(&self) -> StorageResult> { + debug!("Fetching all keys from table '{}'", self.table_name); + let query = sql_query(format!("SELECT key FROM {}", self.table_name)); + + trace!("Getting connection from pool for keys operation"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + let records = query + .load::(&mut *conn) + .map_err(|e| 
StorageError::get_all_keys(&self.table_name, NAME, e))?; + + let mut keys = Vec::new(); + for record in records { + let key: K = serde_json::from_str(&record.key) + .map_err(|e| StorageError::DeserializeValueError(NAME.into(), e))?; + keys.push(key); + } + + debug!( + "Retrieved {} keys from table '{}'", + keys.len(), + self.table_name + ); + Ok(keys) + } + + fn clone_box(&self) -> Box> { + Box::new(self.clone()) + } + + fn into_iter(&self) -> StorageResult + '_>> { + debug!( + "Creating iterator for all key-value pairs in table '{}'", + self.table_name + ); + let query = sql_query(format!("SELECT key, value FROM {}", self.table_name)); + + trace!("Getting connection from pool for into_iter operation"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + let records = query + .load::(&mut *conn) + .map_err(|e| StorageError::get_all_key_value_pairs(&self.table_name, NAME, e))?; + + let iter = records.into_iter().filter_map(move |record| { + let key: K = match serde_json::from_str(&record.key) { + Ok(k) => k, + Err(e) => { + debug!("Failed to deserialize key: {}", e); + return None; + } + }; + let value: V = match serde_json::from_str(&record.value) { + Ok(v) => v, + Err(e) => { + debug!("Failed to deserialize value: {}", e); + return None; + } + }; + Some((key, value)) + }); + + debug!( + "Iterator created successfully for table '{}'", + self.table_name + ); + Ok(Box::new(iter)) + } +} + +impl StorageConstructor for PostgresStorage +where + K: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, + V: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, +{ + fn connect(database_url: &str, table_name: &str) -> StorageResult { + debug!( + "Connecting to PostgreSQL database: {} with table: {}", + database_url, table_name + ); + + let manager = ConnectionManager::::new(database_url); + trace!("Creating connection pool"); + let pool = + Pool::new(manager).map_err(|e| 
StorageError::PooledConnectionError(NAME.into(), e))?; + + let storage = PostgresStorage { + pool, + _phantom: std::marker::PhantomData, + table_name: table_name.to_string(), + }; + + storage.ensure_table_exists()?; + debug!( + "PostgreSQL storage connected successfully for table: {}", + table_name + ); + Ok(storage) + } +} From 09d85925789a9b54b8062803b8cce898084369dd Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Tue, 6 Jan 2026 12:08:56 -0500 Subject: [PATCH 27/54] create/impl storage instantiation helper fn --- crates/core/src/storage/mod.rs | 62 +++++++++++++++++++++ crates/core/src/surfnet/surfnet_lite_svm.rs | 4 +- crates/core/src/surfnet/svm.rs | 9 +-- 3 files changed, 66 insertions(+), 9 deletions(-) diff --git a/crates/core/src/storage/mod.rs b/crates/core/src/storage/mod.rs index 0be8dd6eb..6edb5b08e 100644 --- a/crates/core/src/storage/mod.rs +++ b/crates/core/src/storage/mod.rs @@ -11,8 +11,70 @@ pub use sqlite::SqliteStorage; use crate::error::SurfpoolError; +pub fn new_kv_store( + database_url: &Option<&str>, + table_name: &str, +) -> StorageResult>> +where + K: serde::Serialize + + serde::de::DeserializeOwned + + Send + + Sync + + 'static + + Clone + + Eq + + std::hash::Hash, + V: serde::Serialize + serde::de::DeserializeOwned + Send + Sync + 'static + Clone, +{ + match database_url { + Some(url) => { + #[cfg(feature = "postgres")] + if url.starts_with("postgres://") || url.starts_with("postgresql://") { + let storage = PostgresStorage::connect(url, table_name)?; + Ok(Box::new(storage)) + } else { + #[cfg(feature = "sqlite")] + { + let storage = SqliteStorage::connect(url, table_name)?; + Ok(Box::new(storage)) + } + #[cfg(not(feature = "sqlite"))] + { + Err(StorageError::InvalidPostgresUrl(url.to_string())) + } + } + + #[cfg(not(feature = "postgres"))] + if url.starts_with("postgres://") || url.starts_with("postgresql://") { + Err(StorageError::PostgresNotEnabled) + } else { + #[cfg(feature = "sqlite")] + { + let storage = + 
SqliteStorage::connect(database_url.unwrap_or(":memory:"), table_name)?; + Ok(Box::new(storage)) + } + #[cfg(not(feature = "sqlite"))] + { + Err(StorageError::SqliteNotEnabled) + } + } + } + _ => { + let storage = StorageHashMap::new(); + Ok(Box::new(storage)) + } + } +} + #[derive(Debug, thiserror::Error)] pub enum StorageError { + #[error("Sqlite storage is not enabled in this build")] + SqliteNotEnabled, + #[error("Postgres storage is not enabled in this build")] + PostgresNotEnabled, + #[error("Invalid Postgres database URL: {0}")] + InvalidPostgresUrl(String), #[error("Failed to get pooled connection for '{0}' database: {1}")] PooledConnectionError(String, #[source] surfpool_db::diesel::r2d2::PoolError), #[error("Failed to serialize key for '{0}' database: {1}")] diff --git a/crates/core/src/surfnet/surfnet_lite_svm.rs b/crates/core/src/surfnet/surfnet_lite_svm.rs index 6ad794e58..4f4ff43e6 100644 --- a/crates/core/src/surfnet/surfnet_lite_svm.rs +++ b/crates/core/src/surfnet/surfnet_lite_svm.rs @@ -14,7 +14,7 @@ use solana_transaction::versioned::VersionedTransaction; use crate::{ error::{SurfpoolError, SurfpoolResult}, - storage::{SqliteStorage, Storage, StorageConstructor}, + storage::{Storage, new_kv_store}, surfnet::{GetAccountResult, locker::is_supported_token_program}, }; @@ -46,7 +46,7 @@ impl SurfnetLiteSvm { if let Some(db_url) = database_url { let db: Box> = - Box::new(SqliteStorage::connect(db_url, "accounts")?); + new_kv_store(&Some(db_url), "accounts")?; self.db = Some(db); } diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index c55da3bf8..a53f5244e 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -97,7 +97,7 @@ use crate::{ error::{SurfpoolError, SurfpoolResult}, rpc::utils::convert_transaction_metadata_from_canonical, scenarios::TemplateRegistry, - storage::{SqliteStorage, Storage, StorageConstructor}, + storage::{Storage, new_kv_store}, surfnet::{ LogsSubscriptionData, 
locker::is_supported_token_program, surfnet_lite_svm::SurfnetLiteSvm, }, @@ -322,12 +322,7 @@ impl SurfnetSvm { let token_mints = HashMap::from([(spl_token_interface::native_mint::ID, parsed_mint_account)]); - let blocks_db = if let Some(database_url) = database_url { - Box::new(SqliteStorage::connect(database_url, "blocks")?) - as Box> - } else { - Box::new(crate::storage::StorageHashMap::new()) as Box> - }; + let blocks_db = new_kv_store(&database_url, "blocks")?; let chain_tip = if let Some((_, block)) = blocks_db .into_iter() From 01ee88cfbc5225bcad62603ccc83270b84a1ce2d Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Tue, 6 Jan 2026 13:24:29 -0500 Subject: [PATCH 28/54] feat: add surfnet_id to isolate database storage for multiple surfnets --- crates/cli/src/cli/mod.rs | 4 ++ crates/cli/src/cli/simnet/mod.rs | 2 +- crates/core/src/storage/mod.rs | 13 +++--- crates/core/src/storage/postgres.rs | 50 +++++++++++++++------ crates/core/src/storage/sqlite.rs | 46 ++++++++++++++----- crates/core/src/surfnet/surfnet_lite_svm.rs | 3 +- crates/core/src/surfnet/svm.rs | 10 +++-- crates/types/src/types.rs | 4 ++ 8 files changed, 94 insertions(+), 38 deletions(-) diff --git a/crates/cli/src/cli/mod.rs b/crates/cli/src/cli/mod.rs index cacd393d3..f8e7aaaa4 100644 --- a/crates/cli/src/cli/mod.rs +++ b/crates/cli/src/cli/mod.rs @@ -248,6 +248,9 @@ pub struct StartSimnet { /// Surfnet database connection URL for persistent Surfnets. For an in-memory sqlite database, use ":memory:". For an on-disk sqlite database, use a filename ending in '.sqlite'. #[arg(long = "db")] pub db: Option, + /// Unique identifier for this surfnet instance. Used to isolate database storage when multiple surfnets share the same database. Defaults to 0. 
+ #[arg(long = "surfnet-id", default_value_t = 0)] + pub surfnet_id: u32, } fn parse_svm_feature(s: &str) -> Result { @@ -399,6 +402,7 @@ impl StartSimnet { }, feature_config: self.feature_config(), skip_signature_verification: false, + surfnet_id: self.surfnet_id, } } diff --git a/crates/cli/src/cli/simnet/mod.rs b/crates/cli/src/cli/simnet/mod.rs index c514414a3..3b455dbf1 100644 --- a/crates/cli/src/cli/simnet/mod.rs +++ b/crates/cli/src/cli/simnet/mod.rs @@ -61,7 +61,7 @@ pub async fn handle_start_local_surfnet_command( // We start the simnet as soon as possible, as it needs to be ready for deployments let (mut surfnet_svm, simnet_events_rx, geyser_events_rx) = - SurfnetSvm::new_with_db(cmd.db.as_deref()) + SurfnetSvm::new_with_db(cmd.db.as_deref(), cmd.surfnet_id) .map_err(|e| format!("Failed to initialize Surfnet SVM: {}", e))?; // Apply feature configuration from CLI flags diff --git a/crates/core/src/storage/mod.rs b/crates/core/src/storage/mod.rs index 6edb5b08e..ec8e68df0 100644 --- a/crates/core/src/storage/mod.rs +++ b/crates/core/src/storage/mod.rs @@ -14,6 +14,7 @@ use crate::error::SurfpoolError; pub fn new_kv_store( database_url: &Option<&str>, table_name: &str, + surfnet_id: u32, ) -> StorageResult>> where K: serde::Serialize @@ -30,12 +31,12 @@ where Some(url) => { #[cfg(feature = "postgres")] if url.starts_with("postgres://") || url.starts_with("postgresql://") { - let storage = PostgresStorage::connect(url, table_name)?; + let storage = PostgresStorage::connect(url, table_name, surfnet_id)?; Ok(Box::new(storage)) } else { #[cfg(feature = "sqlite")] { - let storage = SqliteStorage::connect(url, table_name)?; + let storage = SqliteStorage::connect(url, table_name, surfnet_id)?; Ok(Box::new(storage)) } #[cfg(not(feature = "sqlite"))] @@ -51,7 +52,7 @@ where #[cfg(feature = "sqlite")] { let storage = - SqliteStorage::connect(database_url.unwrap_or(":memory:"), table_name)?; + SqliteStorage::connect(database_url.unwrap_or(":memory:"), table_name, 
surfnet_id)?; Ok(Box::new(storage)) } #[cfg(not(feature = "sqlite"))] @@ -209,7 +210,7 @@ impl Clone for Box> { // Separate trait for construction - this doesn't need to be dyn-compatible pub trait StorageConstructor: Storage + Clone { - fn connect(database_url: &str, table_name: &str) -> StorageResult + fn connect(database_url: &str, table_name: &str, surfnet_id: u32) -> StorageResult where Self: Sized; } @@ -233,9 +234,9 @@ pub mod tests { pub fn initialize_svm(&self) -> (SurfnetSvm, Receiver, Receiver) { match &self { TestType::NoDb => SurfnetSvm::new(), - TestType::InMemorySqlite => SurfnetSvm::new_with_db(Some(":memory:")).unwrap(), + TestType::InMemorySqlite => SurfnetSvm::new_with_db(Some(":memory:"), 0).unwrap(), TestType::OnDiskSqlite(db_path) => { - SurfnetSvm::new_with_db(Some(db_path.as_ref())).unwrap() + SurfnetSvm::new_with_db(Some(db_path.as_ref()), 0).unwrap() } } } diff --git a/crates/core/src/storage/postgres.rs b/crates/core/src/storage/postgres.rs index 23e788747..c9f183c6d 100644 --- a/crates/core/src/storage/postgres.rs +++ b/crates/core/src/storage/postgres.rs @@ -35,6 +35,7 @@ pub struct PostgresStorage { pool: Pool>, _phantom: std::marker::PhantomData<(K, V)>, table_name: String, + surfnet_id: u32, } const NAME: &str = "PostgreSQL"; @@ -49,10 +50,12 @@ where let create_table_sql = format!( " CREATE TABLE IF NOT EXISTS {} ( - key TEXT PRIMARY KEY, + surfnet_id INTEGER NOT NULL, + key TEXT NOT NULL, value TEXT NOT NULL, created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + PRIMARY KEY (surfnet_id, key) ) ", self.table_name @@ -108,9 +111,10 @@ where fn load_value_from_db(&self, key_str: &str) -> StorageResult> { debug!("Loading value from DB for key: {}", key_str); let query = sql_query(format!( - "SELECT value FROM {} WHERE key = $1", + "SELECT value FROM {} WHERE surfnet_id = $1 AND key = $2", self.table_name )) + .bind::(self.surfnet_id as i32) 
.bind::(key_str); trace!("Getting connection from pool for loading value"); @@ -143,12 +147,13 @@ where // Use PostgreSQL UPSERT syntax with ON CONFLICT let query = sql_query(format!( - "INSERT INTO {} (key, value, updated_at) VALUES ($1, $2, CURRENT_TIMESTAMP) - ON CONFLICT (key) DO UPDATE SET - value = EXCLUDED.value, + "INSERT INTO {} (surfnet_id, key, value, updated_at) VALUES ($1, $2, $3, CURRENT_TIMESTAMP) + ON CONFLICT (surfnet_id, key) DO UPDATE SET + value = EXCLUDED.value, updated_at = CURRENT_TIMESTAMP", self.table_name )) + .bind::(self.surfnet_id as i32) .bind::(&key_str) .bind::(&value_str); @@ -178,8 +183,12 @@ where if let Some(value) = self.load_value_from_db(&key_str)? { debug!("Value found, removing from database"); // Remove from database - let delete_query = sql_query(format!("DELETE FROM {} WHERE key = $1", self.table_name)) - .bind::(&key_str); + let delete_query = sql_query(format!( + "DELETE FROM {} WHERE surfnet_id = $1 AND key = $2", + self.table_name + )) + .bind::(self.surfnet_id as i32) + .bind::(&key_str); trace!("Getting connection from pool for delete operation"); let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; @@ -201,7 +210,11 @@ where fn clear(&mut self) -> StorageResult<()> { debug!("Clearing all data from table '{}'", self.table_name); - let delete_query = sql_query(format!("DELETE FROM {}", self.table_name)); + let delete_query = sql_query(format!( + "DELETE FROM {} WHERE surfnet_id = $1", + self.table_name + )) + .bind::(self.surfnet_id as i32); trace!("Getting connection from pool for clear operation"); let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; @@ -216,7 +229,11 @@ where fn keys(&self) -> StorageResult> { debug!("Fetching all keys from table '{}'", self.table_name); - let query = sql_query(format!("SELECT key FROM {}", self.table_name)); + let query = sql_query(format!( + "SELECT key FROM {} WHERE surfnet_id = $1", + self.table_name + )) + .bind::(self.surfnet_id as i32); 
trace!("Getting connection from pool for keys operation"); let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; @@ -249,7 +266,11 @@ where "Creating iterator for all key-value pairs in table '{}'", self.table_name ); - let query = sql_query(format!("SELECT key, value FROM {}", self.table_name)); + let query = sql_query(format!( + "SELECT key, value FROM {} WHERE surfnet_id = $1", + self.table_name + )) + .bind::(self.surfnet_id as i32); trace!("Getting connection from pool for into_iter operation"); let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; @@ -289,10 +310,10 @@ where K: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, V: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, { - fn connect(database_url: &str, table_name: &str) -> StorageResult { + fn connect(database_url: &str, table_name: &str, surfnet_id: u32) -> StorageResult { debug!( - "Connecting to PostgreSQL database: {} with table: {}", - database_url, table_name + "Connecting to PostgreSQL database: {} with table: {} and surfnet_id: {}", + database_url, table_name, surfnet_id ); let manager = ConnectionManager::::new(database_url); @@ -304,6 +325,7 @@ where pool, _phantom: std::marker::PhantomData, table_name: table_name.to_string(), + surfnet_id, }; storage.ensure_table_exists()?; diff --git a/crates/core/src/storage/sqlite.rs b/crates/core/src/storage/sqlite.rs index b6b5315bd..b2a536944 100644 --- a/crates/core/src/storage/sqlite.rs +++ b/crates/core/src/storage/sqlite.rs @@ -35,6 +35,7 @@ pub struct SqliteStorage { pool: Pool>, _phantom: std::marker::PhantomData<(K, V)>, table_name: String, + surfnet_id: u32, } const NAME: &str = "SQLite"; @@ -49,10 +50,12 @@ where let create_table_sql = format!( " CREATE TABLE IF NOT EXISTS {} ( - key TEXT PRIMARY KEY, + surfnet_id INTEGER NOT NULL, + key TEXT NOT NULL, value TEXT NOT NULL, created_at DATETIME DEFAULT CURRENT_TIMESTAMP, - updated_at DATETIME DEFAULT CURRENT_TIMESTAMP 
+ updated_at DATETIME DEFAULT CURRENT_TIMESTAMP, + PRIMARY KEY (surfnet_id, key) ) ", self.table_name @@ -108,9 +111,10 @@ where fn load_value_from_db(&self, key_str: &str) -> StorageResult> { debug!("Loading value from DB for key: {}", key_str); let query = sql_query(format!( - "SELECT value FROM {} WHERE key = ?", + "SELECT value FROM {} WHERE surfnet_id = ? AND key = ?", self.table_name )) + .bind::(self.surfnet_id as i32) .bind::(key_str); trace!("Getting connection from pool for loading value"); @@ -143,9 +147,10 @@ where // Use prepared statement with sql_query for better safety let query = sql_query(format!( - "INSERT OR REPLACE INTO {} (key, value, updated_at) VALUES (?, ?, CURRENT_TIMESTAMP)", + "INSERT OR REPLACE INTO {} (surfnet_id, key, value, updated_at) VALUES (?, ?, ?, CURRENT_TIMESTAMP)", self.table_name )) + .bind::(self.surfnet_id as i32) .bind::(&key_str) .bind::(&value_str); @@ -175,8 +180,12 @@ where if let Some(value) = self.load_value_from_db(&key_str)? { debug!("Value found, removing from database"); // Remove from database - let delete_query = sql_query(format!("DELETE FROM {} WHERE key = ?", self.table_name)) - .bind::(&key_str); + let delete_query = sql_query(format!( + "DELETE FROM {} WHERE surfnet_id = ? 
AND key = ?", + self.table_name + )) + .bind::(self.surfnet_id as i32) + .bind::(&key_str); trace!("Getting connection from pool for delete operation"); let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; @@ -198,7 +207,11 @@ where fn clear(&mut self) -> StorageResult<()> { debug!("Clearing all data from table '{}'", self.table_name); - let delete_query = sql_query(format!("DELETE FROM {}", self.table_name)); + let delete_query = sql_query(format!( + "DELETE FROM {} WHERE surfnet_id = ?", + self.table_name + )) + .bind::(self.surfnet_id as i32); trace!("Getting connection from pool for clear operation"); let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; @@ -213,7 +226,11 @@ where fn keys(&self) -> StorageResult> { debug!("Fetching all keys from table '{}'", self.table_name); - let query = sql_query(format!("SELECT key FROM {}", self.table_name)); + let query = sql_query(format!( + "SELECT key FROM {} WHERE surfnet_id = ?", + self.table_name + )) + .bind::(self.surfnet_id as i32); trace!("Getting connection from pool for keys operation"); let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; @@ -246,7 +263,11 @@ where "Creating iterator for all key-value pairs in table '{}'", self.table_name ); - let query = sql_query(format!("SELECT key, value FROM {}", self.table_name)); + let query = sql_query(format!( + "SELECT key, value FROM {} WHERE surfnet_id = ?", + self.table_name + )) + .bind::(self.surfnet_id as i32); trace!("Getting connection from pool for into_iter operation"); let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; @@ -286,10 +307,10 @@ where K: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, V: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, { - fn connect(database_url: &str, table_name: &str) -> StorageResult { + fn connect(database_url: &str, table_name: &str, surfnet_id: u32) -> StorageResult { debug!( - "Connecting to SQLite 
database: {} with table: {}", - database_url, table_name + "Connecting to SQLite database: {} with table: {} and surfnet_id: {}", + database_url, table_name, surfnet_id ); let connection_string = if database_url != ":memory:" { @@ -312,6 +333,7 @@ where pool, _phantom: std::marker::PhantomData, table_name: table_name.to_string(), + surfnet_id, }; storage.ensure_table_exists()?; diff --git a/crates/core/src/surfnet/surfnet_lite_svm.rs b/crates/core/src/surfnet/surfnet_lite_svm.rs index 4f4ff43e6..b301b2b9d 100644 --- a/crates/core/src/surfnet/surfnet_lite_svm.rs +++ b/crates/core/src/surfnet/surfnet_lite_svm.rs @@ -36,6 +36,7 @@ impl SurfnetLiteSvm { mut self, feature_set: FeatureSet, database_url: Option<&str>, + surfnet_id: u32, ) -> SurfpoolResult { self.svm = LiteSVM::new() .with_blockhash_check(false) @@ -46,7 +47,7 @@ impl SurfnetLiteSvm { if let Some(db_url) = database_url { let db: Box> = - new_kv_store(&Some(db_url), "accounts")?; + new_kv_store(&Some(db_url), "accounts", surfnet_id)?; self.db = Some(db); } diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index a53f5244e..d76747025 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -283,13 +283,14 @@ pub const FEATURE: Feature = Feature { impl SurfnetSvm { pub fn new() -> (Self, Receiver, Receiver) { - Self::_new(None).unwrap() + Self::_new(None, 0).unwrap() } pub fn new_with_db( database_url: Option<&str>, + surfnet_id: u32, ) -> SurfpoolResult<(Self, Receiver, Receiver)> { - Self::_new(database_url) + Self::_new(database_url, surfnet_id) } /// Creates a new instance of `SurfnetSvm`. @@ -297,6 +298,7 @@ impl SurfnetSvm { /// Returns a tuple containing the SVM instance, a receiver for simulation events, and a receiver for Geyser plugin events. 
fn _new( database_url: Option<&str>, + surfnet_id: u32, ) -> SurfpoolResult<(Self, Receiver, Receiver)> { let (simnet_events_tx, simnet_events_rx) = crossbeam_channel::bounded(1024); let (geyser_events_tx, geyser_events_rx) = crossbeam_channel::bounded(1024); @@ -307,7 +309,7 @@ impl SurfnetSvm { // todo: consider making this configurable via config feature_set.deactivate(&enable_extend_program_checked::id()); - let inner = SurfnetLiteSvm::new().initialize(feature_set.clone(), database_url)?; + let inner = SurfnetLiteSvm::new().initialize(feature_set.clone(), database_url, surfnet_id)?; let native_mint_account = inner .get_account(&spl_token_interface::native_mint::ID)? @@ -322,7 +324,7 @@ impl SurfnetSvm { let token_mints = HashMap::from([(spl_token_interface::native_mint::ID, parsed_mint_account)]); - let blocks_db = new_kv_store(&database_url, "blocks")?; + let blocks_db = new_kv_store(&database_url, "blocks", surfnet_id)?; let chain_tip = if let Some((_, block)) = blocks_db .into_iter() diff --git a/crates/types/src/types.rs b/crates/types/src/types.rs index 55f577b0d..164106667 100644 --- a/crates/types/src/types.rs +++ b/crates/types/src/types.rs @@ -549,6 +549,9 @@ pub struct SimnetConfig { pub log_bytes_limit: Option, pub feature_config: SvmFeatureConfig, pub skip_signature_verification: bool, + /// Unique identifier for this surfnet instance. Used to isolate database storage + /// when multiple surfnets share the same database. Defaults to 0. 
+ pub surfnet_id: u32, } impl Default for SimnetConfig { @@ -566,6 +569,7 @@ impl Default for SimnetConfig { log_bytes_limit: Some(10_000), feature_config: SvmFeatureConfig::default(), skip_signature_verification: false, + surfnet_id: 0, } } } From 7b973075d8b2efe2341b5952c6b8ee44a5692846 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Tue, 6 Jan 2026 14:54:13 -0500 Subject: [PATCH 29/54] add test utils for postgres storage tests --- crates/core/src/storage/mod.rs | 74 +++++++++++++++++++++++++++++++++- 1 file changed, 72 insertions(+), 2 deletions(-) diff --git a/crates/core/src/storage/mod.rs b/crates/core/src/storage/mod.rs index ec8e68df0..1f0e4890d 100644 --- a/crates/core/src/storage/mod.rs +++ b/crates/core/src/storage/mod.rs @@ -51,8 +51,11 @@ where } else { #[cfg(feature = "sqlite")] { - let storage = - SqliteStorage::connect(database_url.unwrap_or(":memory:"), table_name, surfnet_id)?; + let storage = SqliteStorage::connect( + database_url.unwrap_or(":memory:"), + table_name, + surfnet_id, + )?; Ok(Box::new(storage)) } #[cfg(not(feature = "sqlite"))] @@ -217,6 +220,8 @@ pub trait StorageConstructor: Storage + Clone { #[cfg(test)] pub mod tests { + use std::collections::hash_map::RandomState; + use std::hash::{BuildHasher, Hasher}; use std::os::unix::fs::PermissionsExt; use crossbeam_channel::Receiver; @@ -224,10 +229,34 @@ pub mod tests { use crate::surfnet::{GeyserEvent, svm::SurfnetSvm}; + /// Environment variable for PostgreSQL database URL used in tests + pub const POSTGRES_TEST_URL_ENV: &str = "SURFPOOL_TEST_POSTGRES_URL"; + + /// Generates a random u32 using std's RandomState (no external dependencies) + pub fn random_surfnet_id() -> u32 { + let state = RandomState::new(); + let mut hasher = state.build_hasher(); + // Use thread name/id as string since as_u64() is unstable + hasher.write(format!("{:?}", std::thread::current().id()).as_bytes()); + hasher.write_u128( + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() 
+ .as_nanos(), + ); + hasher.finish() as u32 + } + pub enum TestType { NoDb, InMemorySqlite, OnDiskSqlite(String), + /// PostgreSQL with a random surfnet_id for test isolation + #[cfg(feature = "postgres")] + Postgres { + url: String, + surfnet_id: u32, + }, } impl TestType { @@ -238,6 +267,10 @@ pub mod tests { TestType::OnDiskSqlite(db_path) => { SurfnetSvm::new_with_db(Some(db_path.as_ref()), 0).unwrap() } + #[cfg(feature = "postgres")] + TestType::Postgres { url, surfnet_id } => { + SurfnetSvm::new_with_db(Some(url.as_ref()), *surfnet_id).unwrap() + } } } @@ -245,12 +278,47 @@ pub mod tests { let database_url = crate::storage::tests::create_tmp_sqlite_storage(); TestType::OnDiskSqlite(database_url) } + pub fn no_db() -> Self { TestType::NoDb } + pub fn in_memory() -> Self { TestType::InMemorySqlite } + + /// Creates a PostgreSQL test type with a random surfnet_id for test isolation. + /// The database URL is read from the SURFPOOL_TEST_POSTGRES_URL environment variable. + /// Panics if the environment variable is not set. + #[cfg(feature = "postgres")] + pub fn postgres() -> Self { + let url = std::env::var(POSTGRES_TEST_URL_ENV).unwrap_or_else(|_| { + panic!( + "PostgreSQL test URL not set. Set the {} environment variable.", + POSTGRES_TEST_URL_ENV + ) + }); + let surfnet_id = random_surfnet_id(); + println!( + "Created PostgreSQL test connection with surfnet_id: {}", + surfnet_id + ); + TestType::Postgres { url, surfnet_id } + } + + /// Creates a PostgreSQL test type with a random surfnet_id for test isolation. + /// Returns None if the SURFPOOL_TEST_POSTGRES_URL environment variable is not set. 
+ #[cfg(feature = "postgres")] + pub fn postgres_if_available() -> Option { + std::env::var(POSTGRES_TEST_URL_ENV).ok().map(|url| { + let surfnet_id = random_surfnet_id(); + println!( + "Created PostgreSQL test connection with surfnet_id: {}", + surfnet_id + ); + TestType::Postgres { url, surfnet_id } + }) + } } impl Drop for TestType { @@ -259,6 +327,8 @@ pub mod tests { // Delete file at db_path when TestType goes out of scope let _ = std::fs::remove_file(db_path); } + // Note: PostgreSQL data is isolated by surfnet_id and doesn't need cleanup + // The random surfnet_id ensures test isolation without table cleanup } } From 1178a732d36b179178887c53cfbe295dc46ae97f Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Tue, 6 Jan 2026 14:54:25 -0500 Subject: [PATCH 30/54] feat: implement shared connection pool for PostgreSQL storage to optimize resource usage in parallel tests --- crates/core/src/storage/postgres.rs | 36 +++++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 4 deletions(-) diff --git a/crates/core/src/storage/postgres.rs b/crates/core/src/storage/postgres.rs index c9f183c6d..730c7296a 100644 --- a/crates/core/src/storage/postgres.rs +++ b/crates/core/src/storage/postgres.rs @@ -1,3 +1,6 @@ +use std::collections::HashMap; +use std::sync::{Mutex, OnceLock}; + use log::debug; use serde::{Deserialize, Serialize}; use surfpool_db::diesel::{ @@ -10,6 +13,32 @@ use surfpool_db::diesel::{ use crate::storage::{Storage, StorageConstructor, StorageError, StorageResult}; +/// Global shared connection pools keyed by database URL. +/// This allows multiple PostgresStorage instances to share the same pool, +/// which is essential for tests that run in parallel. 
+static SHARED_POOLS: OnceLock>>>> = OnceLock::new(); + +fn get_or_create_shared_pool(database_url: &str) -> StorageResult>> { + let pools = SHARED_POOLS.get_or_init(|| Mutex::new(HashMap::new())); + let mut pools_guard = pools.lock().map_err(|_| StorageError::LockError)?; + + if let Some(pool) = pools_guard.get(database_url) { + debug!("Reusing existing shared PostgreSQL connection pool for {}", database_url); + return Ok(pool.clone()); + } + + debug!("Creating new shared PostgreSQL connection pool for {}", database_url); + let manager = ConnectionManager::::new(database_url); + let pool = Pool::builder() + .max_size(10) // Limit total connections across all tests + .min_idle(Some(1)) + .build(manager) + .map_err(|e| StorageError::PooledConnectionError(NAME.into(), e))?; + + pools_guard.insert(database_url.to_string(), pool.clone()); + Ok(pool) +} + #[derive(QueryableByName, Debug)] struct KvRecord { #[diesel(sql_type = Text)] @@ -316,10 +345,9 @@ where database_url, table_name, surfnet_id ); - let manager = ConnectionManager::::new(database_url); - trace!("Creating connection pool"); - let pool = - Pool::new(manager).map_err(|e| StorageError::PooledConnectionError(NAME.into(), e))?; + // Use shared connection pool to avoid exhausting connections when many + // instances connect to the same database (e.g., parallel tests) + let pool = get_or_create_shared_pool(database_url)?; let storage = PostgresStorage { pool, From 72517f8612d3763845f58b5e00d18270ff35cd6b Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Tue, 6 Jan 2026 15:23:38 -0500 Subject: [PATCH 31/54] add postgres db test_cases --- crates/core/src/surfnet/svm.rs | 19 +++++++++ crates/core/src/tests/integration.rs | 64 ++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+) diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index d76747025..a90cd3624 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -2803,6 +2803,7 @@ mod tests { 
#[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] fn test_synthetic_blockhash_generation(test_type: TestType) { let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); @@ -2866,6 +2867,7 @@ mod tests { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] fn test_blockhash_consistency_across_calls(test_type: TestType) { let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); @@ -2900,6 +2902,7 @@ mod tests { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] fn test_token_account_indexing(test_type: TestType) { let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); @@ -2954,6 +2957,7 @@ mod tests { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] fn test_account_update_removes_old_indexes(test_type: TestType) { let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); @@ -3026,6 +3030,7 @@ mod tests { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] fn test_non_token_accounts_not_indexed(test_type: 
TestType) { let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); @@ -3132,6 +3137,7 @@ mod tests { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] fn test_inserting_account_updates(test_type: TestType) { let (mut svm, events_rx, _geyser_rx) = test_type.initialize_svm(); @@ -3311,6 +3317,7 @@ mod tests { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] fn test_encode_ui_account(test_type: TestType) { let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); @@ -3541,6 +3548,7 @@ mod tests { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] fn test_profiling_map_capacity_default(test_type: TestType) { let (svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); assert_eq!( @@ -3552,6 +3560,7 @@ mod tests { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] fn test_profiling_map_capacity_set(test_type: TestType) { let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); svm.set_profiling_map_capacity(10); @@ -3591,6 +3600,7 @@ mod tests { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] + 
#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] fn test_apply_feature_config_empty(test_type: TestType) { let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); let config = SvmFeatureConfig::new(); @@ -3602,6 +3612,7 @@ mod tests { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] fn test_apply_feature_config_enable_feature(test_type: TestType) { let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); @@ -3620,6 +3631,7 @@ mod tests { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] fn test_apply_feature_config_disable_feature(test_type: TestType) { let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); @@ -3637,6 +3649,7 @@ mod tests { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] fn test_apply_feature_config_mainnet_defaults(test_type: TestType) { let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); let config = SvmFeatureConfig::default_mainnet_features(); @@ -3683,6 +3696,7 @@ mod tests { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] fn test_apply_feature_config_mainnet_with_override(test_type: TestType) { let (mut svm, _events_rx, _geyser_rx) = 
test_type.initialize_svm(); @@ -3706,6 +3720,7 @@ mod tests { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] fn test_apply_feature_config_multiple_changes(test_type: TestType) { let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); @@ -3729,6 +3744,7 @@ mod tests { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] fn test_apply_feature_config_preserves_native_mint(test_type: TestType) { let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); @@ -3755,6 +3771,7 @@ mod tests { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] fn test_apply_feature_config_idempotent(test_type: TestType) { let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); @@ -3776,6 +3793,7 @@ mod tests { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] fn test_garbage_collected_account_tracking(test_type: TestType) { let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); @@ -3811,6 +3829,7 @@ mod tests { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", 
test_case(TestType::postgres(); "with postgres db"))] fn test_garbage_collected_token_account_cleanup(test_type: TestType) { let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); diff --git a/crates/core/src/tests/integration.rs b/crates/core/src/tests/integration.rs index ecb7dcabe..c2940c74a 100644 --- a/crates/core/src/tests/integration.rs +++ b/crates/core/src/tests/integration.rs @@ -82,6 +82,7 @@ fn wait_for_ready_and_connected(simnet_events_rx: &crossbeam_channel::Receiver TestType::sqlite(), TestType::InMemorySqlite => TestType::in_memory(), TestType::NoDb => TestType::no_db(), + #[cfg(feature = "postgres")] + TestType::Postgres { url, .. } => TestType::Postgres { + url: url.clone(), + surfnet_id: crate::storage::tests::random_surfnet_id(), + }, }; // Start datasource surfnet first, which will only have accounts we airdrop to let (datasource_surfnet_url, _datasource_svm_locker) = @@ -4535,6 +4576,7 @@ async fn test_ws_signature_subscribe(subscription_type: SignatureSubscriptionTyp #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] async fn test_ws_signature_subscribe_failed_transaction(test_type: TestType) { use crossbeam_channel::unbounded; @@ -4597,6 +4639,7 @@ async fn test_ws_signature_subscribe_failed_transaction(test_type: TestType) { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] async fn test_ws_signature_subscribe_multiple_subscribers(test_type: TestType) { use crossbeam_channel::unbounded; @@ -4675,6 +4718,7 @@ async fn 
test_ws_signature_subscribe_multiple_subscribers(test_type: TestType) { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] async fn test_ws_signature_subscribe_before_transaction_exists(test_type: TestType) { use crossbeam_channel::unbounded; @@ -4740,6 +4784,7 @@ async fn test_ws_signature_subscribe_before_transaction_exists(test_type: TestTy #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] async fn test_ws_account_subscribe_balance_change(test_type: TestType) { use crossbeam_channel::unbounded; @@ -4800,6 +4845,7 @@ async fn test_ws_account_subscribe_balance_change(test_type: TestType) { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] async fn test_ws_account_subscribe_multiple_changes(test_type: TestType) { use crossbeam_channel::unbounded; @@ -4866,6 +4912,7 @@ async fn test_ws_account_subscribe_multiple_changes(test_type: TestType) { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] async fn test_ws_account_subscribe_multiple_subscribers(test_type: TestType) { use 
crossbeam_channel::unbounded; @@ -4935,6 +4982,7 @@ async fn test_ws_account_subscribe_multiple_subscribers(test_type: TestType) { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] async fn test_ws_account_subscribe_new_account_creation(test_type: TestType) { use crossbeam_channel::unbounded; @@ -4998,6 +5046,7 @@ async fn test_ws_account_subscribe_new_account_creation(test_type: TestType) { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] async fn test_ws_account_subscribe_account_closure(test_type: TestType) { use crossbeam_channel::unbounded; @@ -5055,6 +5104,7 @@ async fn test_ws_account_subscribe_account_closure(test_type: TestType) { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] async fn test_ws_slot_subscribe_basic(test_type: TestType) { use surfpool_types::types::BlockProductionMode; @@ -5089,6 +5139,7 @@ async fn test_ws_slot_subscribe_basic(test_type: TestType) { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] async fn test_ws_slot_subscribe_manual_advancement(test_type: 
TestType) { let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); @@ -5123,6 +5174,7 @@ async fn test_ws_slot_subscribe_manual_advancement(test_type: TestType) { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] async fn test_ws_slot_subscribe_multiple_subscribers(test_type: TestType) { let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); @@ -5156,6 +5208,7 @@ async fn test_ws_slot_subscribe_multiple_subscribers(test_type: TestType) { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] async fn test_ws_slot_subscribe_multiple_slot_changes(test_type: TestType) { let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); @@ -5186,6 +5239,7 @@ async fn test_ws_slot_subscribe_multiple_slot_changes(test_type: TestType) { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] async fn test_ws_logs_subscribe_all_transactions(test_type: TestType) { use crossbeam_channel::unbounded; @@ -5253,6 +5307,7 @@ async fn test_ws_logs_subscribe_all_transactions(test_type: TestType) { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature 
= "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] async fn test_ws_logs_subscribe_mentions_account(test_type: TestType) { use crossbeam_channel::unbounded; @@ -5331,6 +5386,7 @@ async fn test_ws_logs_subscribe_mentions_account(test_type: TestType) { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] async fn test_ws_logs_subscribe_confirmed_commitment(test_type: TestType) { use crossbeam_channel::unbounded; @@ -5395,6 +5451,7 @@ async fn test_ws_logs_subscribe_confirmed_commitment(test_type: TestType) { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] async fn test_ws_logs_subscribe_finalized_commitment(test_type: TestType) { use crossbeam_channel::unbounded; @@ -5466,6 +5523,7 @@ async fn test_ws_logs_subscribe_finalized_commitment(test_type: TestType) { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] async fn test_ws_logs_subscribe_failed_transaction(test_type: TestType) { use crossbeam_channel::unbounded; @@ -5531,6 +5589,7 @@ async fn test_ws_logs_subscribe_failed_transaction(test_type: TestType) { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] 
+#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] async fn test_ws_logs_subscribe_multiple_subscribers(test_type: TestType) { use crossbeam_channel::unbounded; @@ -5601,6 +5660,7 @@ async fn test_ws_logs_subscribe_multiple_subscribers(test_type: TestType) { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] async fn test_ws_logs_subscribe_logs_content(test_type: TestType) { use crossbeam_channel::unbounded; @@ -5675,6 +5735,7 @@ async fn test_ws_logs_subscribe_logs_content(test_type: TestType) { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] async fn test_token2022_full_lifecycle(test_type: TestType) { use solana_system_interface::instruction as system_instruction; @@ -5890,6 +5951,7 @@ async fn test_token2022_full_lifecycle(test_type: TestType) { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] async fn test_token2022_error_cases(test_type: TestType) { use solana_system_interface::instruction as system_instruction; @@ -6072,6 +6134,7 @@ async fn test_token2022_error_cases(test_type: TestType) { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no 
db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] async fn test_token2022_delegate_operations(test_type: TestType) { use solana_system_interface::instruction as system_instruction; @@ -6293,6 +6356,7 @@ async fn test_token2022_delegate_operations(test_type: TestType) { #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] #[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] async fn test_token2022_freeze_thaw(test_type: TestType) { use solana_system_interface::instruction as system_instruction; From 8dd0ed02ebb0ce79ab02bf8466ec54abd4d9d24b Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Tue, 6 Jan 2026 15:26:04 -0500 Subject: [PATCH 32/54] feat: add SQLite pragmas for performance and reliability in storage --- crates/core/src/storage/sqlite.rs | 38 +++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/crates/core/src/storage/sqlite.rs b/crates/core/src/storage/sqlite.rs index b2a536944..92647b4f9 100644 --- a/crates/core/src/storage/sqlite.rs +++ b/crates/core/src/storage/sqlite.rs @@ -336,6 +336,44 @@ where surfnet_id, }; + // Set SQLite pragmas for performance and reliability + { + let mut conn = storage.pool.get().map_err(|_| StorageError::LockError)?; + + // Different pragma sets for file-based vs in-memory databases + let pragmas = if database_url == ":memory:" { + // In-memory database pragmas (WAL not supported) + " + PRAGMA synchronous=OFF; + PRAGMA temp_store=MEMORY; + PRAGMA cache_size=-64000; + PRAGMA busy_timeout=5000; + " + } else { + // File-based database pragmas + " + PRAGMA journal_mode=WAL; + PRAGMA synchronous=NORMAL; + PRAGMA temp_store=MEMORY; + PRAGMA mmap_size=268435456; + PRAGMA cache_size=-64000; + PRAGMA busy_timeout=5000; + PRAGMA 
wal_autocheckpoint=1000; + " + // Pragma explanations: + // - journal_mode=WAL: Write-Ahead Logging for better concurrency and crash recovery + // - synchronous=NORMAL: Safe with WAL mode, good performance/durability balance + // - temp_store=MEMORY: Store temp tables in memory for speed + // - mmap_size=268435456: 256MB memory-mapped I/O for faster reads + // - cache_size=-64000: 64MB page cache (negative = KB) + // - busy_timeout=5000: Wait 5s for locks instead of failing immediately + // - wal_autocheckpoint=1000: Checkpoint WAL after 1000 pages (~4MB with default page size) + }; + + conn.batch_execute(pragmas) + .map_err(|e| StorageError::create_table(table_name, NAME, e))?; + } + storage.ensure_table_exists()?; debug!( "SQLite storage connected successfully for table: {}", From 0a1f85c01cbfd8ae9e87234bde3e6222629b1b38 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Tue, 6 Jan 2026 17:19:19 -0500 Subject: [PATCH 33/54] upgrade `SurfnetSvm` transactions to use Storage rather than HashMap --- Cargo.lock | 1 + crates/core/Cargo.toml | 1 + crates/core/src/rpc/full.rs | 16 +-- crates/core/src/rpc/surfnet_cheatcodes.rs | 31 +++-- crates/core/src/surfnet/locker.rs | 106 ++++++++------- crates/core/src/surfnet/svm.rs | 27 ++-- crates/core/src/types.rs | 158 +++++++++++++++++++++- 7 files changed, 252 insertions(+), 88 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 666b02d52..770eebb04 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12287,6 +12287,7 @@ dependencies = [ "solana-sysvar 3.0.0", "solana-sysvar-id 3.0.0", "solana-transaction", + "solana-transaction-context", "solana-transaction-error 3.0.0", "solana-transaction-status", "solana-version", diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml index b3ec7a7d5..3218a1404 100644 --- a/crates/core/Cargo.toml +++ b/crates/core/Cargo.toml @@ -79,6 +79,7 @@ solana-system-interface = { workspace = true } solana-sysvar = { workspace = true } solana-sysvar-id = { workspace = true } solana-transaction = { 
workspace = true } +solana-transaction-context = { workspace = true } solana-transaction-error = { workspace = true } solana-transaction-status = { workspace = true } solana-version = { workspace = true } diff --git a/crates/core/src/rpc/full.rs b/crates/core/src/rpc/full.rs index aa711522e..2154dc869 100644 --- a/crates/core/src/rpc/full.rs +++ b/crates/core/src/rpc/full.rs @@ -2313,7 +2313,7 @@ impl Full for SurfpoolFullRpc { .iter() .filter_map(|signature| { // Check if the signature exists in the transactions map - transactions.get(signature).map(|tx| (slot, tx)) + transactions.get(&signature.to_string()).ok().flatten().map(|tx| (slot, tx)) }) .collect::>() }) @@ -2533,10 +2533,10 @@ mod tests { ..Default::default() }; let mutated_accounts = std::collections::HashSet::new(); - writer.transactions.insert( - sig, + writer.transactions.store( + sig.to_string(), SurfnetTransactionStatus::processed(tx_with_status_meta, mutated_accounts), - ); + ).unwrap(); status_tx .send(TransactionStatusEvent::Success( TransactionConfirmationStatus::Confirmed, @@ -2724,7 +2724,7 @@ mod tests { "transaction is not found in the SVM" ); assert!( - state_reader.transactions.get(&sig).is_some(), + state_reader.transactions.get(&sig.to_string()).unwrap().is_some(), "transaction is not found in the history" ); } @@ -4565,13 +4565,13 @@ mod tests { ..Default::default() }; let mutated_accounts = std::collections::HashSet::new(); - writer.transactions.insert( - sig, + writer.transactions.store( + sig.to_string(), SurfnetTransactionStatus::processed( tx_with_status_meta, mutated_accounts, ), - ); + ).unwrap(); status_tx .send(TransactionStatusEvent::Success( TransactionConfirmationStatus::Processed, diff --git a/crates/core/src/rpc/surfnet_cheatcodes.rs b/crates/core/src/rpc/surfnet_cheatcodes.rs index a413f20d5..2aa1c20e9 100644 --- a/crates/core/src/rpc/surfnet_cheatcodes.rs +++ b/crates/core/src/rpc/surfnet_cheatcodes.rs @@ -1571,21 +1571,24 @@ impl SurfnetCheatcodes for 
SurfnetCheatcodesRpc { )> = svm_locker.with_svm_reader(|svm_reader| { svm_reader .transactions - .iter() - .map(|(sig, status)| { - let (transaction_with_status_meta, _) = status.expect_processed(); - ( - sig.to_string(), - transaction_with_status_meta.slot, - transaction_with_status_meta.meta.status.clone().err(), - transaction_with_status_meta - .meta - .log_messages - .clone() - .unwrap_or_default(), - ) + .into_iter() + .map(|iter| { + iter.map(|(sig, status)| { + let (transaction_with_status_meta, _) = status.expect_processed(); + ( + sig, + transaction_with_status_meta.slot, + transaction_with_status_meta.meta.status.clone().err(), + transaction_with_status_meta + .meta + .log_messages + .clone() + .unwrap_or_default(), + ) + }) + .collect() }) - .collect() + .unwrap_or_default() }); items.sort_by(|a, b| b.1.cmp(&a.1)); diff --git a/crates/core/src/surfnet/locker.rs b/crates/core/src/surfnet/locker.rs index a5ede68bf..535a51759 100644 --- a/crates/core/src/surfnet/locker.rs +++ b/crates/core/src/surfnet/locker.rs @@ -613,57 +613,60 @@ impl SurfnetSvmLocker { let sigs: Vec<_> = svm_reader .transactions - .iter() - .filter_map(|(sig, status)| { - let ( - TransactionWithStatusMeta { - slot, - transaction, - meta, - }, - _, - ) = status.expect_processed(); - - if *slot < config.clone().min_context_slot.unwrap_or_default() { - return None; - } - - if Some(sig.to_string()) == config_before { - before_slot = Some(*slot); - } + .into_iter() + .map(|iter| { + iter.filter_map(|(sig, status)| { + let ( + TransactionWithStatusMeta { + slot, + transaction, + meta, + }, + _, + ) = status.expect_processed(); + + if *slot < config.clone().min_context_slot.unwrap_or_default() { + return None; + } - if Some(sig.to_string()) == config_until { - until_slot = Some(*slot); - } + if Some(sig.clone()) == config_before { + before_slot = Some(*slot); + } - // Check if the pubkey is a signer + if Some(sig.clone()) == config_until { + until_slot = Some(*slot); + } - if 
!transaction.message.static_account_keys().contains(pubkey) { - return None; - } + // Check if the pubkey is a signer - // Determine confirmation status - let confirmation_status = match current_slot { - cs if cs == *slot => SolanaTransactionConfirmationStatus::Processed, - cs if cs < slot + FINALIZATION_SLOT_THRESHOLD => { - SolanaTransactionConfirmationStatus::Confirmed + if !transaction.message.static_account_keys().contains(pubkey) { + return None; } - _ => SolanaTransactionConfirmationStatus::Finalized, - }; - Some(RpcConfirmedTransactionStatusWithSignature { - err: match &meta.status { - Ok(_) => None, - Err(e) => Some(e.clone().into()), - }, - slot: *slot, - memo: None, - block_time: None, - confirmation_status: Some(confirmation_status), - signature: sig.to_string(), + // Determine confirmation status + let confirmation_status = match current_slot { + cs if cs == *slot => SolanaTransactionConfirmationStatus::Processed, + cs if cs < *slot + FINALIZATION_SLOT_THRESHOLD => { + SolanaTransactionConfirmationStatus::Confirmed + } + _ => SolanaTransactionConfirmationStatus::Finalized, + }; + + Some(RpcConfirmedTransactionStatusWithSignature { + err: match &meta.status { + Ok(_) => None, + Err(e) => Some(e.clone().into()), + }, + slot: *slot, + memo: None, + block_time: None, + confirmation_status: Some(confirmation_status), + signature: sig, + }) }) + .collect() }) - .collect(); + .unwrap_or_default(); sigs.into_iter() .filter(|sig| { @@ -749,7 +752,7 @@ impl SurfnetSvmLocker { self.with_svm_reader(|svm_reader| { let latest_absolute_slot = svm_reader.get_latest_absolute_slot(); - let Some(entry) = svm_reader.transactions.get(signature) else { + let Some(entry) = svm_reader.transactions.get(&signature.to_string())? 
else { return Ok(GetTransactionResult::None(*signature)); }; @@ -1321,13 +1324,13 @@ impl SurfnetSvmLocker { token_programs, loaded_addresses.clone().unwrap_or_default(), ); - svm_writer.transactions.insert( - signature, + svm_writer.transactions.store( + signature.to_string(), SurfnetTransactionStatus::processed( transaction_with_status_meta, HashSet::new(), ), - ); + )?; svm_writer.transactions_queued_for_confirmation.push_back(( transaction.clone(), @@ -1353,7 +1356,8 @@ impl SurfnetSvmLocker { meta_canonical, Some(err.clone()), )); - }); + Ok::<(), SurfpoolError>(()) + })?; } Ok(ProfileResult::new( pre_execution_capture, @@ -1484,13 +1488,13 @@ impl SurfnetSvmLocker { &post_token_program_ids, loaded_addresses.clone().unwrap_or_default(), ); - svm_writer.transactions.insert( - transaction_meta.signature, + svm_writer.transactions.store( + transaction_meta.signature.to_string(), SurfnetTransactionStatus::processed( transaction_with_status_meta.clone(), mutated_account_pubkeys, ), - ); + )?; let _ = svm_writer .simnet_events_tx diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index a90cd3624..0a124f464 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -218,7 +218,7 @@ pub struct SurfnetSvm { pub remote_rpc_url: Option, pub chain_tip: BlockIdentifier, pub blocks: Box>, - pub transactions: HashMap, + pub transactions: Box>, pub transactions_queued_for_confirmation: VecDeque<( VersionedTransaction, Sender, @@ -325,6 +325,7 @@ impl SurfnetSvm { HashMap::from([(spl_token_interface::native_mint::ID, parsed_mint_account)]); let blocks_db = new_kv_store(&database_url, "blocks", surfnet_id)?; + let transactions_db = new_kv_store(&database_url, "transactions", surfnet_id)?; let chain_tip = if let Some((_, block)) = blocks_db .into_iter() @@ -344,7 +345,7 @@ impl SurfnetSvm { remote_rpc_url: None, chain_tip, blocks: blocks_db, - transactions: HashMap::new(), + transactions: transactions_db, perf_samples: 
VecDeque::new(), transactions_processed: 0, simnet_events_tx, @@ -606,8 +607,8 @@ impl SurfnetSvm { .get_account(&system_program::id())? .map(|a| a.lamports()) .unwrap_or(1); - self.transactions.insert( - *tx.get_signature(), + self.transactions.store( + tx.get_signature().to_string(), SurfnetTransactionStatus::processed( TransactionWithStatusMeta { slot, @@ -638,7 +639,7 @@ impl SurfnetSvm { }, HashSet::from([*pubkey]), ), - ); + )?; self.notify_signature_subscribers( SignatureSubscriptionType::processed(), tx.get_signature(), @@ -1063,7 +1064,7 @@ impl SurfnetSvm { HashMap::from([(spl_token_interface::native_mint::ID, parsed_mint_account)]); self.blocks.clear()?; - self.transactions.clear(); + self.transactions.clear()?; self.transactions_queued_for_confirmation.clear(); self.transactions_queued_for_finalization.clear(); self.perf_samples.clear(); @@ -1305,7 +1306,7 @@ impl SurfnetSvm { ); let Some(SurfnetTransactionStatus::Processed(tx_data)) = - self.transactions.get(&signature) + self.transactions.get(&signature.to_string()).ok().flatten() else { continue; }; @@ -1354,7 +1355,7 @@ impl SurfnetSvm { error, ); let Some(SurfnetTransactionStatus::Processed(tx_data)) = - self.transactions.get(signature) + self.transactions.get(&signature.to_string()).ok().flatten() else { continue; }; @@ -2040,7 +2041,7 @@ impl SurfnetSvm { block .signatures .iter() - .filter_map(|sig| self.transactions.get(sig)) + .filter_map(|sig| self.transactions.get(&sig.to_string()).ok().flatten()) .map(|tx_with_meta| { let (meta, _) = tx_with_meta.expect_processed(); meta.encode( @@ -2060,7 +2061,7 @@ impl SurfnetSvm { block .signatures .iter() - .filter_map(|sig| self.transactions.get(sig)) + .filter_map(|sig| self.transactions.get(&sig.to_string()).ok().flatten()) .map(|tx_with_meta| { let (meta, _) = tx_with_meta.expect_processed(); meta.to_json_accounts( @@ -2321,7 +2322,7 @@ impl SurfnetSvm { // Get the tx accounts including loaded addresses let transaction_accounts = if let 
Some(SurfnetTransactionStatus::Processed(tx_data)) = - self.transactions.get(signature) + self.transactions.get(&signature.to_string()).ok().flatten() { let (tx_meta, _) = tx_data.as_ref(); let mut accounts = match &tx_meta.transaction.message { @@ -2615,8 +2616,8 @@ impl SurfnetSvm { pub fn get_transaction( &self, signature: &Signature, - ) -> SurfpoolResult> { - Ok(self.transactions.get(signature)) + ) -> SurfpoolResult> { + Ok(self.transactions.get(&signature.to_string())?) } pub fn start_runbook_execution(&mut self, runbook_id: String) { diff --git a/crates/core/src/types.rs b/crates/core/src/types.rs index c0819b37b..cfa268e62 100644 --- a/crates/core/src/types.rs +++ b/crates/core/src/types.rs @@ -4,6 +4,7 @@ use agave_reserved_account_keys::ReservedAccountKeys; use base64::{Engine, prelude::BASE64_STANDARD}; use chrono::Utc; use litesvm::types::TransactionMetadata; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; use solana_account::Account; use solana_account_decoder::parse_token::UiTokenAmount; use solana_clock::{Epoch, Slot}; @@ -19,9 +20,11 @@ use solana_transaction::{ sanitized::SanitizedTransaction, versioned::{TransactionVersion, VersionedTransaction}, }; +use solana_transaction_context::TransactionReturnData; +use solana_transaction_error::TransactionError; use solana_transaction_status::{ Encodable, EncodableWithMeta, EncodeError, EncodedTransaction, - EncodedTransactionWithStatusMeta, InnerInstruction, InnerInstructions, + EncodedTransactionWithStatusMeta, InnerInstruction, InnerInstructions, Reward, TransactionBinaryEncoding, TransactionConfirmationStatus, TransactionStatus, TransactionStatusMeta, TransactionTokenBalance, UiAccountsList, UiLoadedAddresses, UiTransaction, UiTransactionEncoding, UiTransactionStatusMeta, @@ -37,7 +40,117 @@ use crate::{ surfnet::locker::{format_ui_amount, format_ui_amount_string}, }; -#[derive(Debug, Clone)] +/// Serializable version of TransactionTokenBalance +#[derive(Debug, Clone, Serialize, 
Deserialize)] +pub struct SerializableTransactionTokenBalance { + pub account_index: u8, + pub mint: String, + pub ui_token_amount: UiTokenAmount, + pub owner: String, + pub program_id: String, +} + +impl From for SerializableTransactionTokenBalance { + fn from(ttb: TransactionTokenBalance) -> Self { + Self { + account_index: ttb.account_index, + mint: ttb.mint, + ui_token_amount: ttb.ui_token_amount, + owner: ttb.owner, + program_id: ttb.program_id, + } + } +} + +impl From for TransactionTokenBalance { + fn from(sttb: SerializableTransactionTokenBalance) -> Self { + Self { + account_index: sttb.account_index, + mint: sttb.mint, + ui_token_amount: sttb.ui_token_amount, + owner: sttb.owner, + program_id: sttb.program_id, + } + } +} + +/// Serializable version of TransactionStatusMeta +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SerializableTransactionStatusMeta { + pub status: Result<(), TransactionError>, + pub fee: u64, + pub pre_balances: Vec, + pub post_balances: Vec, + pub inner_instructions: Option>, + pub log_messages: Option>, + pub pre_token_balances: Option>, + pub post_token_balances: Option>, + pub rewards: Option>, + pub loaded_addresses: LoadedAddresses, + pub return_data: Option, + pub compute_units_consumed: Option, + pub cost_units: Option, +} + +impl From for SerializableTransactionStatusMeta { + fn from(meta: TransactionStatusMeta) -> Self { + Self { + status: meta.status, + fee: meta.fee, + pre_balances: meta.pre_balances, + post_balances: meta.post_balances, + inner_instructions: meta.inner_instructions, + log_messages: meta.log_messages, + pre_token_balances: meta + .pre_token_balances + .map(|v| v.into_iter().map(Into::into).collect()), + post_token_balances: meta + .post_token_balances + .map(|v| v.into_iter().map(Into::into).collect()), + rewards: meta.rewards, + loaded_addresses: meta.loaded_addresses, + return_data: meta.return_data, + compute_units_consumed: meta.compute_units_consumed, + cost_units: meta.cost_units, + } + 
} +} + +impl From for TransactionStatusMeta { + fn from(smeta: SerializableTransactionStatusMeta) -> Self { + Self { + status: smeta.status, + fee: smeta.fee, + pre_balances: smeta.pre_balances, + post_balances: smeta.post_balances, + inner_instructions: smeta.inner_instructions, + log_messages: smeta.log_messages, + pre_token_balances: smeta + .pre_token_balances + .map(|v| v.into_iter().map(Into::into).collect()), + post_token_balances: smeta + .post_token_balances + .map(|v| v.into_iter().map(Into::into).collect()), + rewards: smeta.rewards, + loaded_addresses: smeta.loaded_addresses, + return_data: smeta.return_data, + compute_units_consumed: smeta.compute_units_consumed, + cost_units: smeta.cost_units, + } + } +} + +/// Helper struct for serializing TransactionWithStatusMeta +/// Note: VersionedTransaction uses bincode internally, so we serialize it as base64-encoded bytes +#[derive(Serialize, Deserialize)] +struct SerializableTransactionWithStatusMeta { + pub slot: u64, + /// Base64-encoded bincode serialization of VersionedTransaction + pub transaction_bytes: String, + pub meta: SerializableTransactionStatusMeta, +} + +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] pub enum SurfnetTransactionStatus { Received, Processed(Box<(TransactionWithStatusMeta, HashSet)>), @@ -63,6 +176,47 @@ pub struct TransactionWithStatusMeta { pub meta: TransactionStatusMeta, } +impl Serialize for TransactionWithStatusMeta { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + // Serialize VersionedTransaction using bincode, then base64 encode + let tx_bytes = bincode::serialize(&self.transaction) + .map_err(|e| serde::ser::Error::custom(format!("bincode error: {}", e)))?; + let tx_base64 = BASE64_STANDARD.encode(&tx_bytes); + + let helper = SerializableTransactionWithStatusMeta { + slot: self.slot, + transaction_bytes: tx_base64, + meta: self.meta.clone().into(), + }; + helper.serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for 
TransactionWithStatusMeta { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let helper = SerializableTransactionWithStatusMeta::deserialize(deserializer)?; + + // Decode base64 and deserialize using bincode + let tx_bytes = BASE64_STANDARD + .decode(&helper.transaction_bytes) + .map_err(|e| serde::de::Error::custom(format!("base64 decode error: {}", e)))?; + let transaction: VersionedTransaction = bincode::deserialize(&tx_bytes) + .map_err(|e| serde::de::Error::custom(format!("bincode deserialize error: {}", e)))?; + + Ok(Self { + slot: helper.slot, + transaction, + meta: helper.meta.into(), + }) + } +} + impl TransactionWithStatusMeta { pub fn into_status(&self, current_slot: u64) -> TransactionStatus { TransactionStatus { From 185eeb1be4155814e4e828a1c3333616135d88db Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Thu, 8 Jan 2026 16:09:28 -0500 Subject: [PATCH 34/54] fix sqlite db file naming and cleanup files on shutdown --- crates/cli/src/cli/simnet/mod.rs | 9 +- crates/cli/src/tui/simnet.rs | 8 +- crates/core/src/runloops/mod.rs | 3 +- crates/core/src/storage/mod.rs | 5 + crates/core/src/storage/sqlite.rs | 103 +++++++++++++++++++- crates/core/src/surfnet/locker.rs | 10 ++ crates/core/src/surfnet/surfnet_lite_svm.rs | 7 ++ crates/core/src/surfnet/svm.rs | 8 ++ 8 files changed, 146 insertions(+), 7 deletions(-) diff --git a/crates/cli/src/cli/simnet/mod.rs b/crates/cli/src/cli/simnet/mod.rs index 3b455dbf1..7fb8abde1 100644 --- a/crates/cli/src/cli/simnet/mod.rs +++ b/crates/cli/src/cli/simnet/mod.rs @@ -139,7 +139,7 @@ pub async fn handle_start_local_surfnet_command( let config_copy = config.clone(); let simnet_events_tx_for_thread = simnet_events_tx.clone(); - let _handle = hiro_system_kit::thread_named("simnet") + let simnet_handle = hiro_system_kit::thread_named("simnet") .spawn(move || { let future = start_local_surfnet( surfnet_svm, @@ -228,6 +228,9 @@ pub async fn handle_start_local_surfnet_command( ) .await; + // Wait 
for the simnet thread to finish cleanup (including Drop/checkpoint) + let _ = simnet_handle.join(); + Ok(()) } @@ -275,6 +278,7 @@ async fn start_service( if let Some(explorer_handle) = explorer_handle { let _ = explorer_handle.stop(true).await; } + Ok(()) } @@ -288,8 +292,11 @@ fn log_events( ) -> Result<(), String> { let mut deployment_completed = false; let do_stop_loop = runloop_terminator.clone(); + let terminate_tx = simnet_commands_tx.clone(); ctrlc::set_handler(move || { do_stop_loop.store(true, Ordering::Relaxed); + // Send terminate command to allow graceful shutdown (Drop to run) + let _ = terminate_tx.send(SimnetCommand::Terminate(None)); }) .expect("Error setting Ctrl-C handler"); diff --git a/crates/cli/src/tui/simnet.rs b/crates/cli/src/tui/simnet.rs index 27d1e2c15..911a18f3c 100644 --- a/crates/cli/src/tui/simnet.rs +++ b/crates/cli/src/tui/simnet.rs @@ -705,10 +705,16 @@ fn run_app(terminal: &mut Terminal, mut app: App) -> io::Result<( if key_event.kind == KeyEventKind::Press { use KeyCode::*; if key_event.modifiers == KeyModifiers::CONTROL && key_event.code == Char('c') { + // Send terminate command to allow graceful shutdown (Drop to run) + let _ = app.simnet_commands_tx.send(SimnetCommand::Terminate(None)); return Ok(()); } match key_event.code { - Char('q') | Esc => return Ok(()), + Char('q') | Esc => { + // Send terminate command to allow graceful shutdown (Drop to run) + let _ = app.simnet_commands_tx.send(SimnetCommand::Terminate(None)); + return Ok(()); + } Down => app.next(), Up => app.previous(), Char('f') | Char('j') => { diff --git a/crates/core/src/runloops/mod.rs b/crates/core/src/runloops/mod.rs index aeea91605..af62380bb 100644 --- a/crates/core/src/runloops/mod.rs +++ b/crates/core/src/runloops/mod.rs @@ -320,7 +320,8 @@ pub async fn start_block_production_runloop( } } SimnetCommand::Terminate(_) => { - let _ = svm_locker.simnet_events_tx().send(SimnetEvent::Aborted("Terminated due to inactivity.".to_string())); + // Explicitly 
shutdown storage to trigger WAL checkpoint before exiting + svm_locker.shutdown(); break; } SimnetCommand::StartRunbookExecution(runbook_id) => { diff --git a/crates/core/src/storage/mod.rs b/crates/core/src/storage/mod.rs index 1f0e4890d..a33a5d778 100644 --- a/crates/core/src/storage/mod.rs +++ b/crates/core/src/storage/mod.rs @@ -200,6 +200,11 @@ pub trait Storage: Send + Sync { Ok(self.get(key)?.is_some()) } + /// Explicitly shutdown the storage, performing any cleanup like WAL checkpoint. + /// This should be called before the application exits to ensure data is persisted. + /// Default implementation does nothing. + fn shutdown(&self) {} + // Enable cloning of boxed trait objects fn clone_box(&self) -> Box>; } diff --git a/crates/core/src/storage/sqlite.rs b/crates/core/src/storage/sqlite.rs index 92647b4f9..23bf3f338 100644 --- a/crates/core/src/storage/sqlite.rs +++ b/crates/core/src/storage/sqlite.rs @@ -1,3 +1,6 @@ +use std::collections::HashSet; +use std::sync::{Mutex, OnceLock}; + use log::debug; use serde::{Deserialize, Serialize}; use surfpool_db::diesel::{ @@ -10,6 +13,14 @@ use surfpool_db::diesel::{ use crate::storage::{Storage, StorageConstructor, StorageError, StorageResult}; +/// Track which database files have already been checkpointed during shutdown. +/// This prevents multiple SqliteStorage instances sharing the same file from +/// conflicting when each tries to checkpoint and delete WAL files. 
+fn checkpointed_databases() -> &'static Mutex> { + static CHECKPOINTED: OnceLock>> = OnceLock::new(); + CHECKPOINTED.get_or_init(|| Mutex::new(HashSet::new())) +} + #[derive(QueryableByName, Debug)] struct KvRecord { #[diesel(sql_type = Text)] @@ -36,10 +47,84 @@ pub struct SqliteStorage { _phantom: std::marker::PhantomData<(K, V)>, table_name: String, surfnet_id: u32, + /// Whether this is a file-based database (not :memory:) + /// Used to determine if WAL checkpoint should be performed on drop + is_file_based: bool, + /// The connection string for creating direct connections during cleanup + connection_string: String, } const NAME: &str = "SQLite"; +// Checkpoint implementation that doesn't require K, V bounds +impl SqliteStorage { + /// Checkpoint the WAL and truncate it to consolidate into the main database file, + /// then remove the -wal and -shm files. + /// Only runs for file-based databases (not :memory:). + /// Uses a static set to track which databases have been checkpointed to avoid + /// conflicts when multiple SqliteStorage instances share the same database file. 
+ fn checkpoint(&self) { + if !self.is_file_based { + return; + } + + // Extract the file path from the connection string + // Connection string is like "file:/path/to/db.sqlite?mode=rwc" + let db_path = self + .connection_string + .strip_prefix("file:") + .and_then(|s| s.split('?').next()) + .unwrap_or(&self.connection_string) + .to_string(); + + // Check if this database has already been checkpointed by another storage instance + { + let mut checkpointed = checkpointed_databases().lock().unwrap(); + if checkpointed.contains(&db_path) { + debug!( + "Database {} already checkpointed, skipping for table '{}'", + db_path, self.table_name + ); + return; + } + checkpointed.insert(db_path.clone()); + } + + debug!( + "Checkpointing WAL for database '{}' (table '{}')", + db_path, self.table_name + ); + + // Use pool connection to checkpoint - this flushes WAL to main database + if let Ok(mut conn) = self.pool.get() { + if let Err(e) = conn.batch_execute("PRAGMA wal_checkpoint(TRUNCATE);") { + debug!("WAL checkpoint failed: {}", e); + return; + } + } + + // Remove the -wal and -shm files + let wal_path = format!("{}-wal", db_path); + let shm_path = format!("{}-shm", db_path); + + if std::path::Path::new(&wal_path).exists() { + if let Err(e) = std::fs::remove_file(&wal_path) { + debug!("Failed to remove WAL file {}: {}", wal_path, e); + } else { + debug!("Removed WAL file: {}", wal_path); + } + } + + if std::path::Path::new(&shm_path).exists() { + if let Err(e) = std::fs::remove_file(&shm_path) { + debug!("Failed to remove SHM file {}: {}", shm_path, e); + } else { + debug!("Removed SHM file: {}", shm_path); + } + } + } +} + impl SqliteStorage where K: Serialize + for<'de> Deserialize<'de>, @@ -258,6 +343,10 @@ where Box::new(self.clone()) } + fn shutdown(&self) { + self.checkpoint(); + } + fn into_iter(&self) -> StorageResult + '_>> { debug!( "Creating iterator for all key-value pairs in table '{}'", @@ -313,27 +402,33 @@ where database_url, table_name, surfnet_id ); - let 
connection_string = if database_url != ":memory:" { - // Add connection string parameters to avoid readonly issues + let connection_string = if database_url == ":memory:" { + database_url.to_string() + } else if database_url.starts_with("file:") { + // Already a URI, just add mode if needed if database_url.contains('?') { format!("{}&mode=rwc", database_url) } else { format!("{}?mode=rwc", database_url) } } else { - database_url.to_string() + // Convert plain path to file: URI format for proper parameter handling + format!("file:{}?mode=rwc", database_url) }; - let manager = ConnectionManager::::new(connection_string); + let manager = ConnectionManager::::new(connection_string.clone()); trace!("Creating connection pool"); let pool = Pool::new(manager).map_err(|e| StorageError::PooledConnectionError(NAME.into(), e))?; + let is_file_based = database_url != ":memory:"; let storage = SqliteStorage { pool, _phantom: std::marker::PhantomData, table_name: table_name.to_string(), surfnet_id, + is_file_based, + connection_string, }; // Set SQLite pragmas for performance and reliability diff --git a/crates/core/src/surfnet/locker.rs b/crates/core/src/surfnet/locker.rs index 535a51759..6e0e950f4 100644 --- a/crates/core/src/surfnet/locker.rs +++ b/crates/core/src/surfnet/locker.rs @@ -134,6 +134,16 @@ impl Clone for SurfnetSvmLocker { /// Functions for reading and writing to the underlying SurfnetSvm instance impl SurfnetSvmLocker { + /// Explicitly shutdown the SVM, performing cleanup like WAL checkpoint for SQLite. + /// This should be called before the application exits to ensure data is persisted. + pub fn shutdown(&self) { + let read_lock = self.0.clone(); + tokio::task::block_in_place(move || { + let read_guard = read_lock.blocking_read(); + read_guard.shutdown(); + }); + } + /// Executes a read-only operation on the underlying `SurfnetSvm` by acquiring a blocking read lock. /// Accepts a closure that receives a shared reference to `SurfnetSvm` and returns a value. 
/// diff --git a/crates/core/src/surfnet/surfnet_lite_svm.rs b/crates/core/src/surfnet/surfnet_lite_svm.rs index b301b2b9d..d4f3ec216 100644 --- a/crates/core/src/surfnet/surfnet_lite_svm.rs +++ b/crates/core/src/surfnet/surfnet_lite_svm.rs @@ -54,6 +54,13 @@ impl SurfnetLiteSvm { Ok(self) } + /// Explicitly shutdown the storage, performing cleanup like WAL checkpoint for SQLite. + pub fn shutdown(&self) { + if let Some(db) = &self.db { + db.shutdown(); + } + } + pub fn reset(&mut self, feature_set: FeatureSet) -> SurfpoolResult<()> { self.svm = LiteSVM::new() .with_blockhash_check(false) diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index 0a124f464..b53eb79f7 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -293,6 +293,14 @@ impl SurfnetSvm { Self::_new(database_url, surfnet_id) } + /// Explicitly shutdown the SVM, performing cleanup like WAL checkpoint for SQLite. + /// This should be called before the application exits to ensure data is persisted. + pub fn shutdown(&self) { + self.inner.shutdown(); + self.blocks.shutdown(); + self.transactions.shutdown(); + } + /// Creates a new instance of `SurfnetSvm`. /// /// Returns a tuple containing the SVM instance, a receiver for simulation events, and a receiver for Geyser plugin events. 
From 4da71eabd45ae3186dcd294b854b6e0458f81911 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Fri, 9 Jan 2026 11:24:09 -0500 Subject: [PATCH 35/54] upgrade `token_accounts` index to use Storage trait --- crates/core/src/surfnet/locker.rs | 15 +++++++- crates/core/src/surfnet/svm.rs | 58 ++++++++++++++++++++++------- crates/core/src/types.rs | 62 +++++++++++++++++++++++++++++++ 3 files changed, 119 insertions(+), 16 deletions(-) diff --git a/crates/core/src/surfnet/locker.rs b/crates/core/src/surfnet/locker.rs index 6e0e950f4..6a81222df 100644 --- a/crates/core/src/surfnet/locker.rs +++ b/crates/core/src/surfnet/locker.rs @@ -1014,7 +1014,14 @@ impl SurfnetSvmLocker { let token_accounts_before = transaction_accounts .iter() .enumerate() - .filter_map(|(i, p)| svm_reader.token_accounts.get(p).cloned().map(|a| (i, a))) + .filter_map(|(i, p)| { + svm_reader + .token_accounts + .get(&p.to_string()) + .ok() + .flatten() + .map(|a| (i, a)) + }) .collect::>(); let token_programs = token_accounts_before @@ -1457,7 +1464,11 @@ impl SurfnetSvmLocker { .zip(accounts_after.iter()) .enumerate() { - let token_account = svm_writer.token_accounts.get(pubkey).cloned(); + let token_account = svm_writer + .token_accounts + .get(&pubkey.to_string()) + .ok() + .flatten(); post_execution_capture.insert(*pubkey, account.clone()); if let Some(token_account) = token_account { diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index b53eb79f7..5a18bd8ab 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -247,7 +247,7 @@ pub struct SurfnetSvm { pub start_time: SystemTime, pub accounts_by_owner: HashMap>, pub account_associated_data: HashMap, - pub token_accounts: HashMap, + pub token_accounts: Box>, pub token_mints: HashMap, pub token_accounts_by_owner: HashMap>, pub token_accounts_by_delegate: HashMap>, @@ -299,6 +299,7 @@ impl SurfnetSvm { self.inner.shutdown(); self.blocks.shutdown(); self.transactions.shutdown(); + 
self.token_accounts.shutdown(); } /// Creates a new instance of `SurfnetSvm`. @@ -317,7 +318,8 @@ impl SurfnetSvm { // todo: consider making this configurable via config feature_set.deactivate(&enable_extend_program_checked::id()); - let inner = SurfnetLiteSvm::new().initialize(feature_set.clone(), database_url, surfnet_id)?; + let inner = + SurfnetLiteSvm::new().initialize(feature_set.clone(), database_url, surfnet_id)?; let native_mint_account = inner .get_account(&spl_token_interface::native_mint::ID)? @@ -334,6 +336,7 @@ impl SurfnetSvm { let blocks_db = new_kv_store(&database_url, "blocks", surfnet_id)?; let transactions_db = new_kv_store(&database_url, "transactions", surfnet_id)?; + let token_accounts_db = new_kv_store(&database_url, "token_accounts", surfnet_id)?; let chain_tip = if let Some((_, block)) = blocks_db .into_iter() @@ -380,7 +383,7 @@ impl SurfnetSvm { start_time: SystemTime::now(), accounts_by_owner, account_associated_data: HashMap::new(), - token_accounts: HashMap::new(), + token_accounts: token_accounts_db, token_mints, token_accounts_by_owner: HashMap::new(), token_accounts_by_delegate: HashMap::new(), @@ -969,8 +972,8 @@ impl SurfnetSvm { delegate_accounts.push(*pubkey); } } - - self.token_accounts.insert(*pubkey, token_account); + self.token_accounts + .store(pubkey.to_string(), token_account)?; } if let Ok(mint_account) = MintAccount::unpack(&account.data) { @@ -1018,7 +1021,7 @@ impl SurfnetSvm { // if it was a token account, remove from token indexes if is_supported_token_program(&old_account.owner) { - if let Some(old_token_account) = self.token_accounts.remove(pubkey) { + if let Some(old_token_account) = self.token_accounts.take(&pubkey.to_string())? 
{ if let Some(accounts) = self .token_accounts_by_owner .get_mut(&old_token_account.owner()) @@ -1081,7 +1084,7 @@ impl SurfnetSvm { self.simulated_transaction_profiles.clear(); self.accounts_by_owner = accounts_by_owner; self.account_associated_data.clear(); - self.token_accounts.clear(); + self.token_accounts.clear()?; self.token_mints = token_mints; self.token_accounts_by_owner.clear(); self.token_accounts_by_delegate.clear(); @@ -2149,7 +2152,11 @@ impl SurfnetSvm { let token_mint = if let Some(mint) = token_mint { Some(mint) } else { - self.token_accounts.get(pubkey).map(|ta| ta.mint()) + self.token_accounts + .get(&pubkey.to_string()) + .ok() + .flatten() + .map(|ta| ta.mint()) }; token_mint.and_then(|mint| self.account_associated_data.get(&mint).cloned()) @@ -2189,7 +2196,13 @@ impl SurfnetSvm { if let Some(account_pubkeys) = self.token_accounts_by_delegate.get(delegate) { account_pubkeys .iter() - .filter_map(|pk| self.token_accounts.get(pk).map(|ta| (*pk, *ta))) + .filter_map(|pk| { + self.token_accounts + .get(&pk.to_string()) + .ok() + .flatten() + .map(|ta| (*pk, ta)) + }) .collect() } else { Vec::new() @@ -2212,7 +2225,13 @@ impl SurfnetSvm { if let Some(account_pubkeys) = self.token_accounts_by_owner.get(owner) { account_pubkeys .iter() - .filter_map(|pk| self.token_accounts.get(pk).map(|ta| (*pk, *ta))) + .filter_map(|pk| { + self.token_accounts + .get(&pk.to_string()) + .ok() + .flatten() + .map(|ta| (*pk, ta)) + }) .collect() } else { Vec::new() @@ -2253,7 +2272,13 @@ impl SurfnetSvm { if let Some(account_pubkeys) = self.token_accounts_by_mint.get(mint) { account_pubkeys .iter() - .filter_map(|pk| self.token_accounts.get(pk).map(|ta| (*pk, *ta))) + .filter_map(|pk| { + self.token_accounts + .get(&pk.to_string()) + .ok() + .flatten() + .map(|ta| (*pk, ta)) + }) .collect() } else { Vec::new() @@ -2945,7 +2970,7 @@ mod tests { svm.set_account(&token_account_pubkey, account).unwrap(); // test all indexes were created correctly - 
assert_eq!(svm.token_accounts.len(), 1); + assert_eq!(svm.token_accounts.keys().unwrap().len(), 1); // test owner index let owner_accounts = svm.get_parsed_token_accounts_by_owner(&owner); @@ -3055,7 +3080,7 @@ mod tests { svm.set_account(&system_account_pubkey, account).unwrap(); // should be in general registry but not token indexes - assert_eq!(svm.token_accounts.len(), 0); + assert_eq!(svm.token_accounts.keys().unwrap().len(), 0); assert_eq!(svm.token_accounts_by_owner.len(), 0); assert_eq!(svm.token_accounts_by_delegate.len(), 0); assert_eq!(svm.token_accounts_by_mint.len(), 0); @@ -3888,6 +3913,11 @@ mod tests { 0 ); assert_eq!(svm.get_token_accounts_by_delegate(&delegate).len(), 0); - assert!(svm.token_accounts.get(&token_account_pubkey).is_none()); + assert!( + svm.token_accounts + .get(&token_account_pubkey.to_string()) + .unwrap() + .is_none() + ); } } diff --git a/crates/core/src/types.rs b/crates/core/src/types.rs index cfa268e62..4a0379dab 100644 --- a/crates/core/src/types.rs +++ b/crates/core/src/types.rs @@ -869,6 +869,68 @@ impl TokenAccount { } } +impl Serialize for TokenAccount { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + // Use discriminant byte (0 = SplToken2022, 1 = SplToken) + packed bytes, then base64 encode + let mut bytes = Vec::with_capacity(1 + spl_token_2022_interface::state::Account::LEN); + match self { + Self::SplToken2022(account) => { + bytes.push(0u8); + let mut dst = [0u8; spl_token_2022_interface::state::Account::LEN]; + account.pack_into_slice(&mut dst); + bytes.extend_from_slice(&dst); + } + Self::SplToken(account) => { + bytes.push(1u8); + let mut dst = [0u8; spl_token_interface::state::Account::LEN]; + account.pack_into_slice(&mut dst); + bytes.extend_from_slice(&dst); + } + } + let encoded = BASE64_STANDARD.encode(&bytes); + serializer.serialize_str(&encoded) + } +} + +impl<'de> Deserialize<'de> for TokenAccount { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, 
+ { + let encoded = String::deserialize(deserializer)?; + let bytes = BASE64_STANDARD + .decode(&encoded) + .map_err(serde::de::Error::custom)?; + + if bytes.is_empty() { + return Err(serde::de::Error::custom("Empty TokenAccount bytes")); + } + + let discriminant = bytes[0]; + let data = &bytes[1..]; + + match discriminant { + 0 => { + let account = spl_token_2022_interface::state::Account::unpack(data) + .map_err(serde::de::Error::custom)?; + Ok(TokenAccount::SplToken2022(account)) + } + 1 => { + let account = spl_token_interface::state::Account::unpack(data) + .map_err(serde::de::Error::custom)?; + Ok(TokenAccount::SplToken(account)) + } + _ => Err(serde::de::Error::custom(format!( + "Unknown TokenAccount discriminant: {}", + discriminant + ))), + } + } +} + #[derive(Debug, Clone)] pub enum MintAccount { SplToken2022(spl_token_2022_interface::state::Mint), From 4ba7dee9d62367b919a9ff2f8e196dcb6426c1b1 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Fri, 9 Jan 2026 11:36:36 -0500 Subject: [PATCH 36/54] refactor: update token_mints to use Storage trait and improve serialization/deserialization for MintAccount --- crates/core/src/surfnet/locker.rs | 17 ++++++--- crates/core/src/surfnet/svm.rs | 24 +++++++----- crates/core/src/types.rs | 62 +++++++++++++++++++++++++++++++ 3 files changed, 89 insertions(+), 14 deletions(-) diff --git a/crates/core/src/surfnet/locker.rs b/crates/core/src/surfnet/locker.rs index 6a81222df..1c6e05b25 100644 --- a/crates/core/src/surfnet/locker.rs +++ b/crates/core/src/surfnet/locker.rs @@ -1306,8 +1306,9 @@ impl SurfnetSvmLocker { .map(|(_, a)| { svm_reader .token_mints - .get(&a.mint()) - .cloned() + .get(&a.mint().to_string()) + .ok() + .flatten() .ok_or(SurfpoolError::token_mint_not_found(a.mint())) }) .collect::, SurfpoolError>>() @@ -1487,9 +1488,10 @@ impl SurfnetSvmLocker { .map(|(_, a)| { svm_writer .token_mints - .get(&a.mint()) + .get(&a.mint().to_string()) + .ok() + .flatten() 
.ok_or(SurfpoolError::token_mint_not_found(a.mint())) - .cloned() }) .collect::, SurfpoolError>>()?; @@ -2016,7 +2018,12 @@ impl SurfnetSvmLocker { let token_accounts = svm_reader.get_token_accounts_by_mint(mint); // get mint information to determine decimals - let mint_decimals = if let Some(mint_account) = svm_reader.token_mints.get(mint) { + let mint_decimals = if let Some(mint_account) = svm_reader + .token_mints + .get(&mint.to_string()) + .ok() + .flatten() + { mint_account.decimals() } else { 0 diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index 5a18bd8ab..5bba7d383 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -248,7 +248,7 @@ pub struct SurfnetSvm { pub accounts_by_owner: HashMap>, pub account_associated_data: HashMap, pub token_accounts: Box>, - pub token_mints: HashMap, + pub token_mints: Box>, pub token_accounts_by_owner: HashMap>, pub token_accounts_by_delegate: HashMap>, pub token_accounts_by_mint: HashMap>, @@ -300,6 +300,7 @@ impl SurfnetSvm { self.blocks.shutdown(); self.transactions.shutdown(); self.token_accounts.shutdown(); + self.token_mints.shutdown(); } /// Creates a new instance of `SurfnetSvm`. 
@@ -331,12 +332,15 @@ impl SurfnetSvm { native_mint_account.owner, vec![spl_token_interface::native_mint::ID], )]); - let token_mints = - HashMap::from([(spl_token_interface::native_mint::ID, parsed_mint_account)]); - let blocks_db = new_kv_store(&database_url, "blocks", surfnet_id)?; let transactions_db = new_kv_store(&database_url, "transactions", surfnet_id)?; let token_accounts_db = new_kv_store(&database_url, "token_accounts", surfnet_id)?; + let mut token_mints_db: Box> = + new_kv_store(&database_url, "token_mints", surfnet_id)?; + token_mints_db.store( + spl_token_interface::native_mint::ID.to_string(), + parsed_mint_account, + )?; let chain_tip = if let Some((_, block)) = blocks_db .into_iter() @@ -384,7 +388,7 @@ impl SurfnetSvm { accounts_by_owner, account_associated_data: HashMap::new(), token_accounts: token_accounts_db, - token_mints, + token_mints: token_mints_db, token_accounts_by_owner: HashMap::new(), token_accounts_by_delegate: HashMap::new(), token_accounts_by_mint: HashMap::new(), @@ -977,7 +981,7 @@ impl SurfnetSvm { } if let Ok(mint_account) = MintAccount::unpack(&account.data) { - self.token_mints.insert(*pubkey, mint_account); + self.token_mints.store(pubkey.to_string(), mint_account)?; } if let Ok(mint) = @@ -1071,8 +1075,6 @@ impl SurfnetSvm { native_mint_account.owner, vec![spl_token_interface::native_mint::ID], )]); - let token_mints = - HashMap::from([(spl_token_interface::native_mint::ID, parsed_mint_account)]); self.blocks.clear()?; self.transactions.clear()?; @@ -1085,7 +1087,11 @@ impl SurfnetSvm { self.accounts_by_owner = accounts_by_owner; self.account_associated_data.clear(); self.token_accounts.clear()?; - self.token_mints = token_mints; + self.token_mints.clear()?; + self.token_mints.store( + spl_token_interface::native_mint::ID.to_string(), + parsed_mint_account, + )?; self.token_accounts_by_owner.clear(); self.token_accounts_by_delegate.clear(); self.token_accounts_by_mint.clear(); diff --git a/crates/core/src/types.rs 
b/crates/core/src/types.rs index 4a0379dab..35394aa57 100644 --- a/crates/core/src/types.rs +++ b/crates/core/src/types.rs @@ -937,6 +937,68 @@ pub enum MintAccount { SplToken(spl_token_interface::state::Mint), } +impl Serialize for MintAccount { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + // Use discriminant byte (0 = SplToken2022, 1 = SplToken) + packed bytes, then base64 encode + let mut bytes = Vec::with_capacity(1 + spl_token_2022_interface::state::Mint::LEN); + match self { + Self::SplToken2022(mint) => { + bytes.push(0u8); + let mut dst = [0u8; spl_token_2022_interface::state::Mint::LEN]; + mint.pack_into_slice(&mut dst); + bytes.extend_from_slice(&dst); + } + Self::SplToken(mint) => { + bytes.push(1u8); + let mut dst = [0u8; spl_token_interface::state::Mint::LEN]; + mint.pack_into_slice(&mut dst); + bytes.extend_from_slice(&dst); + } + } + let encoded = BASE64_STANDARD.encode(&bytes); + serializer.serialize_str(&encoded) + } +} + +impl<'de> Deserialize<'de> for MintAccount { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let encoded = String::deserialize(deserializer)?; + let bytes = BASE64_STANDARD + .decode(&encoded) + .map_err(serde::de::Error::custom)?; + + if bytes.is_empty() { + return Err(serde::de::Error::custom("Empty MintAccount bytes")); + } + + let discriminant = bytes[0]; + let data = &bytes[1..]; + + match discriminant { + 0 => { + let mint = spl_token_2022_interface::state::Mint::unpack(data) + .map_err(serde::de::Error::custom)?; + Ok(MintAccount::SplToken2022(mint)) + } + 1 => { + let mint = spl_token_interface::state::Mint::unpack(data) + .map_err(serde::de::Error::custom)?; + Ok(MintAccount::SplToken(mint)) + } + _ => Err(serde::de::Error::custom(format!( + "Unknown MintAccount discriminant: {}", + discriminant + ))), + } + } +} + impl MintAccount { pub fn unpack(bytes: &[u8]) -> SurfpoolResult { if let Ok(mint) = From 94fae08870a0edc91cccfbee47dfbdcac327b513 Mon Sep 17 
00:00:00 2001
From: MicaiahReid
Date: Fri, 9 Jan 2026 11:41:10 -0500
Subject: [PATCH 37/54] combine TokenAccount/TokenMint serde discriminant logic
 into one enum

---
 crates/core/src/types.rs | 57 +++++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 37 insertions(+), 20 deletions(-)

diff --git a/crates/core/src/types.rs b/crates/core/src/types.rs
index 35394aa57..86d6e2298 100644
--- a/crates/core/src/types.rs
+++ b/crates/core/src/types.rs
@@ -717,6 +717,29 @@ impl RemoteRpcResult {
     }
 }
 
+/// Discriminant byte used for serializing token program variants.
+/// Ensures consistent encoding between TokenAccount and MintAccount, and keeps the
+/// byte values (0 = SplToken2022, 1 = SplToken) already persisted by the previous serializers.
+#[repr(u8)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum TokenProgramDiscriminant {
+    SplToken2022 = 0,
+    SplToken = 1,
+}
+
+impl TokenProgramDiscriminant {
+    pub fn from_byte(byte: u8) -> Option {
+        match byte {
+            0 => Some(Self::SplToken2022),
+            1 => Some(Self::SplToken),
+            _ => None,
+        }
+    }
+
+    pub fn as_byte(self) -> u8 {
+        self as u8
+    }
+}
+
 #[derive(Debug, Copy, Clone, PartialEq)]
 pub enum TokenAccount {
     SplToken2022(spl_token_2022_interface::state::Account),
@@ -874,17 +897,16 @@ impl Serialize for TokenAccount {
     where
         S: Serializer,
     {
-        // Use discriminant byte (0 = SplToken2022, 1 = SplToken) + packed bytes, then base64 encode
         let mut bytes = Vec::with_capacity(1 + spl_token_2022_interface::state::Account::LEN);
         match self {
             Self::SplToken2022(account) => {
-                bytes.push(0u8);
+                bytes.push(TokenProgramDiscriminant::SplToken2022.as_byte());
                 let mut dst = [0u8; spl_token_2022_interface::state::Account::LEN];
                 account.pack_into_slice(&mut dst);
                 bytes.extend_from_slice(&dst);
             }
             Self::SplToken(account) => {
-                bytes.push(1u8);
+                bytes.push(TokenProgramDiscriminant::SplToken.as_byte());
                 let mut dst = [0u8; spl_token_interface::state::Account::LEN];
                 account.pack_into_slice(&mut dst);
                 bytes.extend_from_slice(&dst);
             }
         }
         let encoded = BASE64_STANDARD.encode(&bytes);
         serializer.serialize_str(&encoded)
     }
 }
@@ -909,24 +931,22 @@ impl<'de> Deserialize<'de> for TokenAccount {
             return Err(serde::de::Error::custom("Empty 
TokenAccount bytes")); } - let discriminant = bytes[0]; + let discriminant = TokenProgramDiscriminant::from_byte(bytes[0]).ok_or_else(|| { + serde::de::Error::custom(format!("Unknown TokenAccount discriminant: {}", bytes[0])) + })?; let data = &bytes[1..]; match discriminant { - 0 => { + TokenProgramDiscriminant::SplToken2022 => { let account = spl_token_2022_interface::state::Account::unpack(data) .map_err(serde::de::Error::custom)?; Ok(TokenAccount::SplToken2022(account)) } - 1 => { + TokenProgramDiscriminant::SplToken => { let account = spl_token_interface::state::Account::unpack(data) .map_err(serde::de::Error::custom)?; Ok(TokenAccount::SplToken(account)) } - _ => Err(serde::de::Error::custom(format!( - "Unknown TokenAccount discriminant: {}", - discriminant - ))), } } } @@ -942,17 +962,16 @@ impl Serialize for MintAccount { where S: Serializer, { - // Use discriminant byte (0 = SplToken2022, 1 = SplToken) + packed bytes, then base64 encode let mut bytes = Vec::with_capacity(1 + spl_token_2022_interface::state::Mint::LEN); match self { Self::SplToken2022(mint) => { - bytes.push(0u8); + bytes.push(TokenProgramDiscriminant::SplToken2022.as_byte()); let mut dst = [0u8; spl_token_2022_interface::state::Mint::LEN]; mint.pack_into_slice(&mut dst); bytes.extend_from_slice(&dst); } Self::SplToken(mint) => { - bytes.push(1u8); + bytes.push(TokenProgramDiscriminant::SplToken.as_byte()); let mut dst = [0u8; spl_token_interface::state::Mint::LEN]; mint.pack_into_slice(&mut dst); bytes.extend_from_slice(&dst); @@ -977,24 +996,22 @@ impl<'de> Deserialize<'de> for MintAccount { return Err(serde::de::Error::custom("Empty MintAccount bytes")); } - let discriminant = bytes[0]; + let discriminant = TokenProgramDiscriminant::from_byte(bytes[0]).ok_or_else(|| { + serde::de::Error::custom(format!("Unknown MintAccount discriminant: {}", bytes[0])) + })?; let data = &bytes[1..]; match discriminant { - 0 => { + TokenProgramDiscriminant::SplToken2022 => { let mint = 
spl_token_2022_interface::state::Mint::unpack(data) .map_err(serde::de::Error::custom)?; Ok(MintAccount::SplToken2022(mint)) } - 1 => { + TokenProgramDiscriminant::SplToken => { let mint = spl_token_interface::state::Mint::unpack(data) .map_err(serde::de::Error::custom)?; Ok(MintAccount::SplToken(mint)) } - _ => Err(serde::de::Error::custom(format!( - "Unknown MintAccount discriminant: {}", - discriminant - ))), } } } From 65c79e564d20bff5493901f886368e7684368d52 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Fri, 9 Jan 2026 13:03:31 -0500 Subject: [PATCH 38/54] refactor: replace HashMap with Storage trait for token account indexing --- crates/core/src/surfnet/svm.rs | 193 +++++++++++++++++++++------------ 1 file changed, 125 insertions(+), 68 deletions(-) diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index 5bba7d383..dfb556d20 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -249,9 +249,9 @@ pub struct SurfnetSvm { pub account_associated_data: HashMap, pub token_accounts: Box>, pub token_mints: Box>, - pub token_accounts_by_owner: HashMap>, - pub token_accounts_by_delegate: HashMap>, - pub token_accounts_by_mint: HashMap>, + pub token_accounts_by_owner: Box>>, + pub token_accounts_by_delegate: Box>>, + pub token_accounts_by_mint: Box>>, pub total_supply: u64, pub circulating_supply: u64, pub non_circulating_supply: u64, @@ -301,6 +301,9 @@ impl SurfnetSvm { self.transactions.shutdown(); self.token_accounts.shutdown(); self.token_mints.shutdown(); + self.token_accounts_by_owner.shutdown(); + self.token_accounts_by_delegate.shutdown(); + self.token_accounts_by_mint.shutdown(); } /// Creates a new instance of `SurfnetSvm`. 
@@ -341,6 +344,12 @@ impl SurfnetSvm { spl_token_interface::native_mint::ID.to_string(), parsed_mint_account, )?; + let token_accounts_by_owner_db: Box>> = + new_kv_store(&database_url, "token_accounts_by_owner", surfnet_id)?; + let token_accounts_by_delegate_db: Box>> = + new_kv_store(&database_url, "token_accounts_by_delegate", surfnet_id)?; + let token_accounts_by_mint_db: Box>> = + new_kv_store(&database_url, "token_accounts_by_mint", surfnet_id)?; let chain_tip = if let Some((_, block)) = blocks_db .into_iter() @@ -389,9 +398,9 @@ impl SurfnetSvm { account_associated_data: HashMap::new(), token_accounts: token_accounts_db, token_mints: token_mints_db, - token_accounts_by_owner: HashMap::new(), - token_accounts_by_delegate: HashMap::new(), - token_accounts_by_mint: HashMap::new(), + token_accounts_by_owner: token_accounts_by_owner_db, + token_accounts_by_delegate: token_accounts_by_delegate_db, + token_accounts_by_mint: token_accounts_by_mint_db, total_supply: 0, circulating_supply: 0, non_circulating_supply: 0, @@ -949,31 +958,47 @@ impl SurfnetSvm { // if it's a token account, update token-specific indexes if is_supported_token_program(&account.owner) { if let Ok(token_account) = TokenAccount::unpack(&account.data) { + let pubkey_str = pubkey.to_string(); + // index by owner -> check for duplicates - let token_owner_accounts = self + let owner_key = token_account.owner().to_string(); + let mut token_owner_accounts = self .token_accounts_by_owner - .entry(token_account.owner()) - .or_default(); - - if !token_owner_accounts.contains(pubkey) { - token_owner_accounts.push(*pubkey); + .get(&owner_key) + .ok() + .flatten() + .unwrap_or_default(); + if !token_owner_accounts.contains(&pubkey_str) { + token_owner_accounts.push(pubkey_str.clone()); + self.token_accounts_by_owner + .store(owner_key, token_owner_accounts)?; } // index by mint -> check for duplicates - let mint_accounts = self + let mint_key = token_account.mint().to_string(); + let mut mint_accounts = 
self .token_accounts_by_mint - .entry(token_account.mint()) - .or_default(); - - if !mint_accounts.contains(pubkey) { - mint_accounts.push(*pubkey); + .get(&mint_key) + .ok() + .flatten() + .unwrap_or_default(); + if !mint_accounts.contains(&pubkey_str) { + mint_accounts.push(pubkey_str.clone()); + self.token_accounts_by_mint.store(mint_key, mint_accounts)?; } if let COption::Some(delegate) = token_account.delegate() { - let delegate_accounts = - self.token_accounts_by_delegate.entry(delegate).or_default(); - if !delegate_accounts.contains(pubkey) { - delegate_accounts.push(*pubkey); + let delegate_key = delegate.to_string(); + let mut delegate_accounts = self + .token_accounts_by_delegate + .get(&delegate_key) + .ok() + .flatten() + .unwrap_or_default(); + if !delegate_accounts.contains(&pubkey_str) { + delegate_accounts.push(pubkey_str); + self.token_accounts_by_delegate + .store(delegate_key, delegate_accounts)?; } } self.token_accounts @@ -1026,33 +1051,46 @@ impl SurfnetSvm { // if it was a token account, remove from token indexes if is_supported_token_program(&old_account.owner) { if let Some(old_token_account) = self.token_accounts.take(&pubkey.to_string())? 
{ - if let Some(accounts) = self - .token_accounts_by_owner - .get_mut(&old_token_account.owner()) + let pubkey_str = pubkey.to_string(); + + let owner_key = old_token_account.owner().to_string(); + if let Some(mut accounts) = + self.token_accounts_by_owner.get(&owner_key).ok().flatten() { - accounts.retain(|pk| pk != pubkey); + accounts.retain(|pk| pk != &pubkey_str); if accounts.is_empty() { - self.token_accounts_by_owner - .remove(&old_token_account.owner()); + self.token_accounts_by_owner.take(&owner_key)?; + } else { + self.token_accounts_by_owner.store(owner_key, accounts)?; } } - if let Some(accounts) = self - .token_accounts_by_mint - .get_mut(&old_token_account.mint()) + let mint_key = old_token_account.mint().to_string(); + if let Some(mut accounts) = + self.token_accounts_by_mint.get(&mint_key).ok().flatten() { - accounts.retain(|pk| pk != pubkey); + accounts.retain(|pk| pk != &pubkey_str); if accounts.is_empty() { - self.token_accounts_by_mint - .remove(&old_token_account.mint()); + self.token_accounts_by_mint.take(&mint_key)?; + } else { + self.token_accounts_by_mint.store(mint_key, accounts)?; } } if let COption::Some(delegate) = old_token_account.delegate() { - if let Some(accounts) = self.token_accounts_by_delegate.get_mut(&delegate) { - accounts.retain(|pk| pk != pubkey); + let delegate_key = delegate.to_string(); + if let Some(mut accounts) = self + .token_accounts_by_delegate + .get(&delegate_key) + .ok() + .flatten() + { + accounts.retain(|pk| pk != &pubkey_str); if accounts.is_empty() { - self.token_accounts_by_delegate.remove(&delegate); + self.token_accounts_by_delegate.take(&delegate_key)?; + } else { + self.token_accounts_by_delegate + .store(delegate_key, accounts)?; } } } @@ -1092,9 +1130,9 @@ impl SurfnetSvm { spl_token_interface::native_mint::ID.to_string(), parsed_mint_account, )?; - self.token_accounts_by_owner.clear(); - self.token_accounts_by_delegate.clear(); - self.token_accounts_by_mint.clear(); + 
self.token_accounts_by_owner.clear()?; + self.token_accounts_by_delegate.clear()?; + self.token_accounts_by_mint.clear()?; self.non_circulating_accounts.clear(); self.registered_idls.clear(); self.runbook_executions.clear(); @@ -2199,15 +2237,21 @@ impl SurfnetSvm { /// /// * A vector of (account_pubkey, token_account) tuples for all token accounts delegated to the specified delegate. pub fn get_token_accounts_by_delegate(&self, delegate: &Pubkey) -> Vec<(Pubkey, TokenAccount)> { - if let Some(account_pubkeys) = self.token_accounts_by_delegate.get(delegate) { + if let Some(account_pubkeys) = self + .token_accounts_by_delegate + .get(&delegate.to_string()) + .ok() + .flatten() + { account_pubkeys .iter() - .filter_map(|pk| { + .filter_map(|pk_str| { + let pk = Pubkey::from_str(pk_str).ok()?; self.token_accounts - .get(&pk.to_string()) + .get(pk_str) .ok() .flatten() - .map(|ta| (*pk, ta)) + .map(|ta| (pk, ta)) }) .collect() } else { @@ -2228,15 +2272,21 @@ impl SurfnetSvm { &self, owner: &Pubkey, ) -> Vec<(Pubkey, TokenAccount)> { - if let Some(account_pubkeys) = self.token_accounts_by_owner.get(owner) { + if let Some(account_pubkeys) = self + .token_accounts_by_owner + .get(&owner.to_string()) + .ok() + .flatten() + { account_pubkeys .iter() - .filter_map(|pk| { + .filter_map(|pk_str| { + let pk = Pubkey::from_str(pk_str).ok()?; self.token_accounts - .get(&pk.to_string()) + .get(pk_str) .ok() .flatten() - .map(|ta| (*pk, ta)) + .map(|ta| (pk, ta)) }) .collect() } else { @@ -2248,21 +2298,22 @@ impl SurfnetSvm { &self, owner: &Pubkey, ) -> SurfpoolResult> { - Ok(self + let account_pubkeys = self .token_accounts_by_owner - .get(owner) - .map(|account_pubkeys| { - account_pubkeys - .iter() - .filter_map(|pk| { - self.get_account(pk) - .map(|res| res.map(|account| (*pk, account.clone()))) - .transpose() - }) - .collect::, SurfpoolError>>() + .get(&owner.to_string()) + .ok() + .flatten() + .unwrap_or_default(); + + account_pubkeys + .iter() + .filter_map(|pk_str| { + 
let pk = Pubkey::from_str(pk_str).ok()?; + self.get_account(&pk) + .map(|res| res.map(|account| (pk, account.clone()))) + .transpose() }) - .transpose()? - .unwrap_or_default()) + .collect::, SurfpoolError>>() } /// Gets all token accounts for a specific mint (token type). @@ -2275,15 +2326,21 @@ impl SurfnetSvm { /// /// * A vector of (account_pubkey, token_account) tuples for all token accounts of the specified mint. pub fn get_token_accounts_by_mint(&self, mint: &Pubkey) -> Vec<(Pubkey, TokenAccount)> { - if let Some(account_pubkeys) = self.token_accounts_by_mint.get(mint) { + if let Some(account_pubkeys) = self + .token_accounts_by_mint + .get(&mint.to_string()) + .ok() + .flatten() + { account_pubkeys .iter() - .filter_map(|pk| { + .filter_map(|pk_str| { + let pk = Pubkey::from_str(pk_str).ok()?; self.token_accounts - .get(&pk.to_string()) + .get(pk_str) .ok() .flatten() - .map(|ta| (*pk, ta)) + .map(|ta| (pk, ta)) }) .collect() } else { @@ -3087,9 +3144,9 @@ mod tests { // should be in general registry but not token indexes assert_eq!(svm.token_accounts.keys().unwrap().len(), 0); - assert_eq!(svm.token_accounts_by_owner.len(), 0); - assert_eq!(svm.token_accounts_by_delegate.len(), 0); - assert_eq!(svm.token_accounts_by_mint.len(), 0); + assert_eq!(svm.token_accounts_by_owner.keys().unwrap().len(), 0); + assert_eq!(svm.token_accounts_by_delegate.keys().unwrap().len(), 0); + assert_eq!(svm.token_accounts_by_mint.keys().unwrap().len(), 0); } fn expect_account_update_event( From f979d3d548aea17579b79a02004b7bfafc9d125b Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Fri, 9 Jan 2026 13:28:22 -0500 Subject: [PATCH 39/54] refactor: replace HashMap with Storage trait for accounts_by_owner in SurfnetSvm --- crates/core/src/surfnet/svm.rs | 84 +++++++++++++++++++--------------- 1 file changed, 48 insertions(+), 36 deletions(-) diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index dfb556d20..421c927dd 100644 --- 
a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -245,7 +245,7 @@ pub struct SurfnetSvm { pub updated_at: u64, pub slot_time: u64, pub start_time: SystemTime, - pub accounts_by_owner: HashMap>, + pub accounts_by_owner: Box>>, pub account_associated_data: HashMap, pub token_accounts: Box>, pub token_mints: Box>, @@ -301,6 +301,7 @@ impl SurfnetSvm { self.transactions.shutdown(); self.token_accounts.shutdown(); self.token_mints.shutdown(); + self.accounts_by_owner.shutdown(); self.token_accounts_by_owner.shutdown(); self.token_accounts_by_delegate.shutdown(); self.token_accounts_by_mint.shutdown(); @@ -331,10 +332,12 @@ impl SurfnetSvm { let parsed_mint_account = MintAccount::unpack(&native_mint_account.data).unwrap(); // Load native mint into owned account and token mint indexes - let accounts_by_owner = HashMap::from([( - native_mint_account.owner, - vec![spl_token_interface::native_mint::ID], - )]); + let mut accounts_by_owner_db: Box>> = + new_kv_store(&database_url, "accounts_by_owner", surfnet_id)?; + accounts_by_owner_db.store( + native_mint_account.owner.to_string(), + vec![spl_token_interface::native_mint::ID.to_string()], + )?; let blocks_db = new_kv_store(&database_url, "blocks", surfnet_id)?; let transactions_db = new_kv_store(&database_url, "transactions", surfnet_id)?; let token_accounts_db = new_kv_store(&database_url, "token_accounts", surfnet_id)?; @@ -394,7 +397,7 @@ impl SurfnetSvm { updated_at: Utc::now().timestamp_millis() as u64, slot_time: DEFAULT_SLOT_TIME_MS, start_time: SystemTime::now(), - accounts_by_owner, + accounts_by_owner: accounts_by_owner_db, account_associated_data: HashMap::new(), token_accounts: token_accounts_db, token_mints: token_mints_db, @@ -950,16 +953,22 @@ impl SurfnetSvm { self.remove_from_indexes(pubkey, &old_account)?; } // add to owner index (check for duplicates) - let owner_accounts = self.accounts_by_owner.entry(account.owner).or_default(); - if !owner_accounts.contains(pubkey) { - 
owner_accounts.push(*pubkey); + let owner_key = account.owner.to_string(); + let pubkey_str = pubkey.to_string(); + let mut owner_accounts = self + .accounts_by_owner + .get(&owner_key) + .ok() + .flatten() + .unwrap_or_default(); + if !owner_accounts.contains(&pubkey_str) { + owner_accounts.push(pubkey_str.clone()); + self.accounts_by_owner.store(owner_key, owner_accounts)?; } // if it's a token account, update token-specific indexes if is_supported_token_program(&account.owner) { if let Ok(token_account) = TokenAccount::unpack(&account.data) { - let pubkey_str = pubkey.to_string(); - // index by owner -> check for duplicates let owner_key = token_account.owner().to_string(); let mut token_owner_accounts = self @@ -1041,18 +1050,20 @@ impl SurfnetSvm { pubkey: &Pubkey, old_account: &Account, ) -> SurfpoolResult<()> { - if let Some(accounts) = self.accounts_by_owner.get_mut(&old_account.owner) { - accounts.retain(|pk| pk != pubkey); + let owner_key = old_account.owner.to_string(); + let pubkey_str = pubkey.to_string(); + if let Some(mut accounts) = self.accounts_by_owner.get(&owner_key).ok().flatten() { + accounts.retain(|pk| pk != &pubkey_str); if accounts.is_empty() { - self.accounts_by_owner.remove(&old_account.owner); + self.accounts_by_owner.take(&owner_key)?; + } else { + self.accounts_by_owner.store(owner_key, accounts)?; } } // if it was a token account, remove from token indexes if is_supported_token_program(&old_account.owner) { if let Some(old_token_account) = self.token_accounts.take(&pubkey.to_string())? 
{ - let pubkey_str = pubkey.to_string(); - let owner_key = old_token_account.owner().to_string(); if let Some(mut accounts) = self.token_accounts_by_owner.get(&owner_key).ok().flatten() @@ -1108,12 +1119,6 @@ impl SurfnetSvm { .unwrap(); let parsed_mint_account = MintAccount::unpack(&native_mint_account.data).unwrap(); - // Load native mint into owned account and token mint indexes - let accounts_by_owner = HashMap::from([( - native_mint_account.owner, - vec![spl_token_interface::native_mint::ID], - )]); - self.blocks.clear()?; self.transactions.clear()?; self.transactions_queued_for_confirmation.clear(); @@ -1122,7 +1127,11 @@ impl SurfnetSvm { self.transactions_processed = 0; self.profile_tag_map.clear(); self.simulated_transaction_profiles.clear(); - self.accounts_by_owner = accounts_by_owner; + self.accounts_by_owner.clear()?; + self.accounts_by_owner.store( + native_mint_account.owner.to_string(), + vec![spl_token_interface::native_mint::ID.to_string()], + )?; self.account_associated_data.clear(); self.token_accounts.clear()?; self.token_mints.clear()?; @@ -2173,19 +2182,22 @@ impl SurfnetSvm { &self, program_id: &Pubkey, ) -> SurfpoolResult> { - let res = if let Some(account_pubkeys) = self.accounts_by_owner.get(program_id) { - account_pubkeys - .iter() - .filter_map(|pubkey| { - self.get_account(pubkey) - .map(|res| res.map(|account| (*pubkey, account.clone()))) - .transpose() - }) - .collect::, SurfpoolError>>()? 
- } else { - Vec::new() - }; - Ok(res) + let account_pubkeys = self + .accounts_by_owner + .get(&program_id.to_string()) + .ok() + .flatten() + .unwrap_or_default(); + + account_pubkeys + .iter() + .filter_map(|pk_str| { + let pk = Pubkey::from_str(pk_str).ok()?; + self.get_account(&pk) + .map(|res| res.map(|account| (pk, account.clone()))) + .transpose() + }) + .collect::, SurfpoolError>>() } fn get_additional_data( From 7d01f7588b74c48023c31c08934d962661d2a171 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Fri, 9 Jan 2026 14:34:33 -0500 Subject: [PATCH 40/54] refactor: update SurfnetSvm to use Storage trait for registered_idls and streamed_accounts, and improve IDL registration logic --- crates/core/src/rpc/surfnet_cheatcodes.rs | 9 +- crates/core/src/surfnet/locker.rs | 37 +++++--- crates/core/src/surfnet/svm.rs | 106 +++++++++++++++------- crates/types/src/types.rs | 21 +++-- 4 files changed, 115 insertions(+), 58 deletions(-) diff --git a/crates/core/src/rpc/surfnet_cheatcodes.rs b/crates/core/src/rpc/surfnet_cheatcodes.rs index 2aa1c20e9..80a0da5dd 100644 --- a/crates/core/src/rpc/surfnet_cheatcodes.rs +++ b/crates/core/src/rpc/surfnet_cheatcodes.rs @@ -1513,7 +1513,7 @@ impl SurfnetCheatcodes for SurfnetCheatcodesRpc { Ok(locker) => locker, Err(e) => return Err(e.into()), }; - svm_locker.register_idl(idl, slot); + svm_locker.register_idl(idl, slot)?; Ok(RpcResponse { context: RpcResponseContext::new(svm_locker.get_latest_absolute_slot()), value: (), @@ -1709,7 +1709,12 @@ impl SurfnetCheatcodes for SurfnetCheatcodesRpc { let svm_locker = meta.get_svm_locker()?; let value = svm_locker.with_svm_reader(|svm_reader| { - GetStreamedAccountsResponse::new(&svm_reader.streamed_accounts) + let accounts: Vec<_> = svm_reader + .streamed_accounts + .into_iter() + .map(|iter| iter.collect()) + .unwrap_or_default(); + GetStreamedAccountsResponse::from_iter(accounts) }); Ok(RpcResponse { diff --git a/crates/core/src/surfnet/locker.rs b/crates/core/src/surfnet/locker.rs 
index 1c6e05b25..e4590e78f 100644 --- a/crates/core/src/surfnet/locker.rs +++ b/crates/core/src/surfnet/locker.rs @@ -1739,13 +1739,19 @@ impl SurfnetSvmLocker { self.with_svm_writer(|svm_writer| { svm_writer .streamed_accounts - .insert(pubkey, include_owned_accounts); - }); + .store(pubkey.to_string(), include_owned_accounts) + })?; Ok(()) } - pub fn get_streamed_accounts(&self) -> HashMap { - self.with_svm_reader(|svm_reader| svm_reader.streamed_accounts.clone()) + pub fn get_streamed_accounts(&self) -> Vec<(String, bool)> { + self.with_svm_reader(|svm_reader| { + svm_reader + .streamed_accounts + .into_iter() + .map(|iter| iter.collect()) + .unwrap_or_default() + }) } /// Removes an account from the closed accounts set. @@ -2381,19 +2387,26 @@ impl SurfnetSvmLocker { } } - pub fn register_idl(&self, idl: Idl, slot: Option) { + pub fn register_idl(&self, idl: Idl, slot: Option) -> SurfpoolResult<()> { self.with_svm_writer(|svm_writer| svm_writer.register_idl(idl, slot)) } pub fn get_idl(&self, address: &Pubkey, slot: Option) -> Option { self.with_svm_reader(|svm_reader| { let query_slot = slot.unwrap_or_else(|| svm_reader.get_latest_absolute_slot()); - svm_reader.registered_idls.get(address).and_then(|heap| { - heap.iter() - .filter(|VersionedIdl(s, _)| s <= &query_slot) - .max() - .map(|VersionedIdl(_, idl)| idl.clone()) - }) + // IDLs are stored sorted by slot descending, so the first one that passes the filter is the latest + svm_reader + .registered_idls + .get(&address.to_string()) + .ok() + .flatten() + .and_then(|idl_versions| { + idl_versions + .iter() + .filter(|VersionedIdl(s, _)| *s <= query_slot) + .max() + .map(|VersionedIdl(_, idl)| idl.clone()) + }) }) } @@ -3455,7 +3468,7 @@ mod tests { // Step 2: Register the IDL for this account let account_pubkey = Pubkey::from_str_const("rec5EKMGg6MxZYaMdyBfgwp4d5rB9T1VQH5pJv5LtFJ"); - svm_locker.register_idl(idl.clone(), None); + svm_locker.register_idl(idl.clone(), None).unwrap(); // Step 3: Create an 
account with the Pyth data let pyth_account = Account { diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index 421c927dd..3762650bf 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -1,6 +1,6 @@ use std::{ cmp::max, - collections::{BTreeMap, BinaryHeap, HashMap, HashSet, VecDeque}, + collections::{BTreeMap, HashMap, HashSet, VecDeque}, str::FromStr, time::SystemTime, }; @@ -262,16 +262,16 @@ pub struct SurfnetSvm { /// For example, when an account is updated in the same slot multiple times, /// the update with higher write_version should supersede the one with lower write_version. pub write_version: u64, - pub registered_idls: HashMap>, + pub registered_idls: Box>>, // pub registered_idls: HashMap<[u8; 8], BinaryHeap>, pub feature_set: FeatureSet, pub instruction_profiling_enabled: bool, pub max_profiles: usize, pub runbook_executions: Vec, pub account_update_slots: HashMap, - pub streamed_accounts: HashMap, + pub streamed_accounts: Box>, pub recent_blockhashes: VecDeque<(SyntheticBlockhash, i64)>, - pub scheduled_overrides: HashMap>, + pub scheduled_overrides: Box>>, /// Tracks accounts that have been explicitly closed by the user. /// These accounts will not be fetched from mainnet even if they don't exist in the local cache. pub closed_accounts: HashSet, @@ -305,6 +305,9 @@ impl SurfnetSvm { self.token_accounts_by_owner.shutdown(); self.token_accounts_by_delegate.shutdown(); self.token_accounts_by_mint.shutdown(); + self.streamed_accounts.shutdown(); + self.scheduled_overrides.shutdown(); + self.registered_idls.shutdown(); } /// Creates a new instance of `SurfnetSvm`. 
@@ -353,6 +356,12 @@ impl SurfnetSvm { new_kv_store(&database_url, "token_accounts_by_delegate", surfnet_id)?; let token_accounts_by_mint_db: Box>> = new_kv_store(&database_url, "token_accounts_by_mint", surfnet_id)?; + let streamed_accounts_db: Box> = + new_kv_store(&database_url, "streamed_accounts", surfnet_id)?; + let scheduled_overrides_db: Box>> = + new_kv_store(&database_url, "scheduled_overrides", surfnet_id)?; + let registered_idls_db: Box>> = + new_kv_store(&database_url, "registered_idls", surfnet_id)?; let chain_tip = if let Some((_, block)) = blocks_db .into_iter() @@ -411,15 +420,15 @@ impl SurfnetSvm { genesis_config: GenesisConfig::default(), inflation: Inflation::default(), write_version: 0, - registered_idls: HashMap::new(), + registered_idls: registered_idls_db, feature_set, instruction_profiling_enabled: true, max_profiles: DEFAULT_PROFILING_MAP_CAPACITY, runbook_executions: Vec::new(), account_update_slots: HashMap::new(), - streamed_accounts: HashMap::new(), + streamed_accounts: streamed_accounts_db, recent_blockhashes: VecDeque::new(), - scheduled_overrides: HashMap::new(), + scheduled_overrides: scheduled_overrides_db, closed_accounts: HashSet::new(), }; @@ -562,7 +571,7 @@ impl SurfnetSvm { let registry = TemplateRegistry::new(); for (_, template) in registry.templates.into_iter() { - self.register_idl(template.idl, None); + let _ = self.register_idl(template.idl, None); } if let Some(remote_client) = remote_ctx { @@ -1143,10 +1152,10 @@ impl SurfnetSvm { self.token_accounts_by_delegate.clear()?; self.token_accounts_by_mint.clear()?; self.non_circulating_accounts.clear(); - self.registered_idls.clear(); + self.registered_idls.clear()?; self.runbook_executions.clear(); - self.streamed_accounts.clear(); - self.scheduled_overrides.clear(); + self.streamed_accounts.clear()?; + self.scheduled_overrides.clear()?; Ok(()) } @@ -1652,9 +1661,11 @@ impl SurfnetSvm { self.finalize_transactions()?; // Evict the accounts marked as streamed from cache to 
enforce them to be fetched again - let accounts_to_reset = self.streamed_accounts.clone(); - for (pubkey, include_owned_accounts) in accounts_to_reset.iter() { - self.reset_account(pubkey, *include_owned_accounts)?; + let accounts_to_reset: Vec<_> = self.streamed_accounts.into_iter()?.collect(); + for (pubkey_str, include_owned_accounts) in accounts_to_reset { + let pubkey = Pubkey::from_str(&pubkey_str) + .map_err(|e| SurfpoolError::invalid_pubkey(&pubkey_str, e.to_string()))?; + self.reset_account(&pubkey, include_owned_accounts)?; } Ok(()) @@ -1675,7 +1686,7 @@ impl SurfnetSvm { let current_slot = self.latest_epoch_info.absolute_slot; // Remove and get overrides for this slot - let Some(overrides) = self.scheduled_overrides.remove(¤t_slot) else { + let Some(overrides) = self.scheduled_overrides.take(¤t_slot)? else { // No overrides for this slot return Ok(()); }; @@ -1788,16 +1799,26 @@ impl SurfnetSvm { let owner_program_id = account.owner(); // Look up the IDL for the owner program - let Some(idl_versions) = self.registered_idls.get(owner_program_id) else { - warn!( - "No IDL registered for program {} (owner of account {}), skipping override {}", - owner_program_id, account_pubkey, override_instance.id - ); - continue; + let idl_versions = match self.registered_idls.get(&owner_program_id.to_string()) { + Ok(Some(versions)) => versions, + Ok(None) => { + warn!( + "No IDL registered for program {} (owner of account {}), skipping override {}", + owner_program_id, account_pubkey, override_instance.id + ); + continue; + } + Err(e) => { + warn!( + "Failed to get IDL for program {}: {}, skipping override {}", + owner_program_id, e, override_instance.id + ); + continue; + } }; - // Get the latest IDL version - let Some(versioned_idl) = idl_versions.peek() else { + // Get the latest IDL version (first in the sorted Vec) + let Some(versioned_idl) = idl_versions.first() else { warn!( "IDL versions empty for program {}, skipping override {}", owner_program_id, 
override_instance.id @@ -2470,13 +2491,21 @@ impl SurfnetSvm { } } - pub fn register_idl(&mut self, idl: Idl, slot: Option) { + pub fn register_idl(&mut self, idl: Idl, slot: Option) -> SurfpoolResult<()> { let slot = slot.unwrap_or(self.latest_epoch_info.absolute_slot); let program_id = Pubkey::from_str_const(&idl.address); - self.registered_idls - .entry(program_id) - .or_default() - .push(VersionedIdl(slot, idl)); + let program_id_str = program_id.to_string(); + let mut idl_versions = self + .registered_idls + .get(&program_id_str) + .ok() + .flatten() + .unwrap_or_default(); + idl_versions.push(VersionedIdl(slot, idl)); + // Sort by slot descending so the latest IDL is first + idl_versions.sort_by(|a, b| b.0.cmp(&a.0)); + self.registered_idls.store(program_id_str, idl_versions)?; + Ok(()) } fn encode_ui_account_profile_state( @@ -2633,7 +2662,10 @@ impl SurfnetSvm { let filter_slot = self.latest_epoch_info.absolute_slot; // todo: consider if we should pass in a slot if encoding == UiAccountEncoding::JsonParsed { - if let Some(registered_idls) = self.registered_idls.get(owner_program_id) { + if let Ok(Some(registered_idls)) = + self.registered_idls.get(&owner_program_id.to_string()) + { + // IDLs are stored sorted by slot descending (most recent first) let ordered_available_idls = registered_idls .iter() // only get IDLs that are active (their slot is before the latest slot) @@ -2884,10 +2916,14 @@ impl SurfnetSvm { absolute_slot, base_slot, scenario_relative_slot ); - self.scheduled_overrides - .entry(absolute_slot) - .or_insert_with(Vec::new) - .push(override_instance); + let mut slot_overrides = self + .scheduled_overrides + .get(&absolute_slot) + .ok() + .flatten() + .unwrap_or_default(); + slot_overrides.push(override_instance); + self.scheduled_overrides.store(absolute_slot, slot_overrides)?; } Ok(()) @@ -3434,7 +3470,7 @@ mod tests { serde_json::from_slice(&include_bytes!("../tests/assets/idl_v1.json").to_vec()) .unwrap(); - 
svm.register_idl(idl_v1.clone(), Some(0)); + svm.register_idl(idl_v1.clone(), Some(0)).unwrap(); let account_pubkey = Pubkey::new_unique(); @@ -3529,7 +3565,7 @@ mod tests { serde_json::from_slice(&include_bytes!("../tests/assets/idl_v2.json").to_vec()) .unwrap(); - svm.register_idl(idl_v2.clone(), Some(100)); + svm.register_idl(idl_v2.clone(), Some(100)).unwrap(); // even though we have a new IDL that is more recent, we should be able to match with the old IDL { diff --git a/crates/types/src/types.rs b/crates/types/src/types.rs index 164106667..ff858c512 100644 --- a/crates/types/src/types.rs +++ b/crates/types/src/types.rs @@ -898,7 +898,7 @@ pub enum DataIndexingCommand { } // Define a wrapper struct -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct VersionedIdl(pub Slot, pub Idl); // Implement ordering based on Slot @@ -1098,14 +1098,17 @@ pub struct GetStreamedAccountsResponse { accounts: Vec, } impl GetStreamedAccountsResponse { - pub fn new(streamed_accounts: &HashMap) -> Self { - let mut accounts = vec![]; - for (pubkey, include_owned_accounts) in streamed_accounts { - accounts.push(StreamedAccountInfo { - pubkey: pubkey.to_string(), - include_owned_accounts: *include_owned_accounts, - }); - } + pub fn from_iter(streamed_accounts: I) -> Self + where + I: IntoIterator, + { + let accounts = streamed_accounts + .into_iter() + .map(|(pubkey, include_owned_accounts)| StreamedAccountInfo { + pubkey, + include_owned_accounts, + }) + .collect(); Self { accounts } } } From f61cf9261731abb23d6a48935d15365ce5a7001a Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Fri, 9 Jan 2026 14:45:38 -0500 Subject: [PATCH 41/54] refactor HashMap to Storage for profile_tag_map --- crates/core/src/surfnet/locker.rs | 17 +++++------- crates/core/src/surfnet/svm.rs | 44 ++++++++++++++++++++----------- 2 files changed, 36 insertions(+), 25 deletions(-) diff --git a/crates/core/src/surfnet/locker.rs b/crates/core/src/surfnet/locker.rs index 
e4590e78f..fe1b30638 100644 --- a/crates/core/src/surfnet/locker.rs +++ b/crates/core/src/surfnet/locker.rs @@ -853,8 +853,8 @@ impl SurfnetSvmLocker { .await?; self.with_svm_writer(|svm_writer| { - svm_writer.write_executed_profile_result(signature, profile_result); - }); + svm_writer.write_executed_profile_result(signature, profile_result) + })?; Ok(()) } @@ -893,8 +893,8 @@ impl SurfnetSvmLocker { profile_result.key = UuidOrSignature::Uuid(uuid); self.with_svm_writer(|svm_writer| { - svm_writer.write_simulated_profile_result(uuid, tag, profile_result); - }); + svm_writer.write_simulated_profile_result(uuid, tag, profile_result) + })?; Ok(self.with_contextualized_svm_reader(|_| uuid)) } @@ -2024,11 +2024,8 @@ impl SurfnetSvmLocker { let token_accounts = svm_reader.get_token_accounts_by_mint(mint); // get mint information to determine decimals - let mint_decimals = if let Some(mint_account) = svm_reader - .token_mints - .get(&mint.to_string()) - .ok() - .flatten() + let mint_decimals = if let Some(mint_account) = + svm_reader.token_mints.get(&mint.to_string()).ok().flatten() { mint_account.decimals() } else { @@ -2370,7 +2367,7 @@ impl SurfnetSvmLocker { tag: String, config: &RpcProfileResultConfig, ) -> SurfpoolResult>> { - let tag_map = self.with_svm_reader(|svm| svm.profile_tag_map.get(&tag).cloned()); + let tag_map = self.with_svm_reader(|svm| svm.profile_tag_map.get(&tag).ok().flatten()); match tag_map { None => Ok(None), Some(uuids_or_sigs) => { diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index 3762650bf..7a9c5a1b1 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -238,7 +238,7 @@ pub struct SurfnetSvm { pub signature_subscriptions: HashMap>, pub account_subscriptions: AccountSubscriptionData, pub slot_subscriptions: Vec>, - pub profile_tag_map: HashMap>, + pub profile_tag_map: Box>>, pub simulated_transaction_profiles: HashMap, pub executed_transaction_profiles: FifoMap, pub 
logs_subscriptions: Vec, @@ -263,7 +263,6 @@ pub struct SurfnetSvm { /// the update with higher write_version should supersede the one with lower write_version. pub write_version: u64, pub registered_idls: Box>>, - // pub registered_idls: HashMap<[u8; 8], BinaryHeap>, pub feature_set: FeatureSet, pub instruction_profiling_enabled: bool, pub max_profiles: usize, @@ -308,6 +307,7 @@ impl SurfnetSvm { self.streamed_accounts.shutdown(); self.scheduled_overrides.shutdown(); self.registered_idls.shutdown(); + self.profile_tag_map.shutdown(); } /// Creates a new instance of `SurfnetSvm`. @@ -362,6 +362,8 @@ impl SurfnetSvm { new_kv_store(&database_url, "scheduled_overrides", surfnet_id)?; let registered_idls_db: Box>> = new_kv_store(&database_url, "registered_idls", surfnet_id)?; + let profile_tag_map_db: Box>> = + new_kv_store(&database_url, "profile_tag_map", surfnet_id)?; let chain_tip = if let Some((_, block)) = blocks_db .into_iter() @@ -399,7 +401,7 @@ impl SurfnetSvm { signature_subscriptions: HashMap::new(), account_subscriptions: HashMap::new(), slot_subscriptions: Vec::new(), - profile_tag_map: HashMap::new(), + profile_tag_map: profile_tag_map_db, simulated_transaction_profiles: HashMap::new(), executed_transaction_profiles: FifoMap::default(), logs_subscriptions: Vec::new(), @@ -1134,7 +1136,7 @@ impl SurfnetSvm { self.transactions_queued_for_finalization.clear(); self.perf_samples.clear(); self.transactions_processed = 0; - self.profile_tag_map.clear(); + self.profile_tag_map.clear()?; self.simulated_transaction_profiles.clear(); self.accounts_by_owner.clear()?; self.accounts_by_owner.store( @@ -2397,28 +2399,39 @@ impl SurfnetSvm { uuid: Uuid, tag: Option, profile_result: KeyedProfileResult, - ) { + ) -> SurfpoolResult<()> { self.simulated_transaction_profiles .insert(uuid, profile_result); let tag = tag.unwrap_or_else(|| uuid.to_string()); - self.profile_tag_map - .entry(tag) - .or_default() - .push(UuidOrSignature::Uuid(uuid)); + let mut tags = self + 
.profile_tag_map + .get(&tag) + .ok() + .flatten() + .unwrap_or_default(); + tags.push(UuidOrSignature::Uuid(uuid)); + self.profile_tag_map.store(tag, tags)?; + Ok(()) } pub fn write_executed_profile_result( &mut self, signature: Signature, profile_result: KeyedProfileResult, - ) { + ) -> SurfpoolResult<()> { self.executed_transaction_profiles .insert(signature, profile_result); - self.profile_tag_map - .entry(signature.to_string()) - .or_default() - .push(UuidOrSignature::Signature(signature)); + let tag = signature.to_string(); + let mut tags = self + .profile_tag_map + .get(&tag) + .ok() + .flatten() + .unwrap_or_default(); + tags.push(UuidOrSignature::Signature(signature)); + self.profile_tag_map.store(tag, tags)?; + Ok(()) } pub fn subscribe_for_logs_updates( @@ -2923,7 +2936,8 @@ impl SurfnetSvm { .flatten() .unwrap_or_default(); slot_overrides.push(override_instance); - self.scheduled_overrides.store(absolute_slot, slot_overrides)?; + self.scheduled_overrides + .store(absolute_slot, slot_overrides)?; } Ok(()) From 2cf0d2bdf34a5eb5fab2edb794a39fe00bf53e55 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Mon, 12 Jan 2026 14:00:22 -0500 Subject: [PATCH 42/54] feat: implement Storage trait for FifoMap --- crates/core/src/storage/fifo_map.rs | 43 +++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 crates/core/src/storage/fifo_map.rs diff --git a/crates/core/src/storage/fifo_map.rs b/crates/core/src/storage/fifo_map.rs new file mode 100644 index 000000000..c53ef8fa9 --- /dev/null +++ b/crates/core/src/storage/fifo_map.rs @@ -0,0 +1,43 @@ +use serde::{Deserialize, Serialize}; +use std::hash::Hash; +use surfpool_types::FifoMap; + +impl super::Storage for FifoMap +where + K: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static + std::cmp::Eq + Hash, + V: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, +{ + fn store(&mut self, key: K, value: V) -> super::StorageResult<()> { + self.insert(key, 
value); + Ok(()) + } + + fn clear(&mut self) -> super::StorageResult<()> { + self.clear(); + Ok(()) + } + + fn get(&self, key: &K) -> super::StorageResult> { + Ok(self.get(key).cloned()) + } + + fn take(&mut self, key: &K) -> super::StorageResult> { + Ok(self.remove(key)) + } + + fn keys(&self) -> super::StorageResult> { + Ok(self.iter().map(|(k, _)| k.clone()).collect()) + } + + fn into_iter(&self) -> super::StorageResult + '_>> { + Ok(Box::new(self.iter().map(|(k, v)| (k.clone(), v.clone())))) + } + + fn clone_box(&self) -> Box> { + Box::new(self.clone()) + } + + fn contains_key(&self, key: &K) -> super::StorageResult { + Ok(self.contains_key(key)) + } +} From 7bc1b191f8153e2c3b32f561de2aa5c2a17c3897 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Mon, 12 Jan 2026 14:00:32 -0500 Subject: [PATCH 43/54] refactor: enhance new_kv_store functions to support additional constraints and improve storage initialization --- crates/core/src/storage/mod.rs | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/crates/core/src/storage/mod.rs b/crates/core/src/storage/mod.rs index a33a5d778..9f1016690 100644 --- a/crates/core/src/storage/mod.rs +++ b/crates/core/src/storage/mod.rs @@ -1,3 +1,4 @@ +mod fifo_map; mod hash_map; #[cfg(feature = "postgres")] mod postgres; @@ -8,6 +9,7 @@ pub use hash_map::HashMap as StorageHashMap; pub use postgres::PostgresStorage; #[cfg(feature = "sqlite")] pub use sqlite::SqliteStorage; +pub use surfpool_types::FifoMap as StorageFifoMap; use crate::error::SurfpoolError; @@ -16,6 +18,28 @@ pub fn new_kv_store( table_name: &str, surfnet_id: u32, ) -> StorageResult>> +where + K: serde::Serialize + + serde::de::DeserializeOwned + + Send + + Sync + + 'static + + Clone + + Eq + + std::hash::Hash, + V: serde::Serialize + serde::de::DeserializeOwned + Send + Sync + 'static + Clone, +{ + new_kv_store_with_default(database_url, table_name, surfnet_id, || { + Box::new(StorageHashMap::new()) + }) +} + +pub fn 
new_kv_store_with_default( + database_url: &Option<&str>, + table_name: &str, + surfnet_id: u32, + default_storage_constructor: fn() -> Box>, +) -> StorageResult>> where K: serde::Serialize + serde::de::DeserializeOwned @@ -65,8 +89,8 @@ where } } _ => { - let storage = StorageHashMap::new(); - Ok(Box::new(storage)) + let storage = default_storage_constructor(); + Ok(storage) } } } From 1ca867bd7cd2165f6cd430675dd4f6a76cfe340f Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Mon, 12 Jan 2026 14:00:45 -0500 Subject: [PATCH 44/54] refactor: update SurfnetSvm to use Storage trait for transaction profiles and enhance serialization for KeyedProfileResult --- crates/core/src/rpc/surfnet_cheatcodes.rs | 3 +- crates/core/src/surfnet/locker.rs | 17 +++-- crates/core/src/surfnet/svm.rs | 48 ++++++++++----- crates/types/src/types.rs | 75 ++++++++++++++++++++++- 4 files changed, 120 insertions(+), 23 deletions(-) diff --git a/crates/core/src/rpc/surfnet_cheatcodes.rs b/crates/core/src/rpc/surfnet_cheatcodes.rs index 80a0da5dd..7221a3b3f 100644 --- a/crates/core/src/rpc/surfnet_cheatcodes.rs +++ b/crates/core/src/rpc/surfnet_cheatcodes.rs @@ -2886,7 +2886,8 @@ mod tests { // Insert the profile into executed_transaction_profiles client.context.svm_locker.with_svm_writer(|svm| { svm.executed_transaction_profiles - .insert(signature, keyed_profile); + .store(signature.to_string(), keyed_profile) + .unwrap(); }); // Export snapshot with PreTransaction scope diff --git a/crates/core/src/surfnet/locker.rs b/crates/core/src/surfnet/locker.rs index fe1b30638..52b4c12a9 100644 --- a/crates/core/src/surfnet/locker.rs +++ b/crates/core/src/surfnet/locker.rs @@ -2342,11 +2342,18 @@ impl SurfnetSvmLocker { config: &RpcProfileResultConfig, ) -> SurfpoolResult> { let result = match &signature_or_uuid { - UuidOrSignature::Signature(signature) => self - .with_svm_reader(|svm| svm.executed_transaction_profiles.get(signature).cloned()), - UuidOrSignature::Uuid(uuid) => { - 
self.with_svm_reader(|svm| svm.simulated_transaction_profiles.get(uuid).cloned()) - } + UuidOrSignature::Signature(signature) => self.with_svm_reader(|svm| { + svm.executed_transaction_profiles + .get(&signature.to_string()) + .ok() + .flatten() + }), + UuidOrSignature::Uuid(uuid) => self.with_svm_reader(|svm| { + svm.simulated_transaction_profiles + .get(&uuid.to_string()) + .ok() + .flatten() + }), }; Ok(result.map(|profile| self.encode_ui_keyed_profile_result(profile, config))) } diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index 7a9c5a1b1..bc57081a0 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -97,7 +97,7 @@ use crate::{ error::{SurfpoolError, SurfpoolResult}, rpc::utils::convert_transaction_metadata_from_canonical, scenarios::TemplateRegistry, - storage::{Storage, new_kv_store}, + storage::{Storage, new_kv_store, new_kv_store_with_default}, surfnet::{ LogsSubscriptionData, locker::is_supported_token_program, surfnet_lite_svm::SurfnetLiteSvm, }, @@ -239,8 +239,8 @@ pub struct SurfnetSvm { pub account_subscriptions: AccountSubscriptionData, pub slot_subscriptions: Vec>, pub profile_tag_map: Box>>, - pub simulated_transaction_profiles: HashMap, - pub executed_transaction_profiles: FifoMap, + pub simulated_transaction_profiles: Box>, + pub executed_transaction_profiles: Box>, pub logs_subscriptions: Vec, pub updated_at: u64, pub slot_time: u64, @@ -308,6 +308,8 @@ impl SurfnetSvm { self.scheduled_overrides.shutdown(); self.registered_idls.shutdown(); self.profile_tag_map.shutdown(); + self.simulated_transaction_profiles.shutdown(); + self.executed_transaction_profiles.shutdown(); } /// Creates a new instance of `SurfnetSvm`. 
@@ -364,6 +366,17 @@ impl SurfnetSvm { new_kv_store(&database_url, "registered_idls", surfnet_id)?; let profile_tag_map_db: Box>> = new_kv_store(&database_url, "profile_tag_map", surfnet_id)?; + let simulated_transaction_profiles_db: Box> = + new_kv_store(&database_url, "simulated_transaction_profiles", surfnet_id)?; + let executed_transaction_profiles_db: Box> = + new_kv_store_with_default( + &database_url, + "executed_transaction_profiles", + surfnet_id, + // Use FifoMap for executed_transaction_profiles to maintain FIFO eviction behavior + // (when no on-disk DB is provided) + || Box::new(FifoMap::::default()), + )?; let chain_tip = if let Some((_, block)) = blocks_db .into_iter() @@ -402,8 +415,8 @@ impl SurfnetSvm { account_subscriptions: HashMap::new(), slot_subscriptions: Vec::new(), profile_tag_map: profile_tag_map_db, - simulated_transaction_profiles: HashMap::new(), - executed_transaction_profiles: FifoMap::default(), + simulated_transaction_profiles: simulated_transaction_profiles_db, + executed_transaction_profiles: executed_transaction_profiles_db, logs_subscriptions: Vec::new(), updated_at: Utc::now().timestamp_millis() as u64, slot_time: DEFAULT_SLOT_TIME_MS, @@ -603,7 +616,11 @@ impl SurfnetSvm { pub fn set_profiling_map_capacity(&mut self, capacity: usize) { let clamped_capacity = max(1, capacity); self.max_profiles = clamped_capacity; - self.executed_transaction_profiles = FifoMap::new(clamped_capacity); + let is_on_disk_db = self.inner.db.is_some(); + if !is_on_disk_db { + // when using on-disk DB, we're not using the Fifo Map to manage entries + self.executed_transaction_profiles = Box::new(FifoMap::new(clamped_capacity)); + } } /// Airdrops a specified amount of lamports to a single public key. 
@@ -1137,7 +1154,8 @@ impl SurfnetSvm { self.perf_samples.clear(); self.transactions_processed = 0; self.profile_tag_map.clear()?; - self.simulated_transaction_profiles.clear(); + self.simulated_transaction_profiles.clear()?; + self.executed_transaction_profiles.clear()?; self.accounts_by_owner.clear()?; self.accounts_by_owner.store( native_mint_account.owner.to_string(), @@ -2401,7 +2419,7 @@ impl SurfnetSvm { profile_result: KeyedProfileResult, ) -> SurfpoolResult<()> { self.simulated_transaction_profiles - .insert(uuid, profile_result); + .store(uuid.to_string(), profile_result)?; let tag = tag.unwrap_or_else(|| uuid.to_string()); let mut tags = self @@ -2421,7 +2439,7 @@ impl SurfnetSvm { profile_result: KeyedProfileResult, ) -> SurfpoolResult<()> { self.executed_transaction_profiles - .insert(signature, profile_result); + .store(signature.to_string(), profile_result)?; let tag = signature.to_string(); let mut tags = self .profile_tag_map @@ -2876,7 +2894,10 @@ impl SurfnetSvm { ExportSnapshotScope::PreTransaction(signature_str) => { // Export accounts from a specific transaction's pre-execution state if let Ok(signature) = Signature::from_str(signature_str) { - if let Some(profile) = self.executed_transaction_profiles.get(&signature) { + if let Ok(Some(profile)) = self + .executed_transaction_profiles + .get(&signature.to_string()) + { // Collect accounts from pre-execution capture only // This gives us the account state BEFORE the transaction executed for (pubkey, account_opt) in @@ -3710,10 +3731,7 @@ mod tests { #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] fn test_profiling_map_capacity_default(test_type: TestType) { let (svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); - assert_eq!( - svm.executed_transaction_profiles.capacity(), - DEFAULT_PROFILING_MAP_CAPACITY - ); + assert_eq!(svm.max_profiles, DEFAULT_PROFILING_MAP_CAPACITY); } #[test_case(TestType::sqlite(); "with on-disk sqlite db")] @@ -3723,7 
+3741,7 @@ mod tests { fn test_profiling_map_capacity_set(test_type: TestType) { let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); svm.set_profiling_map_capacity(10); - assert_eq!(svm.executed_transaction_profiles.capacity(), 10); + assert_eq!(svm.max_profiles, 10); } // Feature configuration tests diff --git a/crates/types/src/types.rs b/crates/types/src/types.rs index ff858c512..95f311235 100644 --- a/crates/types/src/types.rs +++ b/crates/types/src/types.rs @@ -139,12 +139,13 @@ pub struct ComputeUnitsEstimationResult { } /// The struct for storing the profiling results. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct KeyedProfileResult { pub slot: u64, pub key: UuidOrSignature, pub instruction_profiles: Option>, pub transaction_profile: ProfileResult, + #[serde(with = "pubkey_account_map")] pub readonly_account_states: HashMap, } @@ -166,9 +167,11 @@ impl KeyedProfileResult { } } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ProfileResult { + #[serde(with = "pubkey_option_account_map")] pub pre_execution_capture: ExecutionCapture, + #[serde(with = "pubkey_option_account_map")] pub post_execution_capture: ExecutionCapture, pub compute_units_consumed: u64, pub log_messages: Option>, @@ -340,6 +343,69 @@ pub mod profile_state_map { } } +/// Serialization module for HashMap +pub mod pubkey_account_map { + use super::*; + + pub fn serialize(map: &HashMap, serializer: S) -> Result + where + S: Serializer, + { + let str_map: HashMap = + map.iter().map(|(k, v)| (k.to_string(), v)).collect(); + str_map.serialize(serializer) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + { + let str_map: HashMap = HashMap::deserialize(deserializer)?; + str_map + .into_iter() + .map(|(k, v)| { + Pubkey::from_str(&k) + .map(|pk| (pk, v)) + .map_err(serde::de::Error::custom) + }) + .collect() + } 
+} + +/// Serialization module for BTreeMap> +pub mod pubkey_option_account_map { + use super::*; + + pub fn serialize( + map: &BTreeMap>, + serializer: S, + ) -> Result + where + S: Serializer, + { + let str_map: BTreeMap> = + map.iter().map(|(k, v)| (k.to_string(), v)).collect(); + str_map.serialize(serializer) + } + + pub fn deserialize<'de, D>( + deserializer: D, + ) -> Result>, D::Error> + where + D: Deserializer<'de>, + { + let str_map: BTreeMap> = BTreeMap::deserialize(deserializer)?; + str_map + .into_iter() + .map(|(k, v)| { + Pubkey::from_str(&k) + .map(|pk| (pk, v)) + .map_err(serde::de::Error::custom) + }) + .collect() + } +} + #[derive(Debug, Clone)] pub enum SubgraphCommand { CreateCollection(Uuid, SubgraphRequest, Sender), @@ -984,6 +1050,11 @@ impl FifoMap { self.map.contains_key(key) } + /// Removes a key from the map, returning the value if present. + pub fn remove(&mut self, key: &K) -> Option { + self.map.shift_remove(key) + } + // This is a wrapper around the IndexMap::iter() method, but it preserves the insertion order of the keys. // It's used to iterate over the profiling map in the order of the keys being inserted. 
pub fn iter(&self) -> impl ExactSizeIterator { From 45c6639cd691662eae9face9d9bb2ac670015e5e Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Mon, 12 Jan 2026 15:11:50 -0500 Subject: [PATCH 45/54] feat: add count method to Storage trait and implement for FifoMap, HashMap, PostgresStorage, and SqliteStorage --- crates/core/src/storage/fifo_map.rs | 4 ++++ crates/core/src/storage/hash_map.rs | 4 ++++ crates/core/src/storage/mod.rs | 16 ++++++++++++++++ crates/core/src/storage/postgres.rs | 26 ++++++++++++++++++++++++++ crates/core/src/storage/sqlite.rs | 26 ++++++++++++++++++++++++++ 5 files changed, 76 insertions(+) diff --git a/crates/core/src/storage/fifo_map.rs b/crates/core/src/storage/fifo_map.rs index c53ef8fa9..0f1ece473 100644 --- a/crates/core/src/storage/fifo_map.rs +++ b/crates/core/src/storage/fifo_map.rs @@ -40,4 +40,8 @@ where fn contains_key(&self, key: &K) -> super::StorageResult { Ok(self.contains_key(key)) } + + fn count(&self) -> super::StorageResult { + Ok(self.len() as u64) + } } diff --git a/crates/core/src/storage/hash_map.rs b/crates/core/src/storage/hash_map.rs index 54b62ce32..a84e53b71 100644 --- a/crates/core/src/storage/hash_map.rs +++ b/crates/core/src/storage/hash_map.rs @@ -40,4 +40,8 @@ where fn contains_key(&self, key: &K) -> super::StorageResult { Ok(self.contains_key(key)) } + + fn count(&self) -> super::StorageResult { + Ok(self.len() as u64) + } } diff --git a/crates/core/src/storage/mod.rs b/crates/core/src/storage/mod.rs index 9f1016690..cab5472e1 100644 --- a/crates/core/src/storage/mod.rs +++ b/crates/core/src/storage/mod.rs @@ -187,6 +187,17 @@ impl StorageError { QueryExecuteError::GetAllKeyValuePairsError(e), ) } + pub fn count( + table_name: &str, + db_type: &str, + e: surfpool_db::diesel::result::Error, + ) -> Self { + StorageError::QueryError( + table_name.to_string(), + db_type.to_string(), + QueryExecuteError::CountError(e), + ) + } } #[derive(Debug, thiserror::Error)] @@ -203,6 +214,8 @@ pub enum QueryExecuteError { 
GetAllKeysError(#[source] surfpool_db::diesel::result::Error), #[error("Failed to get all key-value pairs: {0}")] GetAllKeyValuePairsError(#[source] surfpool_db::diesel::result::Error), + #[error("Failed to count entries: {0}")] + CountError(#[source] surfpool_db::diesel::result::Error), } pub type StorageResult = Result; @@ -224,6 +237,9 @@ pub trait Storage: Send + Sync { Ok(self.get(key)?.is_some()) } + /// Returns the number of entries in the storage. + fn count(&self) -> StorageResult; + /// Explicitly shutdown the storage, performing any cleanup like WAL checkpoint. /// This should be called before the application exits to ensure data is persisted. /// Default implementation does nothing. diff --git a/crates/core/src/storage/postgres.rs b/crates/core/src/storage/postgres.rs index 730c7296a..f26c1e272 100644 --- a/crates/core/src/storage/postgres.rs +++ b/crates/core/src/storage/postgres.rs @@ -59,6 +59,12 @@ struct KeyRecord { key: String, } +#[derive(QueryableByName, Debug)] +struct CountRecord { + #[diesel(sql_type = diesel::sql_types::BigInt)] + count: i64, +} + #[derive(Clone)] pub struct PostgresStorage { pool: Pool>, @@ -290,6 +296,26 @@ where Box::new(self.clone()) } + fn count(&self) -> StorageResult { + debug!("Counting entries in table '{}'", self.table_name); + let query = sql_query(format!( + "SELECT COUNT(*) as count FROM {} WHERE surfnet_id = $1", + self.table_name + )) + .bind::(self.surfnet_id as i32); + + trace!("Getting connection from pool for count operation"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + let records = query + .load::(&mut *conn) + .map_err(|e| StorageError::count(&self.table_name, NAME, e))?; + + let count = records.first().map(|r| r.count as u64).unwrap_or(0); + debug!("Table '{}' has {} entries", self.table_name, count); + Ok(count) + } + fn into_iter(&self) -> StorageResult + '_>> { debug!( "Creating iterator for all key-value pairs in table '{}'", diff --git 
a/crates/core/src/storage/sqlite.rs b/crates/core/src/storage/sqlite.rs index 23bf3f338..83a6abf99 100644 --- a/crates/core/src/storage/sqlite.rs +++ b/crates/core/src/storage/sqlite.rs @@ -41,6 +41,12 @@ struct KeyRecord { key: String, } +#[derive(QueryableByName, Debug)] +struct CountRecord { + #[diesel(sql_type = diesel::sql_types::BigInt)] + count: i64, +} + #[derive(Clone)] pub struct SqliteStorage { pool: Pool>, @@ -347,6 +353,26 @@ where self.checkpoint(); } + fn count(&self) -> StorageResult { + debug!("Counting entries in table '{}'", self.table_name); + let query = sql_query(format!( + "SELECT COUNT(*) as count FROM {} WHERE surfnet_id = ?", + self.table_name + )) + .bind::(self.surfnet_id as i32); + + trace!("Getting connection from pool for count operation"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + let records = query + .load::(&mut *conn) + .map_err(|e| StorageError::count(&self.table_name, NAME, e))?; + + let count = records.first().map(|r| r.count as u64).unwrap_or(0); + debug!("Table '{}' has {} entries", self.table_name, count); + Ok(count) + } + fn into_iter(&self) -> StorageResult + '_>> { debug!( "Creating iterator for all key-value pairs in table '{}'", From 5cd7d35a2c3383412d133c9979b0d22789993150 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Mon, 12 Jan 2026 15:47:17 -0500 Subject: [PATCH 46/54] fix: set svm's initial `transactions_processed` to come from db count --- crates/core/src/surfnet/svm.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index bc57081a0..6e0684f04 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -391,6 +391,9 @@ impl SurfnetSvm { BlockIdentifier::zero() }; + // Initialize transactions_processed from database count for persistent storage + let transactions_processed = transactions_db.count()?; + let mut svm = Self { inner, remote_rpc_url: None, @@ -398,7 +401,7 
@@ impl SurfnetSvm { blocks: blocks_db, transactions: transactions_db, perf_samples: VecDeque::new(), - transactions_processed: 0, + transactions_processed, simnet_events_tx, geyser_events_tx, latest_epoch_info: EpochInfo { From 5f8566058da6dbe3aedd773c5517231435f7f981 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Mon, 12 Jan 2026 15:47:41 -0500 Subject: [PATCH 47/54] fix: display correct successful tx count in tui on startup (from db data) --- crates/cli/src/cli/simnet/mod.rs | 11 +++++++---- crates/cli/src/tui/simnet.rs | 7 +++++-- crates/core/src/runloops/mod.rs | 3 ++- crates/core/src/tests/integration.rs | 9 +++++---- crates/mcp/src/surfpool/start_surfnet.rs | 2 +- crates/types/src/types.rs | 3 ++- 6 files changed, 22 insertions(+), 13 deletions(-) diff --git a/crates/cli/src/cli/simnet/mod.rs b/crates/cli/src/cli/simnet/mod.rs index 7fb8abde1..87eba3a99 100644 --- a/crates/cli/src/cli/simnet/mod.rs +++ b/crates/cli/src/cli/simnet/mod.rs @@ -157,17 +157,17 @@ pub async fn handle_start_local_surfnet_command( }) .map_err(|e| format!("{}", e))?; - loop { + let initial_transactions = loop { match simnet_events_rx.recv() { Ok(SimnetEvent::Aborted(error)) => { eprintln!("Error: {}", error); return Err(error); } Ok(SimnetEvent::Shutdown) => return Ok(()), - Ok(SimnetEvent::Ready) => break, + Ok(SimnetEvent::Ready(initial_transactions)) => break initial_transactions, _other => continue, } - } + }; for event in airdrop_events { let _ = simnet_events_tx.send(event); @@ -225,6 +225,7 @@ pub async fn handle_start_local_surfnet_command( explorer_handle, ctx_cc, Some(runloop_terminator), + initial_transactions, ) .await; @@ -246,6 +247,7 @@ async fn start_service( explorer_handle: Option, _ctx: Context, runloop_terminator: Option>, + initial_transactions: u64, ) -> Result<(), String> { let displayed_url = if cmd.no_studio { DisplayedUrl::Datasource(sanitized_config) @@ -272,6 +274,7 @@ async fn start_service( deploy_progress_rx, displayed_url, breaker, + 
initial_transactions, ) .map_err(|e| format!("{}", e))?; } @@ -369,7 +372,7 @@ fn log_events( error!("{}", error); return Err(error); } - SimnetEvent::Ready => {} + SimnetEvent::Ready(_) => {} SimnetEvent::Connected(_rpc_url) => {} SimnetEvent::Shutdown => { break; diff --git a/crates/cli/src/tui/simnet.rs b/crates/cli/src/tui/simnet.rs index 911a18f3c..3974273be 100644 --- a/crates/cli/src/tui/simnet.rs +++ b/crates/cli/src/tui/simnet.rs @@ -285,6 +285,7 @@ impl App { deploy_progress_rx: Vec>, displayed_url: DisplayedUrl, breaker: Option, + initial_transactions: u64, ) -> App { let theme = Theme::detect(); let palette = theme.palette(); @@ -328,7 +329,7 @@ impl App { block_height: 0, transaction_count: None, }, - successful_transactions: 0, + successful_transactions: initial_transactions as u32, events, include_debug_logs, deploy_progress_rx, @@ -403,6 +404,7 @@ pub fn start_app( deploy_progress_rx: Vec>, displayed_url: DisplayedUrl, breaker: Option, + initial_transactions: u64, ) -> Result<(), Box> { // setup terminal enable_raw_mode()?; @@ -419,6 +421,7 @@ pub fn start_app( deploy_progress_rx, displayed_url, breaker, + initial_transactions, ); let res = run_app(&mut terminal, app); @@ -553,7 +556,7 @@ fn run_app(terminal: &mut Terminal, mut app: App) -> io::Result<( SimnetEvent::Aborted(_error) => { break; } - SimnetEvent::Ready => {} + SimnetEvent::Ready(_) => {} SimnetEvent::Connected(_) => {} SimnetEvent::Shutdown => { break; diff --git a/crates/core/src/runloops/mod.rs b/crates/core/src/runloops/mod.rs index af62380bb..cef20d9d5 100644 --- a/crates/core/src/runloops/mod.rs +++ b/crates/core/src/runloops/mod.rs @@ -138,7 +138,8 @@ pub async fn start_local_surfnet_runloop( let (clock_event_rx, clock_command_tx) = start_clock_runloop(simnet_config.slot_time, Some(simnet_events_tx_cc.clone())); - let _ = simnet_events_tx_cc.send(SimnetEvent::Ready); + let initial_transactions = svm_locker.with_svm_reader(|svm| svm.transactions_processed); + let _ = 
simnet_events_tx_cc.send(SimnetEvent::Ready(initial_transactions)); start_block_production_runloop( clock_event_rx, diff --git a/crates/core/src/tests/integration.rs b/crates/core/src/tests/integration.rs index c2940c74a..020a44d67 100644 --- a/crates/core/src/tests/integration.rs +++ b/crates/core/src/tests/integration.rs @@ -64,7 +64,7 @@ fn wait_for_ready_and_connected(simnet_events_rx: &crossbeam_channel::Receiver { + Ok(SimnetEvent::Ready(_)) => { ready = true; } Ok(SimnetEvent::Connected(_)) => { @@ -113,7 +113,7 @@ async fn test_simnet_ready(test_type: TestType) { }); match simnet_events_rx.recv() { - Ok(SimnetEvent::Ready) | Ok(SimnetEvent::Connected(_)) => (), + Ok(SimnetEvent::Ready(_)) | Ok(SimnetEvent::Connected(_)) => (), e => panic!("Expected Ready event: {e:?}"), } } @@ -3339,7 +3339,8 @@ fn boot_simnet( }); loop { - if let Ok(SimnetEvent::Ready) = simnet_events_rx.recv_timeout(Duration::from_millis(1000)) { + if let Ok(SimnetEvent::Ready(_)) = simnet_events_rx.recv_timeout(Duration::from_millis(1000)) + { break; } } @@ -4347,7 +4348,7 @@ fn start_surfnet( let mut connected = offline_mode; loop { match simnet_events_rx.recv() { - Ok(SimnetEvent::Ready) => { + Ok(SimnetEvent::Ready(_)) => { ready = true; } Ok(SimnetEvent::Connected(_)) => { diff --git a/crates/mcp/src/surfpool/start_surfnet.rs b/crates/mcp/src/surfpool/start_surfnet.rs index b3c40e505..ad1752a4c 100644 --- a/crates/mcp/src/surfpool/start_surfnet.rs +++ b/crates/mcp/src/surfpool/start_surfnet.rs @@ -140,7 +140,7 @@ pub fn run_headless(surfnet_id: u16, rpc_port: u16, ws_port: u16) -> StartSurfne SimnetEvent::Aborted(error) => { return StartSurfnetResponse::error(error); } - SimnetEvent::Ready => { + SimnetEvent::Ready(_) => { let surfnet_url = format!("http://127.0.0.1:{}", rpc_port); break StartSurfnetResponse::success(StartSurfnetSuccess { kind: StartSurfnetKind::Headless, diff --git a/crates/types/src/types.rs b/crates/types/src/types.rs index 95f311235..83685721d 100644 --- 
a/crates/types/src/types.rs +++ b/crates/types/src/types.rs @@ -416,7 +416,8 @@ pub enum SubgraphCommand { #[derive(Debug)] pub enum SimnetEvent { - Ready, + /// Surfnet is ready, with the initial count of processed transactions from storage + Ready(u64), Connected(String), Aborted(String), Shutdown, From 8f8c9568dcca8302a6fa6f63baf06e26272b7a59 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Mon, 12 Jan 2026 16:18:22 -0500 Subject: [PATCH 48/54] implement storage for account_associated_data --- Cargo.lock | 1 + crates/core/Cargo.toml | 1 + crates/core/src/surfnet/locker.rs | 13 +++- crates/core/src/surfnet/svm.rs | 88 ++++++++++++++++---------- crates/core/src/types.rs | 102 +++++++++++++++++++++++++++++- 5 files changed, 169 insertions(+), 36 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 770eebb04..fe006403a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12228,6 +12228,7 @@ dependencies = [ "blake3", "borsh 1.5.7", "bs58", + "bytemuck", "chrono", "convert_case 0.8.0", "crossbeam", diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml index 3218a1404..c92f489c7 100644 --- a/crates/core/Cargo.toml +++ b/crates/core/Cargo.toml @@ -20,6 +20,7 @@ base64 = { workspace = true } bincode = { workspace = true } blake3 = { workspace = true } borsh = { workspace = true } +bytemuck = "1.21" bs58 = { workspace = true } chrono = { workspace = true } convert_case = { workspace = true } diff --git a/crates/core/src/surfnet/locker.rs b/crates/core/src/surfnet/locker.rs index 101679c9d..098bbe51d 100644 --- a/crates/core/src/surfnet/locker.rs +++ b/crates/core/src/surfnet/locker.rs @@ -4240,7 +4240,9 @@ mod tests { .unwrap(); assert_eq!(loaded, 1); - let account = locker.with_svm_reader(|svm| svm.get_account(&pubkey)); + let account = locker + .with_svm_reader(|svm| svm.get_account(&pubkey)) + .unwrap(); assert!(account.is_some()); let account = account.unwrap(); assert_eq!(account.lamports, 1_000_000); @@ -4286,6 +4288,7 @@ mod tests { for (i, pubkey) in 
pubkeys.iter().enumerate() { let account = locker .with_svm_reader(|svm| svm.get_account(pubkey)) + .unwrap() .unwrap(); assert_eq!(account.lamports, (i as u64 + 1) * 1_000_000); assert_eq!(account.owner, owner); @@ -4332,6 +4335,7 @@ mod tests { assert!( locker .with_svm_reader(|svm| svm.get_account(&pubkey1)) + .unwrap() .is_some() ); @@ -4339,6 +4343,7 @@ mod tests { assert!( locker .with_svm_reader(|svm| svm.get_account(&pubkey2)) + .unwrap() .is_none() ); } @@ -4407,6 +4412,7 @@ mod tests { assert!( locker .with_svm_reader(|svm| svm.get_account(&pubkey)) + .unwrap() .is_none() ); } @@ -4484,7 +4490,9 @@ mod tests { .unwrap(); // Verify account is in the owner index - let owned_accounts = locker.with_svm_reader(|svm| svm.get_account_owned_by(&owner)); + let owned_accounts = locker + .with_svm_reader(|svm| svm.get_account_owned_by(&owner)) + .unwrap(); assert_eq!(owned_accounts.len(), 1); assert_eq!(owned_accounts[0].0, pubkey); } @@ -4540,6 +4548,7 @@ mod tests { assert!( locker .with_svm_reader(|svm| svm.get_account(&valid_pubkey)) + .unwrap() .is_some() ); } diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index 63ecc3cf6..4d6494b65 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -102,8 +102,8 @@ use crate::{ LogsSubscriptionData, locker::is_supported_token_program, surfnet_lite_svm::SurfnetLiteSvm, }, types::{ - GeyserAccountUpdate, MintAccount, SurfnetTransactionStatus, SyntheticBlockhash, - TokenAccount, TransactionWithStatusMeta, + GeyserAccountUpdate, MintAccount, SerializableAccountAdditionalData, + SurfnetTransactionStatus, SyntheticBlockhash, TokenAccount, TransactionWithStatusMeta, }, }; @@ -247,7 +247,7 @@ pub struct SurfnetSvm { pub slot_time: u64, pub start_time: SystemTime, pub accounts_by_owner: Box>>, - pub account_associated_data: HashMap, + pub account_associated_data: Box>, pub token_accounts: Box>, pub token_mints: Box>, pub token_accounts_by_owner: Box>>, @@ -311,6 +311,7 @@ 
impl SurfnetSvm { self.profile_tag_map.shutdown(); self.simulated_transaction_profiles.shutdown(); self.executed_transaction_profiles.shutdown(); + self.account_associated_data.shutdown(); } /// Creates a new instance of `SurfnetSvm`. @@ -336,7 +337,7 @@ impl SurfnetSvm { .get_account(&spl_token_interface::native_mint::ID)? .unwrap(); - let account_associated_data = { + let native_mint_associated_data = { let mint = StateWithExtensions::::unpack( &native_mint_account.data, ) @@ -350,17 +351,13 @@ impl SurfnetSvm { .get_extension::() .map(|x| (*x, unix_timestamp)) .ok(); - let account_associated_data = HashMap::from([( - spl_token_interface::native_mint::ID, - AccountAdditionalDataV3 { - spl_token_additional_data: Some(SplTokenAdditionalDataV2 { - decimals: mint.base.decimals, - interest_bearing_config, - scaled_ui_amount_config, - }), - }, - )]); - account_associated_data + AccountAdditionalDataV3 { + spl_token_additional_data: Some(SplTokenAdditionalDataV2 { + decimals: mint.base.decimals, + interest_bearing_config, + scaled_ui_amount_config, + }), + } }; let parsed_mint_account = MintAccount::unpack(&native_mint_account.data).unwrap(); @@ -376,6 +373,14 @@ impl SurfnetSvm { let token_accounts_db = new_kv_store(&database_url, "token_accounts", surfnet_id)?; let mut token_mints_db: Box> = new_kv_store(&database_url, "token_mints", surfnet_id)?; + let mut account_associated_data_db: Box< + dyn Storage, + > = new_kv_store(&database_url, "account_associated_data", surfnet_id)?; + // Store initial account associated data (native mint) + account_associated_data_db.store( + spl_token_interface::native_mint::ID.to_string(), + native_mint_associated_data.into(), + )?; token_mints_db.store( spl_token_interface::native_mint::ID.to_string(), parsed_mint_account, @@ -454,7 +459,7 @@ impl SurfnetSvm { slot_time: DEFAULT_SLOT_TIME_MS, start_time: SystemTime::now(), accounts_by_owner: accounts_by_owner_db, - account_associated_data, + account_associated_data: 
account_associated_data_db, token_accounts: token_accounts_db, token_mints: token_mints_db, token_accounts_by_owner: token_accounts_by_owner_db, @@ -1101,16 +1106,16 @@ impl SurfnetSvm { .get_extension::() .map(|x| (*x, unix_timestamp)) .ok(); - self.account_associated_data.insert( - *pubkey, - AccountAdditionalDataV3 { - spl_token_additional_data: Some(SplTokenAdditionalDataV2 { - decimals: mint.base.decimals, - interest_bearing_config, - scaled_ui_amount_config, - }), - }, - ); + let additional_data: SerializableAccountAdditionalData = AccountAdditionalDataV3 { + spl_token_additional_data: Some(SplTokenAdditionalDataV2 { + decimals: mint.base.decimals, + interest_bearing_config, + scaled_ui_amount_config, + }), + } + .into(); + self.account_associated_data + .store(pubkey.to_string(), additional_data)?; }; } Ok(()) @@ -1204,7 +1209,7 @@ impl SurfnetSvm { native_mint_account.owner.to_string(), vec![spl_token_interface::native_mint::ID.to_string()], )?; - self.account_associated_data.clear(); + self.account_associated_data.clear()?; self.token_accounts.clear()?; self.token_mints.clear()?; self.token_mints.store( @@ -2299,7 +2304,13 @@ impl SurfnetSvm { .map(|ta| ta.mint()) }; - token_mint.and_then(|mint| self.account_associated_data.get(&mint).cloned()) + token_mint.and_then(|mint| { + self.account_associated_data + .get(&mint.to_string()) + .ok() + .flatten() + .and_then(|data| data.try_into().ok()) + }) } pub fn account_to_rpc_keyed_account( @@ -2918,18 +2929,29 @@ impl SurfnetSvm { } // For token accounts, we need to provide the mint additional data - let additional_data = if account.owner == spl_token_interface::id() + let additional_data: Option = if account.owner + == spl_token_interface::id() || account.owner == spl_token_2022_interface::id() { if let Ok(token_account) = TokenAccount::unpack(&account.data) { self.account_associated_data - .get(&token_account.mint()) - .cloned() + .get(&token_account.mint().to_string()) + .ok() + .flatten() + .and_then(|data| 
data.try_into().ok()) } else { - self.account_associated_data.get(pubkey).cloned() + self.account_associated_data + .get(&pubkey.to_string()) + .ok() + .flatten() + .and_then(|data| data.try_into().ok()) } } else { - self.account_associated_data.get(pubkey).cloned() + self.account_associated_data + .get(&pubkey.to_string()) + .ok() + .flatten() + .and_then(|data| data.try_into().ok()) }; let ui_account = diff --git a/crates/core/src/types.rs b/crates/core/src/types.rs index e85207491..e0fad8695 100644 --- a/crates/core/src/types.rs +++ b/crates/core/src/types.rs @@ -2,6 +2,7 @@ use std::{collections::HashSet, vec}; use agave_reserved_account_keys::ReservedAccountKeys; use base64::{Engine, prelude::BASE64_STANDARD}; +use bytemuck::{Pod, bytes_of, from_bytes}; use chrono::Utc; use litesvm::types::TransactionMetadata; use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -32,7 +33,11 @@ use solana_transaction_status::{ parse_accounts::{parse_legacy_message_accounts, parse_v0_message_accounts}, parse_ui_inner_instructions, }; -use spl_token_2022_interface::extension::StateWithExtensions; +use solana_account_decoder::parse_account_data::{AccountAdditionalDataV3, SplTokenAdditionalDataV2}; +use spl_token_2022_interface::extension::{ + StateWithExtensions, interest_bearing_mint::InterestBearingConfig, + scaled_ui_amount::ScaledUiAmountConfig, +}; use txtx_addon_kit::indexmap::IndexMap; use crate::{ @@ -40,6 +45,101 @@ use crate::{ surfnet::locker::{format_ui_amount, format_ui_amount_string}, }; +/// Helper function to serialize a Pod type to base64 +fn serialize_pod_to_base64(value: &T) -> String { + BASE64_STANDARD.encode(bytes_of(value)) +} + +/// Helper function to deserialize a Pod type from base64 +fn deserialize_pod_from_base64(encoded: &str) -> Result { + let bytes = BASE64_STANDARD + .decode(encoded) + .map_err(|e| format!("base64 decode error: {}", e))?; + if bytes.len() != std::mem::size_of::() { + return Err(format!( + "Invalid byte length: expected 
{}, got {}", + std::mem::size_of::(), + bytes.len() + )); + } + Ok(*from_bytes::(&bytes)) +} + +/// Serializable version of SplTokenAdditionalDataV2 +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SerializableSplTokenAdditionalData { + pub decimals: u8, + /// InterestBearingConfig serialized as base64, paired with unix timestamp + pub interest_bearing_config: Option<(String, i64)>, + /// ScaledUiAmountConfig serialized as base64, paired with unix timestamp + pub scaled_ui_amount_config: Option<(String, i64)>, +} + +impl From for SerializableSplTokenAdditionalData { + fn from(data: SplTokenAdditionalDataV2) -> Self { + Self { + decimals: data.decimals, + interest_bearing_config: data + .interest_bearing_config + .map(|(config, ts)| (serialize_pod_to_base64(&config), ts)), + scaled_ui_amount_config: data + .scaled_ui_amount_config + .map(|(config, ts)| (serialize_pod_to_base64(&config), ts)), + } + } +} + +impl TryFrom for SplTokenAdditionalDataV2 { + type Error = String; + + fn try_from(data: SerializableSplTokenAdditionalData) -> Result { + Ok(Self { + decimals: data.decimals, + interest_bearing_config: data + .interest_bearing_config + .map(|(encoded, ts)| { + deserialize_pod_from_base64::(&encoded) + .map(|config| (config, ts)) + }) + .transpose()?, + scaled_ui_amount_config: data + .scaled_ui_amount_config + .map(|(encoded, ts)| { + deserialize_pod_from_base64::(&encoded) + .map(|config| (config, ts)) + }) + .transpose()?, + }) + } +} + +/// Serializable version of AccountAdditionalDataV3 +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SerializableAccountAdditionalData { + pub spl_token_additional_data: Option, +} + +impl From for SerializableAccountAdditionalData { + fn from(data: AccountAdditionalDataV3) -> Self { + Self { + spl_token_additional_data: data.spl_token_additional_data.map(Into::into), + } + } +} + +impl TryFrom for AccountAdditionalDataV3 { + type Error = String; + + fn try_from(data: SerializableAccountAdditionalData) 
-> Result { + Ok(Self { + spl_token_additional_data: data + .spl_token_additional_data + .map(TryInto::try_into) + .transpose()?, + }) + } +} + /// Serializable version of TransactionTokenBalance #[derive(Debug, Clone, Serialize, Deserialize)] pub struct SerializableTransactionTokenBalance { From 14b4ebd52d0836032bb41ff4bcc3d629b9ab5fba Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Mon, 12 Jan 2026 22:55:14 -0500 Subject: [PATCH 49/54] set native mint associated data on reset network --- crates/core/src/surfnet/svm.rs | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index 4d6494b65..3463aa412 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -1193,6 +1193,30 @@ impl SurfnetSvm { .inner .get_account(&spl_token_interface::native_mint::ID)? .unwrap(); + + let native_mint_associated_data = { + let mint = StateWithExtensions::::unpack( + &native_mint_account.data, + ) + .unwrap(); + let unix_timestamp = self.inner.get_sysvar::().unix_timestamp; + let interest_bearing_config = mint + .get_extension::() + .map(|x| (*x, unix_timestamp)) + .ok(); + let scaled_ui_amount_config = mint + .get_extension::() + .map(|x| (*x, unix_timestamp)) + .ok(); + AccountAdditionalDataV3 { + spl_token_additional_data: Some(SplTokenAdditionalDataV2 { + decimals: mint.base.decimals, + interest_bearing_config, + scaled_ui_amount_config, + }), + } + }; + let parsed_mint_account = MintAccount::unpack(&native_mint_account.data).unwrap(); self.blocks.clear()?; @@ -1210,6 +1234,10 @@ impl SurfnetSvm { vec![spl_token_interface::native_mint::ID.to_string()], )?; self.account_associated_data.clear()?; + self.account_associated_data.store( + spl_token_interface::native_mint::ID.to_string(), + native_mint_associated_data.into(), + )?; self.token_accounts.clear()?; self.token_mints.clear()?; self.token_mints.store( From 3c707dbdfa516c08a111ccfaea3941c85382d186 Mon Sep 17 
00:00:00 2001 From: MicaiahReid Date: Tue, 13 Jan 2026 09:23:36 -0500 Subject: [PATCH 50/54] add overlay storage to prevent db mutations when profiling --- crates/core/src/storage/mod.rs | 2 + crates/core/src/storage/overlay.rs | 413 ++++++++++++++++++++ crates/core/src/surfnet/locker.rs | 11 +- crates/core/src/surfnet/surfnet_lite_svm.rs | 15 +- crates/core/src/surfnet/svm.rs | 76 +++- 5 files changed, 508 insertions(+), 9 deletions(-) create mode 100644 crates/core/src/storage/overlay.rs diff --git a/crates/core/src/storage/mod.rs b/crates/core/src/storage/mod.rs index cab5472e1..bbb5f381b 100644 --- a/crates/core/src/storage/mod.rs +++ b/crates/core/src/storage/mod.rs @@ -1,10 +1,12 @@ mod fifo_map; mod hash_map; +mod overlay; #[cfg(feature = "postgres")] mod postgres; #[cfg(feature = "sqlite")] mod sqlite; pub use hash_map::HashMap as StorageHashMap; +pub use overlay::OverlayStorage; #[cfg(feature = "postgres")] pub use postgres::PostgresStorage; #[cfg(feature = "sqlite")] diff --git a/crates/core/src/storage/overlay.rs b/crates/core/src/storage/overlay.rs new file mode 100644 index 000000000..3cc41a522 --- /dev/null +++ b/crates/core/src/storage/overlay.rs @@ -0,0 +1,413 @@ +use std::collections::{HashMap, HashSet}; +use std::hash::Hash; +use std::sync::{Arc, RwLock}; + +use serde::{Deserialize, Serialize}; + +use super::{Storage, StorageError, StorageResult}; + +/// Represents the state of a key in the overlay +#[derive(Clone)] +enum OverlayEntry { + /// Value was written to overlay + Written(V), + /// Value was deleted in overlay (tombstone) + Deleted, +} + +/// Thread-safe overlay storage that wraps a base storage. +/// All writes go to an in-memory HashMap overlay. +/// Reads check overlay first, then fall through to base. +/// Deletes are tracked as tombstones in the overlay. +/// +/// This is useful for transaction profiling where we need to +/// read from the database but not persist any mutations. 
+pub struct OverlayStorage { + /// The base storage (could be SQLite, Postgres, HashMap, etc.) + base: Box>, + /// In-memory overlay for writes and deletes + /// Using Arc> for thread-safety (Send + Sync) + overlay: Arc>>>, + /// Track if base was "cleared" - if true, ignore base for reads + base_cleared: Arc>, +} + +impl OverlayStorage +where + K: Clone + Eq + Hash + Send + Sync + 'static, + V: Clone + Send + Sync + 'static, +{ + /// Create a new overlay wrapping the given base storage + pub fn new(base: Box>) -> Self { + Self { + base, + overlay: Arc::new(RwLock::new(HashMap::new())), + base_cleared: Arc::new(RwLock::new(false)), + } + } +} + +impl OverlayStorage +where + K: Serialize + for<'de> Deserialize<'de> + Clone + Eq + Hash + Send + Sync + 'static, + V: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, +{ + /// Create a boxed overlay from a base storage. + /// This is a convenience method for wrapping storage fields. + pub fn wrap(base: Box>) -> Box> { + Box::new(OverlayStorage::new(base)) + } +} + +impl Storage for OverlayStorage +where + K: Serialize + for<'de> Deserialize<'de> + Clone + Eq + Hash + Send + Sync + 'static, + V: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, +{ + fn store(&mut self, key: K, value: V) -> StorageResult<()> { + // Write only to overlay, never to base + let mut overlay = self.overlay.write().map_err(|_| StorageError::LockError)?; + overlay.insert(key, OverlayEntry::Written(value)); + Ok(()) + } + + fn get(&self, key: &K) -> StorageResult> { + // First check overlay + let overlay = self.overlay.read().map_err(|_| StorageError::LockError)?; + + if let Some(entry) = overlay.get(key) { + return match entry { + OverlayEntry::Written(v) => Ok(Some(v.clone())), + OverlayEntry::Deleted => Ok(None), // Tombstone - don't query base + }; + } + drop(overlay); // Release read lock before querying base + + // Check if base was cleared + let base_cleared = self.base_cleared.read().map_err(|_| 
StorageError::LockError)?; + if *base_cleared { + return Ok(None); + } + drop(base_cleared); + + // Fall through to base storage + self.base.get(key) + } + + fn take(&mut self, key: &K) -> StorageResult> { + let mut overlay = self.overlay.write().map_err(|_| StorageError::LockError)?; + + // Check if key exists in overlay + if let Some(entry) = overlay.get(key) { + match entry { + OverlayEntry::Written(v) => { + let value = v.clone(); + // Replace with tombstone + overlay.insert(key.clone(), OverlayEntry::Deleted); + return Ok(Some(value)); + } + OverlayEntry::Deleted => { + // Already deleted + return Ok(None); + } + } + } + drop(overlay); + + // Check if base was cleared + let base_cleared = self.base_cleared.read().map_err(|_| StorageError::LockError)?; + if *base_cleared { + return Ok(None); + } + drop(base_cleared); + + // Get from base (but don't modify base) + let value = self.base.get(key)?; + + if value.is_some() { + // Mark as deleted in overlay + let mut overlay = self.overlay.write().map_err(|_| StorageError::LockError)?; + overlay.insert(key.clone(), OverlayEntry::Deleted); + } + + Ok(value) + } + + fn clear(&mut self) -> StorageResult<()> { + // Mark base as cleared and clear overlay + let mut base_cleared = self + .base_cleared + .write() + .map_err(|_| StorageError::LockError)?; + *base_cleared = true; + + let mut overlay = self.overlay.write().map_err(|_| StorageError::LockError)?; + overlay.clear(); + + Ok(()) + } + + fn keys(&self) -> StorageResult> { + let overlay = self.overlay.read().map_err(|_| StorageError::LockError)?; + let base_cleared = *self + .base_cleared + .read() + .map_err(|_| StorageError::LockError)?; + + let mut result_keys: HashSet = HashSet::new(); + let mut deleted_keys: HashSet = HashSet::new(); + + // Collect overlay keys (written) and deleted keys + for (k, entry) in overlay.iter() { + match entry { + OverlayEntry::Written(_) => { + result_keys.insert(k.clone()); + } + OverlayEntry::Deleted => { + 
deleted_keys.insert(k.clone()); + } + } + } + + // If base not cleared, add base keys (excluding deleted ones) + if !base_cleared { + drop(overlay); + + for key in self.base.keys()? { + if !deleted_keys.contains(&key) && !result_keys.contains(&key) { + result_keys.insert(key); + } + } + } + + Ok(result_keys.into_iter().collect()) + } + + fn into_iter(&self) -> StorageResult + '_>> { + let overlay = self.overlay.read().map_err(|_| StorageError::LockError)?; + let base_cleared = *self + .base_cleared + .read() + .map_err(|_| StorageError::LockError)?; + + // Collect deleted keys for filtering + let deleted_keys: HashSet = overlay + .iter() + .filter_map(|(k, entry)| { + if matches!(entry, OverlayEntry::Deleted) { + Some(k.clone()) + } else { + None + } + }) + .collect(); + + // Collect overlay written entries + let overlay_entries: Vec<(K, V)> = overlay + .iter() + .filter_map(|(k, entry)| { + if let OverlayEntry::Written(v) = entry { + Some((k.clone(), v.clone())) + } else { + None + } + }) + .collect(); + + let overlay_keys: HashSet = overlay_entries.iter().map(|(k, _)| k.clone()).collect(); + + drop(overlay); + + // Get base entries if not cleared + let base_entries: Vec<(K, V)> = if !base_cleared { + self.base + .into_iter()? 
+ .filter(|(k, _)| !deleted_keys.contains(k) && !overlay_keys.contains(k)) + .collect() + } else { + Vec::new() + }; + + // Chain overlay entries with filtered base entries + let all_entries = overlay_entries + .into_iter() + .chain(base_entries) + .collect::>(); + Ok(Box::new(all_entries.into_iter())) + } + + fn count(&self) -> StorageResult { + // Use keys() which handles all edge cases correctly + Ok(self.keys()?.len() as u64) + } + + fn shutdown(&self) { + // No-op - don't propagate to base + // The base storage should not be affected by overlay shutdown + } + + fn clone_box(&self) -> Box> { + // Clone the overlay with its current state + let overlay = self.overlay.read().unwrap(); + let base_cleared = *self.base_cleared.read().unwrap(); + + Box::new(OverlayStorage { + base: self.base.clone_box(), + overlay: Arc::new(RwLock::new(overlay.clone())), + base_cleared: Arc::new(RwLock::new(base_cleared)), + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::storage::StorageHashMap; + + #[test] + fn test_overlay_write_does_not_affect_base() { + let mut base: Box> = + Box::new(StorageHashMap::::new()); + base.store("key1".into(), "base_value".into()).unwrap(); + + let mut overlay = OverlayStorage::new(base.clone_box()); + overlay + .store("key1".into(), "overlay_value".into()) + .unwrap(); + + // Overlay should return overlay value + assert_eq!( + overlay.get(&"key1".into()).unwrap(), + Some("overlay_value".into()) + ); + + // Base should still have original value + assert_eq!( + base.get(&"key1".into()).unwrap(), + Some("base_value".into()) + ); + } + + #[test] + fn test_overlay_read_falls_through_to_base() { + let mut base: Box> = + Box::new(StorageHashMap::::new()); + base.store("key1".into(), "base_value".into()).unwrap(); + + let overlay = OverlayStorage::new(base); + + assert_eq!( + overlay.get(&"key1".into()).unwrap(), + Some("base_value".into()) + ); + } + + #[test] + fn test_overlay_delete_creates_tombstone() { + let mut base: Box> = + 
Box::new(StorageHashMap::::new()); + base.store("key1".into(), "base_value".into()).unwrap(); + + let mut overlay = OverlayStorage::new(base.clone_box()); + let taken = overlay.take(&"key1".into()).unwrap(); + + assert_eq!(taken, Some("base_value".into())); + assert_eq!(overlay.get(&"key1".into()).unwrap(), None); + + // Base should still have the value + assert_eq!( + base.get(&"key1".into()).unwrap(), + Some("base_value".into()) + ); + } + + #[test] + fn test_overlay_keys_merges_correctly() { + let mut base: Box> = + Box::new(StorageHashMap::::new()); + base.store("base_key".into(), "value".into()).unwrap(); + + let mut overlay = OverlayStorage::new(base); + overlay.store("overlay_key".into(), "value".into()).unwrap(); + + let keys = overlay.keys().unwrap(); + assert!(keys.contains(&"base_key".into())); + assert!(keys.contains(&"overlay_key".into())); + } + + #[test] + fn test_overlay_clear_ignores_base() { + let mut base: Box> = + Box::new(StorageHashMap::::new()); + base.store("key1".into(), "base_value".into()).unwrap(); + + let mut overlay = OverlayStorage::new(base.clone_box()); + overlay.clear().unwrap(); + + assert_eq!(overlay.get(&"key1".into()).unwrap(), None); + assert_eq!(overlay.keys().unwrap().len(), 0); + + // Base should still have the value + assert_eq!( + base.get(&"key1".into()).unwrap(), + Some("base_value".into()) + ); + } + + #[test] + fn test_overlay_clone_box_creates_independent_copy() { + let base: Box> = + Box::new(StorageHashMap::::new()); + + let mut overlay = OverlayStorage::new(base); + overlay.store("key1".into(), "value1".into()).unwrap(); + + let mut cloned = overlay.clone_box(); + cloned.store("key2".into(), "value2".into()).unwrap(); + + // Original should not have key2 + assert_eq!(overlay.get(&"key2".into()).unwrap(), None); + // Clone should have both + assert_eq!(cloned.get(&"key1".into()).unwrap(), Some("value1".into())); + assert_eq!(cloned.get(&"key2".into()).unwrap(), Some("value2".into())); + } + + #[test] + fn 
test_overlay_count_accounts_for_tombstones() { + let mut base: Box> = + Box::new(StorageHashMap::::new()); + base.store("key1".into(), "value1".into()).unwrap(); + base.store("key2".into(), "value2".into()).unwrap(); + + let mut overlay = OverlayStorage::new(base); + overlay.take(&"key1".into()).unwrap(); // Delete key1 + overlay.store("key3".into(), "value3".into()).unwrap(); // Add key3 + + // Should have key2 (from base) and key3 (from overlay), but not key1 (deleted) + assert_eq!(overlay.count().unwrap(), 2); + } + + #[test] + fn test_overlay_into_iter_merges_correctly() { + let mut base: Box> = + Box::new(StorageHashMap::::new()); + base.store("base_key".into(), "base_value".into()).unwrap(); + base.store("deleted_key".into(), "deleted_value".into()) + .unwrap(); + + let mut overlay = OverlayStorage::new(base); + overlay + .store("overlay_key".into(), "overlay_value".into()) + .unwrap(); + overlay.take(&"deleted_key".into()).unwrap(); + + let entries: Vec<(String, String)> = overlay.into_iter().unwrap().collect(); + + assert_eq!(entries.len(), 2); + assert!(entries.contains(&("base_key".into(), "base_value".into()))); + assert!(entries.contains(&("overlay_key".into(), "overlay_value".into()))); + assert!(!entries + .iter() + .any(|(k, _)| k == &String::from("deleted_key"))); + } +} diff --git a/crates/core/src/surfnet/locker.rs b/crates/core/src/surfnet/locker.rs index 098bbe51d..64d9587cc 100644 --- a/crates/core/src/surfnet/locker.rs +++ b/crates/core/src/surfnet/locker.rs @@ -1052,12 +1052,9 @@ impl SurfnetSvmLocker { transaction: VersionedTransaction, tag: Option, ) -> SurfpoolContextualizedResult { - let mut svm_clone = self.with_svm_reader(|svm_reader| svm_reader.clone()); - - let (dummy_simnet_tx, _) = crossbeam_channel::bounded(1); - let (dummy_geyser_tx, _) = crossbeam_channel::bounded(1); - svm_clone.simnet_events_tx = dummy_simnet_tx; - svm_clone.geyser_events_tx = dummy_geyser_tx; + // Use clone_for_profiling to wrap all storage fields with overlay 
storage, + // ensuring mutations during profiling don't affect the underlying database + let svm_clone = self.with_svm_reader(|svm_reader| svm_reader.clone_for_profiling()); let svm_locker = SurfnetSvmLocker::new(svm_clone); @@ -1350,7 +1347,7 @@ impl SurfnetSvmLocker { pre_execution_capture_cursor.insert(pubkey, pre_account); } } - let mut svm_clone = self.with_svm_reader(|svm_reader| svm_reader.clone()); + let mut svm_clone = self.with_svm_reader(|svm_reader| svm_reader.clone_for_profiling()); let (dummy_simnet_tx, _) = crossbeam_channel::bounded(1); let (dummy_geyser_tx, _) = crossbeam_channel::bounded(1); diff --git a/crates/core/src/surfnet/surfnet_lite_svm.rs b/crates/core/src/surfnet/surfnet_lite_svm.rs index d4f3ec216..819f16260 100644 --- a/crates/core/src/surfnet/surfnet_lite_svm.rs +++ b/crates/core/src/surfnet/surfnet_lite_svm.rs @@ -14,7 +14,7 @@ use solana_transaction::versioned::VersionedTransaction; use crate::{ error::{SurfpoolError, SurfpoolResult}, - storage::{Storage, new_kv_store}, + storage::{OverlayStorage, Storage, new_kv_store}, surfnet::{GetAccountResult, locker::is_supported_token_program}, }; @@ -32,6 +32,19 @@ impl SurfnetLiteSvm { } } + /// Creates a clone of the SVM with overlay storage wrapper for the database. + /// This allows profiling transactions without affecting the underlying database. + /// All database writes are buffered in memory and discarded when the clone is dropped. 
+ pub fn clone_for_profiling(&self) -> Self { + Self { + svm: self.svm.clone(), + db: self + .db + .as_ref() + .map(|db| OverlayStorage::wrap(db.clone_box())), + } + } + pub fn initialize( mut self, feature_set: FeatureSet, diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index 3463aa412..44825c368 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -97,7 +97,7 @@ use crate::{ error::{SurfpoolError, SurfpoolResult}, rpc::utils::convert_transaction_metadata_from_canonical, scenarios::TemplateRegistry, - storage::{Storage, new_kv_store, new_kv_store_with_default}, + storage::{OverlayStorage, Storage, new_kv_store, new_kv_store_with_default}, surfnet::{ LogsSubscriptionData, locker::is_supported_token_program, surfnet_lite_svm::SurfnetLiteSvm, }, @@ -314,6 +314,80 @@ impl SurfnetSvm { self.account_associated_data.shutdown(); } + /// Creates a clone of the SVM with overlay storage wrappers for all database-backed fields. + /// This allows profiling transactions without affecting the underlying database. + /// All storage writes are buffered in memory and discarded when the clone is dropped. 
+ pub fn clone_for_profiling(&self) -> Self { + let (dummy_simnet_tx, _) = crossbeam_channel::bounded(1); + let (dummy_geyser_tx, _) = crossbeam_channel::bounded(1); + + Self { + inner: self.inner.clone_for_profiling(), + remote_rpc_url: self.remote_rpc_url.clone(), + chain_tip: self.chain_tip.clone(), + + // Wrap all storage fields with OverlayStorage + blocks: OverlayStorage::wrap(self.blocks.clone_box()), + transactions: OverlayStorage::wrap(self.transactions.clone_box()), + profile_tag_map: OverlayStorage::wrap(self.profile_tag_map.clone_box()), + simulated_transaction_profiles: OverlayStorage::wrap( + self.simulated_transaction_profiles.clone_box(), + ), + executed_transaction_profiles: OverlayStorage::wrap( + self.executed_transaction_profiles.clone_box(), + ), + accounts_by_owner: OverlayStorage::wrap(self.accounts_by_owner.clone_box()), + account_associated_data: OverlayStorage::wrap(self.account_associated_data.clone_box()), + token_accounts: OverlayStorage::wrap(self.token_accounts.clone_box()), + token_mints: OverlayStorage::wrap(self.token_mints.clone_box()), + token_accounts_by_owner: OverlayStorage::wrap(self.token_accounts_by_owner.clone_box()), + token_accounts_by_delegate: OverlayStorage::wrap( + self.token_accounts_by_delegate.clone_box(), + ), + token_accounts_by_mint: OverlayStorage::wrap(self.token_accounts_by_mint.clone_box()), + registered_idls: OverlayStorage::wrap(self.registered_idls.clone_box()), + streamed_accounts: OverlayStorage::wrap(self.streamed_accounts.clone_box()), + scheduled_overrides: OverlayStorage::wrap(self.scheduled_overrides.clone_box()), + + // Clone non-storage fields normally + transactions_queued_for_confirmation: self.transactions_queued_for_confirmation.clone(), + transactions_queued_for_finalization: self.transactions_queued_for_finalization.clone(), + perf_samples: self.perf_samples.clone(), + transactions_processed: self.transactions_processed, + latest_epoch_info: self.latest_epoch_info.clone(), + + // Use dummy 
channels to prevent event propagation during profiling + simnet_events_tx: dummy_simnet_tx, + geyser_events_tx: dummy_geyser_tx, + + signature_subscriptions: self.signature_subscriptions.clone(), + account_subscriptions: self.account_subscriptions.clone(), + // Don't clone subscriptions - profiling clone shouldn't send notifications + slot_subscriptions: Vec::new(), + logs_subscriptions: Vec::new(), + snapshot_subscriptions: Vec::new(), + + updated_at: self.updated_at, + slot_time: self.slot_time, + start_time: self.start_time, + + total_supply: self.total_supply, + circulating_supply: self.circulating_supply, + non_circulating_supply: self.non_circulating_supply, + non_circulating_accounts: self.non_circulating_accounts.clone(), + genesis_config: self.genesis_config.clone(), + inflation: self.inflation, + write_version: self.write_version, + feature_set: self.feature_set.clone(), + instruction_profiling_enabled: self.instruction_profiling_enabled, + max_profiles: self.max_profiles, + runbook_executions: self.runbook_executions.clone(), + account_update_slots: self.account_update_slots.clone(), + recent_blockhashes: self.recent_blockhashes.clone(), + closed_accounts: self.closed_accounts.clone(), + } + } + /// Creates a new instance of `SurfnetSvm`. /// /// Returns a tuple containing the SVM instance, a receiver for simulation events, and a receiver for Geyser plugin events. 
From d091a49a1155d30593d637713c63b756dc6efa7a Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Tue, 13 Jan 2026 09:24:08 -0500 Subject: [PATCH 51/54] fix timeouts on flaky tests --- crates/core/src/tests/integration.rs | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/crates/core/src/tests/integration.rs b/crates/core/src/tests/integration.rs index 020a44d67..46ef5ad14 100644 --- a/crates/core/src/tests/integration.rs +++ b/crates/core/src/tests/integration.rs @@ -65,9 +65,11 @@ fn wait_for_ready_and_connected(simnet_events_rx: &crossbeam_channel::Receiver { + println!("Simnet is ready"); ready = true; } Ok(SimnetEvent::Connected(_)) => { + println!("Simnet is connected"); connected = true; } _ => (), @@ -127,6 +129,7 @@ async fn test_simnet_ready(test_type: TestType) { async fn test_simnet_ticks(test_type: TestType) { let bind_host = "127.0.0.1"; let bind_port = get_free_port().unwrap(); + let ws_port = get_free_port().unwrap(); let config = SurfpoolConfig { simnets: vec![SimnetConfig { slot_time: 1, @@ -135,7 +138,7 @@ async fn test_simnet_ticks(test_type: TestType) { rpc: RpcConfig { bind_host: bind_host.to_string(), bind_port, - ..Default::default() + ws_port, }, ..SurfpoolConfig::default() }; @@ -193,6 +196,7 @@ async fn test_simnet_some_sol_transfers(test_type: TestType) { let airdrop_token_amount = LAMPORTS_PER_SOL; let bind_host = "127.0.0.1"; let bind_port = get_free_port().unwrap(); + let ws_port = get_free_port().unwrap(); let config = SurfpoolConfig { simnets: vec![SimnetConfig { slot_time: 1, @@ -203,7 +207,7 @@ async fn test_simnet_some_sol_transfers(test_type: TestType) { rpc: RpcConfig { bind_host: bind_host.to_string(), bind_port, - ..Default::default() + ws_port, }, ..SurfpoolConfig::default() }; @@ -348,6 +352,7 @@ async fn test_add_alt_entries_fetching(test_type: TestType) { let bind_host = "127.0.0.1"; let bind_port = get_free_port().unwrap(); + let ws_port = get_free_port().unwrap(); let 
airdrop_token_amount = LAMPORTS_PER_SOL; let config = SurfpoolConfig { simnets: vec![SimnetConfig { @@ -359,11 +364,12 @@ async fn test_add_alt_entries_fetching(test_type: TestType) { rpc: RpcConfig { bind_host: bind_host.to_string(), bind_port, - ..Default::default() + ws_port, }, ..SurfpoolConfig::default() }; + println!("Initializing SVM, binding to port {}", bind_port); let (surfnet_svm, simnet_events_rx, geyser_events_rx) = test_type.initialize_svm(); let (simnet_commands_tx, simnet_commands_rx) = unbounded(); let (subgraph_commands_tx, _subgraph_commands_rx) = unbounded(); @@ -519,6 +525,7 @@ async fn test_simulate_add_alt_entries_fetching(test_type: TestType) { let bind_host = "127.0.0.1"; let bind_port = get_free_port().unwrap(); + let ws_port = get_free_port().unwrap(); let airdrop_token_amount = LAMPORTS_PER_SOL; let config = SurfpoolConfig { simnets: vec![SimnetConfig { @@ -530,7 +537,7 @@ async fn test_simulate_add_alt_entries_fetching(test_type: TestType) { rpc: RpcConfig { bind_host: bind_host.to_string(), bind_port, - ..Default::default() + ws_port, }, ..SurfpoolConfig::default() }; @@ -636,6 +643,7 @@ async fn test_simulate_transaction_no_signers(test_type: TestType) { let bind_host = "127.0.0.1"; let bind_port = get_free_port().unwrap(); + let ws_port = get_free_port().unwrap(); let airdrop_token_amount = LAMPORTS_PER_SOL; let config = SurfpoolConfig { simnets: vec![SimnetConfig { @@ -647,7 +655,7 @@ async fn test_simulate_transaction_no_signers(test_type: TestType) { rpc: RpcConfig { bind_host: bind_host.to_string(), bind_port, - ..Default::default() + ws_port, }, ..SurfpoolConfig::default() }; @@ -3339,7 +3347,8 @@ fn boot_simnet( }); loop { - if let Ok(SimnetEvent::Ready(_)) = simnet_events_rx.recv_timeout(Duration::from_millis(1000)) + if let Ok(SimnetEvent::Ready(_)) = + simnet_events_rx.recv_timeout(Duration::from_millis(1000)) { break; } @@ -4304,6 +4313,7 @@ fn start_surfnet( ) -> Result<(String, SurfnetSvmLocker), String> { let bind_host = 
"127.0.0.1"; let bind_port = get_free_port().unwrap(); + let ws_port = get_free_port().unwrap(); let offline_mode = datasource_rpc_url.is_none(); let config = SurfpoolConfig { @@ -4318,7 +4328,7 @@ fn start_surfnet( rpc: RpcConfig { bind_host: bind_host.to_string(), bind_port, - ..Default::default() + ws_port, }, ..SurfpoolConfig::default() }; From c404766b46ca9faac35bd6a4cbae4c905bcfc962 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Tue, 13 Jan 2026 09:24:38 -0500 Subject: [PATCH 52/54] add test to assert tx profiles don't update svm state --- crates/core/src/tests/integration.rs | 151 +++++++++++++++++++++++++++ 1 file changed, 151 insertions(+) diff --git a/crates/core/src/tests/integration.rs b/crates/core/src/tests/integration.rs index 46ef5ad14..3ba4f5621 100644 --- a/crates/core/src/tests/integration.rs +++ b/crates/core/src/tests/integration.rs @@ -6697,3 +6697,154 @@ fn test_nonce_accounts() { "Recipient account did not receive correct amount" ); } + +/// Tests that profiling a transaction does not mutate the original SVM state. +/// This verifies the OverlayStorage implementation correctly isolates mutations +/// during profiling from the underlying database. 
+#[cfg_attr(feature = "ignore_tests_ci", ignore = "flaky CI tests")] +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] +#[tokio::test(flavor = "multi_thread")] +async fn test_profile_transaction_does_not_mutate_state(test_type: TestType) { + let (mut svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); + let rpc_server = crate::rpc::surfnet_cheatcodes::SurfnetCheatcodesRpc; + + // Setup: Create accounts and fund the payer + let payer = Keypair::new(); + let recipient = Pubkey::new_unique(); + let lamports_to_send = 1_000_000; + let initial_payer_balance = lamports_to_send * 10; + + svm_instance + .airdrop(&payer.pubkey(), initial_payer_balance) + .unwrap() + .unwrap(); + + // Create a transfer transaction + let instruction = transfer(&payer.pubkey(), &recipient, lamports_to_send); + let latest_blockhash = svm_instance.latest_blockhash(); + let message = + Message::new_with_blockhash(&[instruction], Some(&payer.pubkey()), &latest_blockhash); + let tx = VersionedTransaction::try_new(VersionedMessage::Legacy(message.clone()), &[&payer]) + .unwrap(); + + let tx_bytes = bincode::serialize(&tx).unwrap(); + let tx_b64 = base64::engine::general_purpose::STANDARD.encode(&tx_bytes); + + // Record initial state BEFORE profiling + let initial_transactions_processed = svm_instance.transactions_processed; + let initial_payer_account = svm_instance.get_account(&payer.pubkey()).unwrap(); + let initial_recipient_account = svm_instance.get_account(&recipient).ok().flatten(); + + // Create the locker and runloop context + let svm_locker = SurfnetSvmLocker::new(svm_instance); + let (simnet_cmd_tx, _simnet_cmd_rx) = crossbeam_unbounded::(); + let (plugin_cmd_tx, _plugin_cmd_rx) = crossbeam_unbounded::(); + + let runloop_context = RunloopContext { + 
id: None, + svm_locker: svm_locker.clone(), + simnet_commands_tx: simnet_cmd_tx, + plugin_manager_commands_tx: plugin_cmd_tx, + remote_rpc_client: None, + }; + + // Profile the transaction multiple times to ensure no state leakage + for i in 0..3 { + let response: JsonRpcResult> = rpc_server + .profile_transaction( + Some(runloop_context.clone()), + tx_b64.clone(), + Some(format!("test_isolation_{}", i)), + None, + ) + .await; + + assert!( + response.is_ok(), + "Profile transaction {} failed: {:?}", + i, + response.err() + ); + + let profile_result = response.unwrap().value; + assert!( + profile_result.transaction_profile.error_message.is_none(), + "Profile {} had unexpected error: {:?}", + i, + profile_result.transaction_profile.error_message + ); + + // The profile should show the transaction would succeed + assert!( + profile_result.transaction_profile.compute_units_consumed > 0, + "Profile {} should show compute units consumed", + i + ); + } + + // Verify state is UNCHANGED after profiling + let final_transactions_processed = svm_locker.with_svm_reader(|svm| svm.transactions_processed); + let final_payer_account = svm_locker + .with_svm_reader(|svm| svm.get_account(&payer.pubkey())) + .unwrap(); + let final_recipient_account = svm_locker + .with_svm_reader(|svm| svm.get_account(&recipient)) + .ok() + .flatten(); + + // Transaction count should not have increased from profiling + assert_eq!( + initial_transactions_processed, final_transactions_processed, + "transactions_processed should not change from profiling" + ); + + // Payer balance should be unchanged (transfer was only simulated) + assert_eq!( + initial_payer_account.as_ref().map(|a| a.lamports), + final_payer_account.as_ref().map(|a| a.lamports), + "Payer balance should not change from profiling" + ); + + // Recipient should still not exist or have the same balance + assert_eq!( + initial_recipient_account.as_ref().map(|a| a.lamports), + final_recipient_account.as_ref().map(|a| a.lamports), + "Recipient 
balance should not change from profiling" + ); + + // Now actually execute the transaction to prove the state can still be mutated + let execution_result = + svm_locker.with_svm_writer(|svm| svm.send_transaction(tx.clone(), false, false)); + + assert!( + execution_result.is_ok(), + "Actual transaction execution should succeed: {:?}", + execution_result.err() + ); + + // Verify state DID change after actual execution + let post_execution_transactions = svm_locker.with_svm_reader(|svm| svm.transactions_processed); + let post_execution_recipient = svm_locker + .with_svm_reader(|svm| svm.get_account(&recipient)) + .unwrap(); + + assert_eq!( + post_execution_transactions, + initial_transactions_processed + 1, + "Transaction count should increase after actual execution" + ); + + assert!( + post_execution_recipient.is_some(), + "Recipient should exist after actual execution" + ); + + assert_eq!( + post_execution_recipient.unwrap().lamports, + lamports_to_send, + "Recipient should have received funds after actual execution" + ); +} From 445175314702f6be9ed8966b4b772fd1904f814c Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Tue, 13 Jan 2026 09:47:59 -0500 Subject: [PATCH 53/54] add test to assert instruction profiling doesn't mutate db --- crates/core/src/tests/integration.rs | 180 +++++++++++++++++++++++++++ 1 file changed, 180 insertions(+) diff --git a/crates/core/src/tests/integration.rs b/crates/core/src/tests/integration.rs index 3ba4f5621..919da1593 100644 --- a/crates/core/src/tests/integration.rs +++ b/crates/core/src/tests/integration.rs @@ -6848,3 +6848,183 @@ async fn test_profile_transaction_does_not_mutate_state(test_type: TestType) { "Recipient should have received funds after actual execution" ); } + +/// Tests that instruction-level profiling during transaction execution does not +/// mutate the original SVM state for failed transactions. 
+/// This creates a transaction with two instructions where: +/// - First instruction: a transfer that would succeed +/// - Second instruction: a transfer that will fail (insufficient funds) +/// The test verifies that even though the first instruction is profiled successfully +/// during execution, its effects are not persisted since the overall transaction fails. +#[cfg_attr(feature = "ignore_tests_ci", ignore = "flaky CI tests")] +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] +#[tokio::test(flavor = "multi_thread")] +async fn test_instruction_profiling_does_not_mutate_state(test_type: TestType) { + let (mut svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); + + // Verify instruction profiling is enabled by default + assert!( + svm_instance.instruction_profiling_enabled, + "Instruction profiling should be enabled by default" + ); + + // Setup: Create accounts + let payer = Keypair::new(); + let payer_without_funds = Keypair::new(); + let recipient = Pubkey::new_unique(); + let lamports_to_send = LAMPORTS_PER_SOL; + + // Fund only the first payer + svm_instance + .airdrop(&payer.pubkey(), lamports_to_send * 3) + .unwrap() + .unwrap(); + + // Record initial state BEFORE processing + let initial_payer_balance = svm_instance + .get_account(&payer.pubkey()) + .unwrap() + .map(|a| a.lamports) + .unwrap_or(0); + let initial_recipient_account = svm_instance.get_account(&recipient).ok().flatten(); + let initial_transactions_processed = svm_instance.transactions_processed; + + // Create a multi-instruction transaction where: + // - First instruction: valid transfer from payer to recipient (would succeed alone) + // - Second instruction: invalid transfer from unfunded account (will fail) + let valid_instruction = 
transfer(&payer.pubkey(), &recipient, lamports_to_send); + let invalid_instruction = + transfer(&payer_without_funds.pubkey(), &recipient, lamports_to_send); + + let latest_blockhash = svm_instance.latest_blockhash(); + let message = Message::new_with_blockhash( + &[valid_instruction, invalid_instruction], + Some(&payer.pubkey()), + &latest_blockhash, + ); + let transaction = VersionedTransaction::try_new( + VersionedMessage::Legacy(message), + &[&payer, &payer_without_funds], + ) + .unwrap(); + let signature = transaction.signatures[0]; + + // Create the locker and status channel for transaction processing + let svm_locker = SurfnetSvmLocker::new(svm_instance); + let (status_tx, _status_rx) = crossbeam_unbounded::(); + + // Process the transaction using the actual execution path + // This will trigger instruction-level profiling since it's enabled by default + let process_result = svm_locker + .process_transaction( + &None, // no remote context + transaction.clone(), + status_tx, + true, // skip_preflight + false, // sigverify + ) + .await; + + // The transaction should fail due to the second instruction + // But the profile result should still be written + assert!( + process_result.is_err() || process_result.is_ok(), + "process_transaction should complete (success or failure)" + ); + + // Retrieve the profile result using the signature + let key = UuidOrSignature::Signature(signature); + let profile_result = svm_locker + .get_profile_result(key, &RpcProfileResultConfig::default()) + .unwrap() + .expect("Profile result should exist for executed transaction"); + + // Verify the overall transaction failed (due to second instruction) + assert!( + profile_result.transaction_profile.error_message.is_some(), + "Transaction should fail due to second instruction's insufficient funds" + ); + + // Verify instruction profiles were generated + assert!( + profile_result.instruction_profiles.is_some(), + "Instruction profiles should be generated when instruction profiling is 
enabled" + ); + + let instruction_profiles = profile_result.instruction_profiles.as_ref().unwrap(); + assert_eq!( + instruction_profiles.len(), + 2, + "Should have profiles for both instructions" + ); + + // Verify first instruction profile shows SUCCESS (it was profiled independently) + let first_ix_profile = &instruction_profiles[0]; + assert!( + first_ix_profile.error_message.is_none(), + "First instruction profile should succeed: {:?}", + first_ix_profile.error_message + ); + assert!( + first_ix_profile.compute_units_consumed > 0, + "First instruction should have consumed compute units" + ); + + // Verify second instruction profile shows FAILURE + let second_ix_profile = &instruction_profiles[1]; + assert!( + second_ix_profile.error_message.is_some(), + "Second instruction should fail due to insufficient funds" + ); + + // NOW THE CRITICAL PART: Verify that instruction profiling didn't leak state + // Even though the first instruction was profiled successfully, its effects + // should NOT be persisted because: + // 1. The instruction profiling uses clone_for_profiling() with OverlayStorage + // 2. The overall transaction failed, so no state changes are committed + // + // Note: Failed transactions in Solana still deduct fees from the fee payer. + // This is expected behavior and not related to instruction profiling. + + let final_payer_balance = svm_locker + .with_svm_reader(|svm| svm.get_account(&payer.pubkey())) + .unwrap() + .map(|a| a.lamports) + .unwrap_or(0); + let final_recipient_account = svm_locker + .with_svm_reader(|svm| svm.get_account(&recipient)) + .ok() + .flatten(); + let final_transactions_processed = + svm_locker.with_svm_reader(|svm| svm.transactions_processed); + + // THE KEY ASSERTION: Recipient should NOT have received funds + // This proves that the first instruction's transfer (which was profiled successfully) + // was NOT committed to the actual state. 
The instruction profiling used + // clone_for_profiling() so its mutations were isolated. + assert_eq!( + initial_recipient_account.as_ref().map(|a| a.lamports), + final_recipient_account.as_ref().map(|a| a.lamports), + "Recipient should not have received funds - instruction profiling must not leak state" + ); + + // Payer balance should only decrease by the transaction fee (not by the transfer amount) + // Failed transactions still pay fees in Solana, but the transfer should not have occurred + let balance_decrease = initial_payer_balance.saturating_sub(final_payer_balance); + assert!( + balance_decrease < lamports_to_send, + "Payer should only lose transaction fee, not the transfer amount. Lost: {} lamports", + balance_decrease + ); + + // Transaction count increments even for failed transactions (they were still processed) + // This is expected behavior - we're verifying instruction profiling isolation, not tx count + assert_eq!( + final_transactions_processed, + initial_transactions_processed + 1, + "Transaction count should increment after processing (even for failed tx)" + ); +} From 23770ab8d474ce4880e5ff055d2f64a7f23467d7 Mon Sep 17 00:00:00 2001 From: MicaiahReid Date: Tue, 13 Jan 2026 09:53:46 -0500 Subject: [PATCH 54/54] fmt --- crates/core/src/rpc/full.rs | 43 +++++++++++++++++++--------- crates/core/src/storage/fifo_map.rs | 3 +- crates/core/src/storage/hash_map.rs | 3 +- crates/core/src/storage/mod.rs | 14 ++++----- crates/core/src/storage/overlay.rs | 41 +++++++++++++------------- crates/core/src/storage/postgres.rs | 24 ++++++++++++---- crates/core/src/storage/sqlite.rs | 6 ++-- crates/core/src/surfnet/svm.rs | 3 +- crates/core/src/tests/integration.rs | 12 ++++---- crates/core/src/types.rs | 6 ++-- 10 files changed, 93 insertions(+), 62 deletions(-) diff --git a/crates/core/src/rpc/full.rs b/crates/core/src/rpc/full.rs index 2154dc869..122231867 100644 --- a/crates/core/src/rpc/full.rs +++ b/crates/core/src/rpc/full.rs @@ -2313,7 +2313,11 @@ impl 
Full for SurfpoolFullRpc { .iter() .filter_map(|signature| { // Check if the signature exists in the transactions map - transactions.get(&signature.to_string()).ok().flatten().map(|tx| (slot, tx)) + transactions + .get(&signature.to_string()) + .ok() + .flatten() + .map(|tx| (slot, tx)) }) .collect::>() }) @@ -2533,10 +2537,16 @@ mod tests { ..Default::default() }; let mutated_accounts = std::collections::HashSet::new(); - writer.transactions.store( - sig.to_string(), - SurfnetTransactionStatus::processed(tx_with_status_meta, mutated_accounts), - ).unwrap(); + writer + .transactions + .store( + sig.to_string(), + SurfnetTransactionStatus::processed( + tx_with_status_meta, + mutated_accounts, + ), + ) + .unwrap(); status_tx .send(TransactionStatusEvent::Success( TransactionConfirmationStatus::Confirmed, @@ -2724,7 +2734,11 @@ mod tests { "transaction is not found in the SVM" ); assert!( - state_reader.transactions.get(&sig.to_string()).unwrap().is_some(), + state_reader + .transactions + .get(&sig.to_string()) + .unwrap() + .is_some(), "transaction is not found in the history" ); } @@ -4565,13 +4579,16 @@ mod tests { ..Default::default() }; let mutated_accounts = std::collections::HashSet::new(); - writer.transactions.store( - sig.to_string(), - SurfnetTransactionStatus::processed( - tx_with_status_meta, - mutated_accounts, - ), - ).unwrap(); + writer + .transactions + .store( + sig.to_string(), + SurfnetTransactionStatus::processed( + tx_with_status_meta, + mutated_accounts, + ), + ) + .unwrap(); status_tx .send(TransactionStatusEvent::Success( TransactionConfirmationStatus::Processed, diff --git a/crates/core/src/storage/fifo_map.rs b/crates/core/src/storage/fifo_map.rs index 0f1ece473..461290c59 100644 --- a/crates/core/src/storage/fifo_map.rs +++ b/crates/core/src/storage/fifo_map.rs @@ -1,5 +1,6 @@ -use serde::{Deserialize, Serialize}; use std::hash::Hash; + +use serde::{Deserialize, Serialize}; use surfpool_types::FifoMap; impl super::Storage for FifoMap diff 
--git a/crates/core/src/storage/hash_map.rs b/crates/core/src/storage/hash_map.rs index a84e53b71..2af4c2448 100644 --- a/crates/core/src/storage/hash_map.rs +++ b/crates/core/src/storage/hash_map.rs @@ -1,7 +1,8 @@ -use serde::{Deserialize, Serialize}; pub use std::collections::HashMap; use std::hash::Hash; +use serde::{Deserialize, Serialize}; + impl super::Storage for HashMap where K: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static + std::cmp::Eq + Hash, diff --git a/crates/core/src/storage/mod.rs b/crates/core/src/storage/mod.rs index bbb5f381b..fb74588ba 100644 --- a/crates/core/src/storage/mod.rs +++ b/crates/core/src/storage/mod.rs @@ -189,11 +189,7 @@ impl StorageError { QueryExecuteError::GetAllKeyValuePairsError(e), ) } - pub fn count( - table_name: &str, - db_type: &str, - e: surfpool_db::diesel::result::Error, - ) -> Self { + pub fn count(table_name: &str, db_type: &str, e: surfpool_db::diesel::result::Error) -> Self { StorageError::QueryError( table_name.to_string(), db_type.to_string(), @@ -267,9 +263,11 @@ pub trait StorageConstructor: Storage + Clone { #[cfg(test)] pub mod tests { - use std::collections::hash_map::RandomState; - use std::hash::{BuildHasher, Hasher}; - use std::os::unix::fs::PermissionsExt; + use std::{ + collections::hash_map::RandomState, + hash::{BuildHasher, Hasher}, + os::unix::fs::PermissionsExt, + }; use crossbeam_channel::Receiver; use surfpool_types::SimnetEvent; diff --git a/crates/core/src/storage/overlay.rs b/crates/core/src/storage/overlay.rs index 3cc41a522..a83404cc9 100644 --- a/crates/core/src/storage/overlay.rs +++ b/crates/core/src/storage/overlay.rs @@ -1,6 +1,8 @@ -use std::collections::{HashMap, HashSet}; -use std::hash::Hash; -use std::sync::{Arc, RwLock}; +use std::{ + collections::{HashMap, HashSet}, + hash::Hash, + sync::{Arc, RwLock}, +}; use serde::{Deserialize, Serialize}; @@ -84,7 +86,10 @@ where drop(overlay); // Release read lock before querying base // Check if base was cleared - 
let base_cleared = self.base_cleared.read().map_err(|_| StorageError::LockError)?; + let base_cleared = self + .base_cleared + .read() + .map_err(|_| StorageError::LockError)?; if *base_cleared { return Ok(None); } @@ -115,7 +120,10 @@ where drop(overlay); // Check if base was cleared - let base_cleared = self.base_cleared.read().map_err(|_| StorageError::LockError)?; + let base_cleared = self + .base_cleared + .read() + .map_err(|_| StorageError::LockError)?; if *base_cleared { return Ok(None); } @@ -282,10 +290,7 @@ mod tests { ); // Base should still have original value - assert_eq!( - base.get(&"key1".into()).unwrap(), - Some("base_value".into()) - ); + assert_eq!(base.get(&"key1".into()).unwrap(), Some("base_value".into())); } #[test] @@ -315,10 +320,7 @@ mod tests { assert_eq!(overlay.get(&"key1".into()).unwrap(), None); // Base should still have the value - assert_eq!( - base.get(&"key1".into()).unwrap(), - Some("base_value".into()) - ); + assert_eq!(base.get(&"key1".into()).unwrap(), Some("base_value".into())); } #[test] @@ -348,10 +350,7 @@ mod tests { assert_eq!(overlay.keys().unwrap().len(), 0); // Base should still have the value - assert_eq!( - base.get(&"key1".into()).unwrap(), - Some("base_value".into()) - ); + assert_eq!(base.get(&"key1".into()).unwrap(), Some("base_value".into())); } #[test] @@ -406,8 +405,10 @@ mod tests { assert_eq!(entries.len(), 2); assert!(entries.contains(&("base_key".into(), "base_value".into()))); assert!(entries.contains(&("overlay_key".into(), "overlay_value".into()))); - assert!(!entries - .iter() - .any(|(k, _)| k == &String::from("deleted_key"))); + assert!( + !entries + .iter() + .any(|(k, _)| k == &String::from("deleted_key")) + ); } } diff --git a/crates/core/src/storage/postgres.rs b/crates/core/src/storage/postgres.rs index f26c1e272..43df79c13 100644 --- a/crates/core/src/storage/postgres.rs +++ b/crates/core/src/storage/postgres.rs @@ -1,5 +1,7 @@ -use std::collections::HashMap; -use std::sync::{Mutex, 
OnceLock}; +use std::{ + collections::HashMap, + sync::{Mutex, OnceLock}, +}; use log::debug; use serde::{Deserialize, Serialize}; @@ -16,18 +18,28 @@ use crate::storage::{Storage, StorageConstructor, StorageError, StorageResult}; /// Global shared connection pools keyed by database URL. /// This allows multiple PostgresStorage instances to share the same pool, /// which is essential for tests that run in parallel. -static SHARED_POOLS: OnceLock>>>> = OnceLock::new(); +static SHARED_POOLS: OnceLock< + Mutex>>>, +> = OnceLock::new(); -fn get_or_create_shared_pool(database_url: &str) -> StorageResult>> { +fn get_or_create_shared_pool( + database_url: &str, +) -> StorageResult>> { let pools = SHARED_POOLS.get_or_init(|| Mutex::new(HashMap::new())); let mut pools_guard = pools.lock().map_err(|_| StorageError::LockError)?; if let Some(pool) = pools_guard.get(database_url) { - debug!("Reusing existing shared PostgreSQL connection pool for {}", database_url); + debug!( + "Reusing existing shared PostgreSQL connection pool for {}", + database_url + ); return Ok(pool.clone()); } - debug!("Creating new shared PostgreSQL connection pool for {}", database_url); + debug!( + "Creating new shared PostgreSQL connection pool for {}", + database_url + ); let manager = ConnectionManager::::new(database_url); let pool = Pool::builder() .max_size(10) // Limit total connections across all tests diff --git a/crates/core/src/storage/sqlite.rs b/crates/core/src/storage/sqlite.rs index 83a6abf99..6cd4a7cd8 100644 --- a/crates/core/src/storage/sqlite.rs +++ b/crates/core/src/storage/sqlite.rs @@ -1,5 +1,7 @@ -use std::collections::HashSet; -use std::sync::{Mutex, OnceLock}; +use std::{ + collections::HashSet, + sync::{Mutex, OnceLock}, +}; use log::debug; use serde::{Deserialize, Serialize}; diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index 44825c368..2fa7344c5 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -3172,9 +3172,8 
@@ mod tests { use spl_token_interface::state::{Account as TokenAccount, AccountState}; use test_case::test_case; - use crate::storage::tests::TestType; - use super::*; + use crate::storage::tests::TestType; #[test_case(TestType::sqlite(); "with on-disk sqlite db")] #[test_case(TestType::in_memory(); "with in-memory sqlite db")] diff --git a/crates/core/src/tests/integration.rs b/crates/core/src/tests/integration.rs index 919da1593..ae3f6ff2d 100644 --- a/crates/core/src/tests/integration.rs +++ b/crates/core/src/tests/integration.rs @@ -6896,8 +6896,7 @@ async fn test_instruction_profiling_does_not_mutate_state(test_type: TestType) { // - First instruction: valid transfer from payer to recipient (would succeed alone) // - Second instruction: invalid transfer from unfunded account (will fail) let valid_instruction = transfer(&payer.pubkey(), &recipient, lamports_to_send); - let invalid_instruction = - transfer(&payer_without_funds.pubkey(), &recipient, lamports_to_send); + let invalid_instruction = transfer(&payer_without_funds.pubkey(), &recipient, lamports_to_send); let latest_blockhash = svm_instance.latest_blockhash(); let message = Message::new_with_blockhash( @@ -6920,11 +6919,11 @@ async fn test_instruction_profiling_does_not_mutate_state(test_type: TestType) { // This will trigger instruction-level profiling since it's enabled by default let process_result = svm_locker .process_transaction( - &None, // no remote context + &None, // no remote context transaction.clone(), status_tx, - true, // skip_preflight - false, // sigverify + true, // skip_preflight + false, // sigverify ) .await; @@ -6998,8 +6997,7 @@ async fn test_instruction_profiling_does_not_mutate_state(test_type: TestType) { .with_svm_reader(|svm| svm.get_account(&recipient)) .ok() .flatten(); - let final_transactions_processed = - svm_locker.with_svm_reader(|svm| svm.transactions_processed); + let final_transactions_processed = svm_locker.with_svm_reader(|svm| svm.transactions_processed); // THE 
KEY ASSERTION: Recipient should NOT have received funds // This proves that the first instruction's transfer (which was profiled successfully) diff --git a/crates/core/src/types.rs b/crates/core/src/types.rs index e0fad8695..9a43d559f 100644 --- a/crates/core/src/types.rs +++ b/crates/core/src/types.rs @@ -7,7 +7,10 @@ use chrono::Utc; use litesvm::types::TransactionMetadata; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use solana_account::Account; -use solana_account_decoder::parse_token::UiTokenAmount; +use solana_account_decoder::{ + parse_account_data::{AccountAdditionalDataV3, SplTokenAdditionalDataV2}, + parse_token::UiTokenAmount, +}; use solana_clock::{Epoch, Slot}; use solana_hash::Hash; use solana_message::{ @@ -33,7 +36,6 @@ use solana_transaction_status::{ parse_accounts::{parse_legacy_message_accounts, parse_v0_message_accounts}, parse_ui_inner_instructions, }; -use solana_account_decoder::parse_account_data::{AccountAdditionalDataV3, SplTokenAdditionalDataV2}; use spl_token_2022_interface::extension::{ StateWithExtensions, interest_bearing_mint::InterestBearingConfig, scaled_ui_amount::ScaledUiAmountConfig,