diff --git a/Cargo.lock b/Cargo.lock index e9f252ecb5..b209e3506c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -829,6 +829,18 @@ dependencies = [ "futures-core", ] +[[package]] +name = "async-channel" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "924ed96dd52d1b75e9c1a3e6275715fd320f5f9439fb5a4a11fa51f4221158d2" +dependencies = [ + "concurrent-queue", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] + [[package]] name = "async-compression" version = "0.4.32" @@ -2244,6 +2256,7 @@ dependencies = [ "account-compression", "anchor-lang", "anyhow", + "async-channel 2.5.0", "async-stream", "async-trait", "base64 0.22.1", @@ -2270,6 +2283,7 @@ dependencies = [ "light-ctoken-types", "light-hash-set", "light-hasher", + "light-indexed-array", "light-merkle-tree-metadata", "light-merkle-tree-reference", "light-program-test", @@ -2310,6 +2324,7 @@ version = "2.0.0" dependencies = [ "account-compression", "anchor-lang", + "anyhow", "async-stream", "async-trait", "bb8", @@ -2325,11 +2340,15 @@ dependencies = [ "light-hasher", "light-indexed-merkle-tree", "light-merkle-tree-metadata", + "light-merkle-tree-reference", "light-prover-client", "light-registry", "light-sdk", "light-sparse-merkle-tree", + "num-bigint 0.4.6", "num-traits", + "serde", + "serde_json", "solana-sdk", "thiserror 2.0.17", "tokio", @@ -3971,6 +3990,7 @@ dependencies = [ "ark-bn254 0.5.0", "ark-serialize 0.5.0", "ark-std 0.5.0", + "light-compressed-account", "light-hasher", "light-indexed-array", "light-indexed-merkle-tree", @@ -8619,7 +8639,7 @@ version = "2.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5643516e5206b89dd4bdf67c39815606d835a51a13260e43349abdb92d241b1d" dependencies = [ - "async-channel", + "async-channel 1.9.0", "bytes", "crossbeam-channel", "dashmap 5.5.3", diff --git a/Cargo.toml b/Cargo.toml index 8d2ed244ee..3992ea327a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,4 +1,5 @@ [workspace] + members = [ "program-libs/account-checks", "program-libs/array-map", diff --git a/forester-utils/Cargo.toml b/forester-utils/Cargo.toml index 9c60fed55a..3e31886759 100644 --- a/forester-utils/Cargo.toml +++ b/forester-utils/Cargo.toml @@ -21,6 +21,7 @@ light-indexed-merkle-tree = { workspace = true } light-compressed-account = { workspace = true, features = ["std"] } light-batched-merkle-tree = { workspace = true } light-merkle-tree-metadata = { workspace = true } +light-merkle-tree-reference = { workspace = true } light-sparse-merkle-tree = { workspace = true } light-account-checks = { workspace = true } light-sdk = { workspace = true } @@ -39,10 +40,12 @@ anchor-lang = { workspace = true } solana-sdk = { workspace = true } thiserror = { workspace = true } +anyhow = { workspace = true } tracing = { workspace = true } num-traits = { workspace = true } +num-bigint = { workspace = true } bb8 = { workspace = true } async-trait = { workspace = true } @@ -51,3 +54,5 @@ governor = { workspace = true } [dev-dependencies] tokio-postgres = "0.7" bs58 = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } diff --git a/forester-utils/src/address_staging_tree.rs b/forester-utils/src/address_staging_tree.rs new file mode 100644 index 0000000000..207d20295b --- /dev/null +++ b/forester-utils/src/address_staging_tree.rs @@ -0,0 +1,166 @@ +use light_batched_merkle_tree::constants::DEFAULT_BATCH_ADDRESS_TREE_HEIGHT; +use light_hasher::Poseidon; +use light_prover_client::proof_types::batch_address_append::{ 
+ get_batch_address_append_circuit_inputs, BatchAddressAppendInputs, +}; +use light_sparse_merkle_tree::{ + changelog::ChangelogEntry, indexed_changelog::IndexedChangelogEntry, SparseMerkleTree, +}; +use tracing::debug; + +use crate::error::ForesterUtilsError; + +const HEIGHT: usize = DEFAULT_BATCH_ADDRESS_TREE_HEIGHT as usize; + +/// Result of processing a batch of address appends +#[derive(Clone, Debug)] +pub struct AddressBatchResult { + pub circuit_inputs: BatchAddressAppendInputs, + pub new_root: [u8; 32], + pub old_root: [u8; 32], +} + +/// Staging tree for indexed (address) Merkle trees. +/// Uses SparseMerkleTree and changelogs to properly compute proofs +/// for batch address appends with concurrent updates. +#[derive(Clone, Debug)] +pub struct AddressStagingTree { + sparse_tree: SparseMerkleTree, + changelog: Vec>, + indexed_changelog: Vec>, + current_root: [u8; 32], + next_index: usize, +} + +impl AddressStagingTree { + /// Creates a new AddressStagingTree from subtrees data. + /// + /// # Arguments + /// * `subtrees` - Array of subtree hashes for SparseMerkleTree initialization + /// * `start_index` - The tree's next_index where new leaves will be appended + /// * `initial_root` - The current root of the tree + pub fn new(subtrees: [[u8; 32]; HEIGHT], start_index: usize, initial_root: [u8; 32]) -> Self { + debug!( + "AddressStagingTree::new: start_index={}, initial_root={:?}[..4]", + start_index, + &initial_root[..4] + ); + + Self { + sparse_tree: SparseMerkleTree::new(subtrees, start_index), + changelog: Vec::new(), + indexed_changelog: Vec::new(), + current_root: initial_root, + next_index: start_index, + } + } + + /// Creates a new AddressStagingTree from a Vec of subtrees. + /// The subtrees Vec must have exactly HEIGHT elements. + pub fn from_subtrees_vec( + subtrees: Vec<[u8; 32]>, + start_index: usize, + initial_root: [u8; 32], + ) -> Result { + let subtrees_array: [[u8; 32]; HEIGHT] = + subtrees.try_into().map_err(|v: Vec<[u8; 32]>| { + ForesterUtilsError::AddressStagingTree(format!( + "Invalid subtrees length: expected {}, got {}", + HEIGHT, + v.len() + )) + })?; + Ok(Self::new(subtrees_array, start_index, initial_root)) + } + + /// Returns the current root of the tree. + pub fn current_root(&self) -> [u8; 32] { + self.current_root + } + + /// Returns the current next_index of the tree. + pub fn next_index(&self) -> usize { + self.next_index + } + + /// Processes a batch of address appends and returns the circuit inputs. 
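+    ///
+    /// Illustrative usage, a sketch with placeholder bindings, assuming the
+    /// low-element data and `leaves_hashchain` were fetched from the indexer:
+    /// ```ignore
+    /// let batch_size = addresses.len();
+    /// let result = staging.process_batch(
+    ///     addresses,
+    ///     low_element_values,
+    ///     low_element_next_values,
+    ///     low_element_indices,
+    ///     low_element_next_indices,
+    ///     low_element_proofs,
+    ///     leaves_hashchain,
+    ///     batch_size,
+    /// )?;
+    /// // The staging tree now tracks the post-batch state.
+    /// assert_eq!(staging.current_root(), result.new_root);
+    /// ```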
+ /// + /// # Arguments + /// * `addresses` - The new addresses (element values) to append + /// * `low_element_values` - Values of low elements + /// * `low_element_next_values` - Next values of low elements + /// * `low_element_indices` - Indices of low elements + /// * `low_element_next_indices` - Next indices of low elements + /// * `low_element_proofs` - Merkle proofs for low elements + /// * `leaves_hashchain` - Pre-computed hash chain of the addresses + /// * `zkp_batch_size` - Number of addresses in this batch + #[allow(clippy::too_many_arguments)] + pub fn process_batch( + &mut self, + addresses: Vec<[u8; 32]>, + low_element_values: Vec<[u8; 32]>, + low_element_next_values: Vec<[u8; 32]>, + low_element_indices: Vec, + low_element_next_indices: Vec, + low_element_proofs: Vec>, + leaves_hashchain: [u8; 32], + zkp_batch_size: usize, + ) -> Result { + let old_root = self.current_root; + let start_index = self.next_index; + + debug!( + "AddressStagingTree::process_batch: {} addresses, start_index={}, old_root={:?}[..4]", + addresses.len(), + start_index, + &old_root[..4] + ); + + let circuit_inputs = get_batch_address_append_circuit_inputs::( + start_index, + old_root, + low_element_values, + low_element_next_values, + low_element_indices, + low_element_next_indices, + low_element_proofs, + addresses, + &mut self.sparse_tree, + leaves_hashchain, + zkp_batch_size, + &mut self.changelog, + &mut self.indexed_changelog, + ) + .map_err(|e| { + ForesterUtilsError::AddressStagingTree(format!("Circuit input error: {}", e)) + })?; + + // Update state + let new_root = + light_hasher::bigint::bigint_to_be_bytes_array::<32>(&circuit_inputs.new_root) + .map_err(|e| { + ForesterUtilsError::AddressStagingTree(format!("Root conversion error: {}", e)) + })?; + + self.current_root = new_root; + self.next_index += zkp_batch_size; + + debug!( + "AddressStagingTree::process_batch complete: new_root={:?}[..4], next_index={}", + &new_root[..4], + self.next_index + ); + + Ok(AddressBatchResult { + circuit_inputs, + new_root, + old_root, + }) + } + + /// Clears the changelogs. Call this when resetting the staging tree. + pub fn clear_changelogs(&mut self) { + self.changelog.clear(); + self.indexed_changelog.clear(); + } +} diff --git a/forester-utils/src/error.rs b/forester-utils/src/error.rs index 9e7500c217..9708038c52 100644 --- a/forester-utils/src/error.rs +++ b/forester-utils/src/error.rs @@ -28,4 +28,10 @@ pub enum ForesterUtilsError { #[error("pool error: {0}")] Pool(#[from] PoolError), + + #[error("error: {0}")] + StagingTree(String), + + #[error("address staging tree error: {0}")] + AddressStagingTree(String), } diff --git a/forester-utils/src/forester_epoch.rs b/forester-utils/src/forester_epoch.rs index 582dabef29..5ec755f567 100644 --- a/forester-utils/src/forester_epoch.rs +++ b/forester-utils/src/forester_epoch.rs @@ -182,6 +182,23 @@ impl TreeForesterSchedule { pub fn is_eligible(&self, forester_slot: u64) -> bool { self.slots[forester_slot as usize].is_some() } + + /// Returns the end solana slot of the last consecutive eligible slot + /// starting from the given light slot index. 
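+    ///
+    /// For example (hypothetical schedule), with
+    /// `slots = [Some(s0), Some(s1), None, Some(s3)]` and `from_slot_idx = 0`,
+    /// this returns `Some(s1.end_solana_slot)`: the scan stops at the first
+    /// `None`. If the slot at `from_slot_idx` is itself `None`, this returns
+    /// `None`.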
+ pub fn get_consecutive_eligibility_end(&self, from_slot_idx: usize) -> Option { + let mut last_eligible_end = None; + + for slot_opt in self.slots.iter().skip(from_slot_idx) { + match slot_opt { + Some(slot) => { + last_eligible_end = Some(slot.end_solana_slot); + } + None => break, + } + } + + last_eligible_end + } } #[derive(Debug, Clone, AnchorSerialize, AnchorDeserialize, Default, PartialEq, Eq)] diff --git a/forester-utils/src/indexed_tree_processor.rs b/forester-utils/src/indexed_tree_processor.rs new file mode 100644 index 0000000000..d281270ea6 --- /dev/null +++ b/forester-utils/src/indexed_tree_processor.rs @@ -0,0 +1,288 @@ +use std::collections::HashMap; + +use light_batched_merkle_tree::constants::DEFAULT_BATCH_ADDRESS_TREE_HEIGHT; +use light_concurrent_merkle_tree::light_hasher::Hasher; +use light_hasher::Poseidon; +use light_indexed_merkle_tree::{array::IndexedElement, reference::IndexedMerkleTree}; +use num_bigint::BigUint; +use tracing::{debug, warn}; + +use crate::error::ForesterUtilsError; + +const HEIGHT: usize = DEFAULT_BATCH_ADDRESS_TREE_HEIGHT as usize; + +/// Decode node index from encoded format: (level << 56) | position +#[inline] +fn decode_node_index(encoded: u64) -> (u8, u64) { + let level = (encoded >> 56) as u8; + let position = encoded & 0x00FF_FFFF_FFFF_FFFF; + (level, position) +} + +/// Result of processing a batch of address appends using IndexedMerkleTree +#[derive(Clone, Debug)] +pub struct AddressBatchResult { + /// Proofs for the new elements (one proof per address in the batch) + pub new_element_proofs: Vec>, + /// The final root after all batch operations + pub new_root: [u8; 32], + /// The initial root before batch operations + pub old_root: [u8; 32], + /// Public input hash for the circuit + pub public_input_hash: [u8; 32], +} + +/// Tree processor using reference IndexedMerkleTree implementation +/// Initialized from indexer's node data, processes batches sequentially +pub struct IndexedTreeProcessor { + tree: IndexedMerkleTree, + next_index: usize, +} + +impl IndexedTreeProcessor { + /// Creates a new IndexedTreeProcessor by initializing an IndexedMerkleTree + /// from the node data provided by the indexer. 
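+    ///
+    /// The `nodes` entries use the packed index layout handled by
+    /// `decode_node_index` above: the top byte carries the level and the low
+    /// 56 bits the position. For instance:
+    /// ```ignore
+    /// let (level, position) = decode_node_index((3u64 << 56) | 5);
+    /// assert_eq!((level, position), (3, 5));
+    /// ```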
+ /// + /// # Arguments + /// * `nodes` - Encoded node indices from indexer (format: level << 56 | position) + /// * `node_hashes` - Corresponding node hashes + /// * `start_index` - The tree's next_index where new leaves will be appended + /// * `initial_root` - The current root of the tree (for verification) + /// * `tree_height` - Height of the merkle tree + /// + /// # Returns + /// A new IndexedTreeProcessor ready to process batches + pub fn new( + nodes: Vec, + node_hashes: Vec<[u8; 32]>, + start_index: usize, + initial_root: [u8; 32], + tree_height: usize, + ) -> Result { + debug!( + "IndexedTreeProcessor::new: start_index={}, initial_root={:?}[..4], nodes_count={}", + start_index, + &initial_root[..4], + nodes.len() + ); + + if tree_height != HEIGHT { + return Err(ForesterUtilsError::AddressStagingTree(format!( + "Invalid tree height: expected {}, got {}", + HEIGHT, tree_height + ))); + } + + // Build a map of (level, position) -> hash for efficient lookup + let mut node_map: HashMap<(u8, u64), [u8; 32]> = HashMap::new(); + for (encoded_idx, hash) in nodes.iter().zip(node_hashes.iter()) { + let (level, position) = decode_node_index(*encoded_idx); + node_map.insert((level, position), *hash); + } + + debug!("Built node map with {} entries", node_map.len()); + + // Create IndexedMerkleTree using its constructor + let tree = IndexedMerkleTree::::new(HEIGHT, 0).map_err(|e| { + ForesterUtilsError::AddressStagingTree(format!( + "Failed to create IndexedMerkleTree: {}", + e + )) + })?; + + warn!( + "IndexedTreeProcessor initialized with {} node entries (full reconstruction not yet implemented)", + node_map.len() + ); + + Ok(Self { + tree, + next_index: start_index, + }) + } + + /// Simpler initialization from subtrees (existing pattern) + /// Use this for now until we implement full node reconstruction + pub fn from_subtrees( + subtrees: Vec<[u8; 32]>, + start_index: usize, + initial_root: [u8; 32], + ) -> Result { + debug!( + "IndexedTreeProcessor::from_subtrees: start_index={}, initial_root={:?}[..4]", + start_index, + &initial_root[..4] + ); + + let _subtrees_array: [[u8; 32]; HEIGHT] = + subtrees.try_into().map_err(|v: Vec<[u8; 32]>| { + ForesterUtilsError::AddressStagingTree(format!( + "Invalid subtrees length: expected {}, got {}", + HEIGHT, + v.len() + )) + })?; + + // Use the proper constructor which initializes with zero leaf + let mut tree = IndexedMerkleTree::::new(HEIGHT, 0).map_err(|e| { + ForesterUtilsError::AddressStagingTree(format!( + "Failed to create IndexedMerkleTree: {}", + e + )) + })?; + + // Initialize the tree with the first two elements (required for indexed trees) + tree.init().map_err(|e| { + ForesterUtilsError::AddressStagingTree(format!( + "Failed to initialize indexed tree: {}", + e + )) + })?; + + Ok(Self { + tree, + next_index: start_index, + }) + } + + /// Returns the current root of the tree + pub fn current_root(&self) -> [u8; 32] { + self.tree.root() + } + + /// Returns the current next_index of the tree + pub fn next_index(&self) -> usize { + self.next_index + } + + /// Processes a batch of address appends sequentially, computing circuit inputs. 
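+    ///
+    /// Each append is the standard indexed-tree two-step: the low element
+    /// (the largest existing value below the new address) is updated, and the
+    /// new element inherits the low element's former next pointer, keeping
+    /// the linked list of values sorted:
+    /// ```ignore
+    /// // before: low.value < addr < low.next_value
+    /// // after:  low.next -> new, new.next -> old low.next
+    /// ```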
+ /// # Arguments + /// * `addresses` - The new addresses to append (as raw bytes) + /// * `low_element_values` - Values of low elements (addresses) + /// * `low_element_next_values` - Next values of low elements + /// * `low_element_indices` - Indices of low elements in the tree + /// * `low_element_next_indices` - Next indices referenced by low elements + /// * `low_element_proofs` - Merkle proofs for low elements (not used in new approach) + /// * `leaves_hashchain` - Pre-computed hash chain of the addresses + /// * `zkp_batch_size` - Number of addresses in this batch + #[allow(clippy::too_many_arguments)] + pub fn process_batch( + &mut self, + addresses: Vec<[u8; 32]>, + low_element_values: Vec<[u8; 32]>, + low_element_next_values: Vec<[u8; 32]>, + low_element_indices: Vec, + low_element_next_indices: Vec, + _low_element_proofs: Vec>, // Not used - we compute from tree + leaves_hashchain: [u8; 32], + zkp_batch_size: usize, + ) -> Result { + let old_root = self.current_root(); + let start_index = self.next_index; + + debug!( + "IndexedTreeProcessor::process_batch: {} addresses, start_index={}, old_root={:?}[..4]", + addresses.len(), + start_index, + &old_root[..4] + ); + + if addresses.len() != zkp_batch_size { + return Err(ForesterUtilsError::AddressStagingTree(format!( + "Address count mismatch: got {}, expected {}", + addresses.len(), + zkp_batch_size + ))); + } + + let mut new_element_proofs = Vec::with_capacity(zkp_batch_size); + + // Process each address sequentially + for i in 0..zkp_batch_size { + let address_bigint = BigUint::from_bytes_be(&addresses[i]); + let low_value_bigint = BigUint::from_bytes_be(&low_element_values[i]); + let next_value_bigint = BigUint::from_bytes_be(&low_element_next_values[i]); + + // Get proof BEFORE update (this is critical!) 
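+            // (The proof must witness the pre-update tree: it is taken for
+            // the still-empty slot at `new_index` against the current root.
+            // Taking it after `tree.update` would witness the post-insertion
+            // state, and the resulting circuit inputs would not verify.)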
+ let new_index = start_index + i; + let proof = self.tree.get_proof_of_leaf(new_index, true).map_err(|e| { + ForesterUtilsError::AddressStagingTree(format!( + "Failed to get proof for index {}: {}", + new_index, e + )) + })?; + + new_element_proofs.push(proof.as_slice().to_vec()); + + // Create IndexedElement structs for the update + let new_low_element = IndexedElement { + index: low_element_indices[i], + value: low_value_bigint, + next_index: low_element_next_indices[i], + }; + + let new_element = IndexedElement { + index: new_index, + value: address_bigint.clone(), + next_index: low_element_next_indices[i], + }; + + // Perform the update (modifies tree in-memory) + self.tree + .update(&new_low_element, &new_element, &next_value_bigint) + .map_err(|e| { + ForesterUtilsError::AddressStagingTree(format!( + "Failed to update tree at index {}: {}", + i, e + )) + })?; + + debug!( + "Processed address {}/{}: new_root={:?}[..4]", + i + 1, + zkp_batch_size, + &self.tree.root()[..4] + ); + } + + let new_root = self.current_root(); + self.next_index += zkp_batch_size; + + // Compute public input hash: hash(old_root, new_root, hashchain, start_index) + let public_input_hash = + Self::compute_public_input_hash(&old_root, &new_root, &leaves_hashchain, start_index)?; + + debug!( + "IndexedTreeProcessor::process_batch complete: new_root={:?}[..4], next_index={}", + &new_root[..4], + self.next_index + ); + + Ok(AddressBatchResult { + new_element_proofs, + new_root, + old_root, + public_input_hash, + }) + } + + /// Compute public input hash for the circuit + /// Formula: hash(old_root, new_root, hashchain, start_index) + fn compute_public_input_hash( + old_root: &[u8; 32], + new_root: &[u8; 32], + hashchain: &[u8; 32], + start_index: usize, + ) -> Result<[u8; 32], ForesterUtilsError> { + let start_index_bytes = (start_index as u64).to_be_bytes(); + let mut start_index_32 = [0u8; 32]; + start_index_32[24..].copy_from_slice(&start_index_bytes); + + Poseidon::hashv(&[old_root, new_root, hashchain, &start_index_32]).map_err(|e| { + ForesterUtilsError::AddressStagingTree(format!( + "Failed to compute public input hash: {}", + e + )) + }) + } +} diff --git a/forester-utils/src/instructions/address_batch_update.rs b/forester-utils/src/instructions/address_batch_update.rs index 97a8cd24b4..ca1bc8d18a 100644 --- a/forester-utils/src/instructions/address_batch_update.rs +++ b/forester-utils/src/instructions/address_batch_update.rs @@ -94,7 +94,6 @@ async fn stream_instruction_data<'a, R: Rpc>( } }; - // Log Photon response details debug!( "Photon response for chunk {}: received {} addresses, batch_start_index={}, first_queue_index={:?}, last_queue_index={:?}", chunk_idx, @@ -104,7 +103,6 @@ async fn stream_instruction_data<'a, R: Rpc>( indexer_update_info.value.addresses.last().map(|a| a.queue_index) ); - // Update next_queue_index for the next chunk based on the last address returned if let Some(last_address) = indexer_update_info.value.addresses.last() { next_queue_index = Some(last_address.queue_index + 1); debug!( @@ -143,9 +141,8 @@ async fn stream_instruction_data<'a, R: Rpc>( }; current_root = new_current_root; - info!("Generating {} zk proofs for batch_address chunk {} (parallel)", all_inputs.len(), chunk_idx + 1); + info!("Generating {} zk proofs for batch_address chunk {}", all_inputs.len(), chunk_idx + 1); - // Generate ALL proofs in parallel using join_all let proof_futures: Vec<_> = all_inputs.into_iter().enumerate().map(|(i, inputs)| { let client = Arc::clone(&proof_client); async move { @@ -154,10 
+151,8 @@ async fn stream_instruction_data<'a, R: Rpc>( } }).collect(); - // Wait for all proofs to complete in parallel let proof_results = futures::future::join_all(proof_futures).await; - // Process results and batch them into groups of MAX_PROOFS_PER_TX let mut proof_buffer = Vec::new(); for (idx, result) in proof_results { match result { @@ -172,7 +167,6 @@ async fn stream_instruction_data<'a, R: Rpc>( }; proof_buffer.push(instruction_data); - // Yield when we have MAX_PROOFS_PER_TX proofs ready if proof_buffer.len() >= MAX_PROOFS_PER_TX { yield Ok(proof_buffer.clone()); proof_buffer.clear(); diff --git a/forester-utils/src/instructions/mod.rs b/forester-utils/src/instructions/mod.rs index 61ea236271..b8d5dd6f2f 100644 --- a/forester-utils/src/instructions/mod.rs +++ b/forester-utils/src/instructions/mod.rs @@ -1,6 +1,4 @@ pub mod address_batch_update; pub mod create_account; -pub mod state_batch_append; -pub mod state_batch_nullify; pub use create_account::create_account_instruction; diff --git a/forester-utils/src/instructions/state_batch_append.rs b/forester-utils/src/instructions/state_batch_append.rs deleted file mode 100644 index 32d72d14ff..0000000000 --- a/forester-utils/src/instructions/state_batch_append.rs +++ /dev/null @@ -1,242 +0,0 @@ -use std::{pin::Pin, sync::Arc, time::Duration}; - -use account_compression::processor::initialize_address_merkle_tree::Pubkey; -use async_stream::stream; -use futures::stream::Stream; -use light_batched_merkle_tree::{ - constants::DEFAULT_BATCH_STATE_TREE_HEIGHT, merkle_tree::InstructionDataBatchAppendInputs, -}; -use light_client::{indexer::Indexer, rpc::Rpc}; -use light_compressed_account::instruction_data::compressed_proof::CompressedProof; -use light_hasher::bigint::bigint_to_be_bytes_array; -use light_prover_client::{ - proof_client::ProofClient, - proof_types::batch_append::{get_batch_append_inputs, BatchAppendsCircuitInputs}, -}; -use light_sparse_merkle_tree::changelog::ChangelogEntry; -use tracing::{debug, error, trace, warn}; - -use crate::{ - error::ForesterUtilsError, rpc_pool::SolanaRpcPool, utils::wait_for_indexer, - ParsedMerkleTreeData, ParsedQueueData, -}; - -const MAX_PROOFS_PER_TX: usize = 3; - -async fn generate_zkp_proof( - circuit_inputs: BatchAppendsCircuitInputs, - proof_client: Arc, -) -> Result { - let (proof, new_root) = proof_client - .generate_batch_append_proof(circuit_inputs) - .await - .map_err(|e| ForesterUtilsError::Prover(e.to_string()))?; - Ok(InstructionDataBatchAppendInputs { - new_root, - compressed_proof: CompressedProof { - a: proof.a, - b: proof.b, - c: proof.c, - }, - }) -} - -#[allow(clippy::too_many_arguments)] -pub async fn get_append_instruction_stream<'a, R: Rpc>( - rpc_pool: Arc>, - merkle_tree_pubkey: Pubkey, - prover_url: String, - prover_api_key: Option, - polling_interval: Duration, - max_wait_time: Duration, - merkle_tree_data: ParsedMerkleTreeData, - output_queue_data: ParsedQueueData, -) -> Result< - ( - Pin< - Box< - dyn Stream, ForesterUtilsError>> - + Send - + 'a, - >, - >, - u16, - ), - ForesterUtilsError, -> { - trace!("Initializing append batch instruction stream with parsed data"); - let (merkle_tree_next_index, mut current_root, _) = ( - merkle_tree_data.next_index, - merkle_tree_data.current_root, - merkle_tree_data.root_history, - ); - let (zkp_batch_size, leaves_hash_chains) = ( - output_queue_data.zkp_batch_size, - output_queue_data.leaves_hash_chains, - ); - - if leaves_hash_chains.is_empty() { - trace!("No hash chains to process, returning empty stream."); - return 
Ok((Box::pin(futures::stream::empty()), zkp_batch_size)); - } - let rpc = rpc_pool.get_connection().await?; - wait_for_indexer(&*rpc).await?; - drop(rpc); - - let stream = stream! { - let mut next_queue_index: Option = None; - - let mut all_changelogs: Vec> = Vec::new(); - - let proof_client = Arc::new(ProofClient::with_config(prover_url.clone(), polling_interval, max_wait_time, prover_api_key.clone())); - - let mut expected_indexer_root = current_root; - let mut proofs_buffer = Vec::new(); - - for (batch_idx, leaves_hash_chain) in leaves_hash_chains.iter().enumerate() { - if !proofs_buffer.is_empty() && batch_idx > 0 { - debug!("Have {} accumulated proofs before fetching batch {}", proofs_buffer.len(), batch_idx); - yield Ok(proofs_buffer.clone()); - proofs_buffer.clear(); - debug!("Waiting for transaction to land and indexer to sync..."); - let rpc = rpc_pool.get_connection().await?; - match wait_for_indexer(&*rpc).await { - Ok(_) => { - expected_indexer_root = current_root; - debug!("Transaction landed, updated expected root for batch {}", batch_idx); - } - Err(e) => { - debug!("Could not sync with indexer, likely phase ended: {}", e); - return; - } - } - drop(rpc); - } - - let queue_elements_result = { - let mut connection = rpc_pool.get_connection().await?; - let indexer = connection.indexer_mut()?; - indexer - .get_queue_elements( - merkle_tree_pubkey.to_bytes(), - next_queue_index, - Some(zkp_batch_size), - None, - None, - None, - ) - .await - }; - - let (batch_elements, batch_first_queue_idx) = match queue_elements_result { - Ok(res) => { - let items = res.value.output_queue_elements.unwrap_or_default(); - let first_idx = res.value.output_queue_index; - if items.len() != zkp_batch_size as usize { - warn!( - "Got {} elements but expected {}, stopping", - items.len(), zkp_batch_size - ); - break; - } - - (items, first_idx) - }, - Err(e) => { - yield Err(ForesterUtilsError::Indexer(format!("Failed to get queue elements for batch {}: {}", batch_idx, e))); - return; - } - }; - - if let Some(first_element) = batch_elements.first() { - if first_element.root != expected_indexer_root { - error!( - "Root mismatch! Indexer root: {:?}, Expected root: {:?}, indexer seq: {}, first_element.leaf_index: {}", - first_element.root, - expected_indexer_root, - first_element.root_seq, - first_element.leaf_index - ); - yield Err(ForesterUtilsError::Indexer("Root mismatch between indexer and expected state".into())); - return; - } - } - - if let Some(first_idx) = batch_first_queue_idx { - next_queue_index = Some(first_idx + zkp_batch_size as u64); - debug!("Next batch will start at queue index: {:?}", next_queue_index); - } - - let old_leaves: Vec<[u8; 32]> = batch_elements.iter().map(|x| x.leaf).collect(); - let leaves: Vec<[u8; 32]> = batch_elements.iter().map(|x| x.account_hash).collect(); - let merkle_proofs: Vec> = batch_elements.iter().map(|x| x.proof.clone()).collect(); - let adjusted_start_index = merkle_tree_next_index as u32 + (batch_idx * zkp_batch_size as usize) as u32; - - debug!("Using start_index: {} (min leaf_index from batch)", adjusted_start_index); - - use light_hasher::hash_chain::create_hash_chain_from_slice; - let indexer_hashchain = create_hash_chain_from_slice(&leaves) - .map_err(|e| ForesterUtilsError::Prover(format!("Failed to calculate hashchain: {}", e)))?; - - if indexer_hashchain != *leaves_hash_chain { - error!("Hashchain mismatch! 
On-chain: {:?}, indexer: {:?}", - leaves_hash_chain, - indexer_hashchain - ); - yield Err(ForesterUtilsError::Indexer("Hashchain mismatch between indexer and on-chain state".into())) - } - - let (circuit_inputs, batch_changelogs) = match get_batch_append_inputs::<32>( - current_root, adjusted_start_index, leaves, *leaves_hash_chain, old_leaves, merkle_proofs, zkp_batch_size as u32, &all_changelogs, - ) { - Ok(inputs) => { - debug!("Batch append circuit inputs created successfully ({}, {})", inputs.0.start_index, inputs.0.batch_size); - inputs - }, - Err(e) => { - yield Err(ForesterUtilsError::Prover(format!("Failed to get circuit inputs: {}", e))); - return; - } - }; - - current_root = bigint_to_be_bytes_array::<32>(&circuit_inputs.new_root.to_biguint().unwrap()).unwrap(); - all_changelogs.extend(batch_changelogs); - - let client = Arc::clone(&proof_client); - match generate_zkp_proof(circuit_inputs, client).await { - Ok(proof) => { - debug!("Generated proof for batch {}", batch_idx); - proofs_buffer.push(proof); - - if proofs_buffer.len() >= MAX_PROOFS_PER_TX { - debug!("Buffer full with {} proofs, yielding for transaction", proofs_buffer.len()); - yield Ok(proofs_buffer.clone()); - proofs_buffer.clear(); - - if batch_idx < leaves_hash_chains.len() - 1 { - debug!("Waiting for transaction to land before continuing..."); - let rpc = rpc_pool.get_connection().await?; - if let Err(e) = wait_for_indexer(&*rpc).await { - yield Err(ForesterUtilsError::Indexer(format!("Failed to wait for indexer sync: {}", e))); - return; - } - drop(rpc); - expected_indexer_root = current_root; - debug!("Transaction landed, continuing with next batches"); - } - } - }, - Err(e) => { - yield Err(e); - return; - } - } - } - - if !proofs_buffer.is_empty() { - debug!("Sending final {} proofs", proofs_buffer.len()); - yield Ok(proofs_buffer); - } - }; - Ok((Box::pin(stream), zkp_batch_size)) -} diff --git a/forester-utils/src/instructions/state_batch_nullify.rs b/forester-utils/src/instructions/state_batch_nullify.rs deleted file mode 100644 index b7bc5bd4d6..0000000000 --- a/forester-utils/src/instructions/state_batch_nullify.rs +++ /dev/null @@ -1,232 +0,0 @@ -use std::{pin::Pin, sync::Arc, time::Duration}; - -use account_compression::processor::initialize_address_merkle_tree::Pubkey; -use async_stream::stream; -use futures::stream::Stream; -use light_batched_merkle_tree::{ - constants::DEFAULT_BATCH_STATE_TREE_HEIGHT, merkle_tree::InstructionDataBatchNullifyInputs, -}; -use light_client::{indexer::Indexer, rpc::Rpc}; -use light_compressed_account::instruction_data::compressed_proof::CompressedProof; -use light_hasher::bigint::bigint_to_be_bytes_array; -use light_prover_client::{ - proof_client::ProofClient, - proof_types::batch_update::{get_batch_update_inputs, BatchUpdateCircuitInputs}, -}; -use tracing::{debug, warn}; - -use crate::{ - error::ForesterUtilsError, rpc_pool::SolanaRpcPool, utils::wait_for_indexer, - ParsedMerkleTreeData, -}; - -async fn generate_nullify_zkp_proof( - inputs: BatchUpdateCircuitInputs, - proof_client: Arc, -) -> Result { - let (proof, new_root) = proof_client - .generate_batch_update_proof(inputs) - .await - .map_err(|e| ForesterUtilsError::Prover(e.to_string()))?; - Ok(InstructionDataBatchNullifyInputs { - new_root, - compressed_proof: CompressedProof { - a: proof.a, - b: proof.b, - c: proof.c, - }, - }) -} - -#[allow(clippy::too_many_arguments)] -pub async fn get_nullify_instruction_stream<'a, R: Rpc>( - rpc_pool: Arc>, - merkle_tree_pubkey: Pubkey, - prover_url: String, - 
prover_api_key: Option, - polling_interval: Duration, - max_wait_time: Duration, - merkle_tree_data: ParsedMerkleTreeData, -) -> Result< - ( - Pin< - Box< - dyn Stream< - Item = Result, ForesterUtilsError>, - > + Send - + 'a, - >, - >, - u16, - ), - ForesterUtilsError, -> { - let (mut current_root, leaves_hash_chains, _num_inserted_zkps, zkp_batch_size) = ( - merkle_tree_data.current_root, - merkle_tree_data.leaves_hash_chains, - merkle_tree_data.num_inserted_zkps, - merkle_tree_data.zkp_batch_size, - ); - - if leaves_hash_chains.is_empty() { - debug!("No hash chains to process for nullification, returning empty stream."); - return Ok((Box::pin(futures::stream::empty()), zkp_batch_size)); - } - - let rpc = rpc_pool.get_connection().await?; - wait_for_indexer(&*rpc).await?; - drop(rpc); - - let stream = stream! { - let mut next_queue_index: Option = None; - let mut all_changelogs = Vec::new(); - let proof_client = Arc::new(ProofClient::with_config(prover_url.clone(), polling_interval, max_wait_time, prover_api_key.clone())); - - let mut expected_indexer_root = current_root; - let mut proofs_buffer = Vec::new(); - const MAX_PROOFS_PER_TX: usize = 3; // Bundle up to 3 proofs per transaction - - for (batch_idx, leaves_hash_chain) in leaves_hash_chains.iter().enumerate() { - debug!( - "Fetching batch {} - tree: {}, start_queue_index: {:?}, limit: {}", - batch_idx, merkle_tree_pubkey, next_queue_index, zkp_batch_size - ); - - if !proofs_buffer.is_empty() && batch_idx > 0 { - debug!("Sending {} accumulated proofs before fetching batch {}", proofs_buffer.len(), batch_idx); - yield Ok(proofs_buffer.clone()); - proofs_buffer.clear(); - debug!("Waiting for transaction to land and indexer to sync..."); - let rpc = rpc_pool.get_connection().await?; - if let Err(e) = wait_for_indexer(&*rpc).await { - yield Err(ForesterUtilsError::Indexer(format!("Failed to wait for indexer sync after transaction: {}", e))); - return; - } - drop(rpc); - expected_indexer_root = current_root; - debug!("Transaction landed, updated expected root for batch {}", batch_idx); - } - - let queue_elements_result = { - let mut connection = rpc_pool.get_connection().await?; - let indexer = connection.indexer_mut()?; - indexer.get_queue_elements( - merkle_tree_pubkey.to_bytes(), - None, - None, - next_queue_index, - Some(zkp_batch_size), - None, - ) - .await - }; - - let (batch_elements, batch_first_queue_idx) = match queue_elements_result { - Ok(res) => { - let items = res.value.input_queue_elements.unwrap_or_default(); - let first_idx = res.value.input_queue_index; - if items.len() != zkp_batch_size as usize { - warn!( - "Got {} elements but expected {}, stopping", - items.len(), zkp_batch_size - ); - break; - } - - (items, first_idx) - }, - Err(e) => { - yield Err(ForesterUtilsError::Indexer(format!("Failed to get queue elements for batch {}: {}", batch_idx, e))); - return; - } - }; - - if let Some(first_element) = batch_elements.first() { - if first_element.root != expected_indexer_root { - debug!( - "Root mismatch for batch {}: indexer root {:?} != expected root {:?}", - batch_idx, first_element.root, expected_indexer_root - ); - yield Err(ForesterUtilsError::Indexer("Root mismatch between indexer and expected state".into())); - return; - } - } - - if let Some(first_idx) = batch_first_queue_idx { - next_queue_index = Some(first_idx + zkp_batch_size as u64); - debug!("Next batch will start at queue index: {}", first_idx + zkp_batch_size as u64); - } - - let mut leaves = Vec::new(); - let mut tx_hashes = Vec::new(); - let mut 
old_leaves = Vec::new(); - let mut path_indices = Vec::new(); - let mut merkle_proofs = Vec::new(); - - for leaf_info in batch_elements.iter() { - path_indices.push(leaf_info.leaf_index as u32); - leaves.push(leaf_info.account_hash); - old_leaves.push(leaf_info.leaf); - merkle_proofs.push(leaf_info.proof.clone()); - tx_hashes.push(leaf_info.tx_hash.ok_or_else(|| ForesterUtilsError::Indexer(format!("Missing tx_hash for leaf index {}", leaf_info.leaf_index)))?); - } - - let (circuit_inputs, batch_changelog) = match get_batch_update_inputs::<{ DEFAULT_BATCH_STATE_TREE_HEIGHT as usize }>( - current_root, tx_hashes, leaves, *leaves_hash_chain, old_leaves, merkle_proofs, path_indices, zkp_batch_size as u32, &all_changelogs, - ) { - Ok(inputs) => inputs, - Err(e) => { - yield Err(ForesterUtilsError::Prover(format!("Failed to get batch update inputs: {}", e))); - return; - } - }; - - all_changelogs.extend(batch_changelog); - current_root = bigint_to_be_bytes_array::<32>(&circuit_inputs.new_root.to_biguint().unwrap()).unwrap(); - - let client = Arc::clone(&proof_client); - match generate_nullify_zkp_proof(circuit_inputs, client).await { - Ok(proof) => { - debug!("Generated proof for batch {}", batch_idx); - proofs_buffer.push(proof); - - let should_send = if proofs_buffer.len() >= MAX_PROOFS_PER_TX { - debug!("Buffer full with {} proofs, sending transaction", proofs_buffer.len()); - true - } else { - false - }; - - if should_send { - debug!("Yielding {} proofs for transaction", proofs_buffer.len()); - yield Ok(proofs_buffer.clone()); - proofs_buffer.clear(); - - if batch_idx < leaves_hash_chains.len() - 1 { - debug!("Waiting for transaction to land before continuing..."); - let rpc = rpc_pool.get_connection().await?; - if let Err(e) = wait_for_indexer(&*rpc).await { - yield Err(ForesterUtilsError::Indexer(format!("Failed to wait for indexer sync: {}", e))); - return; - } - drop(rpc); - expected_indexer_root = current_root; - debug!("Transaction landed, continuing with next batches"); - } - } - }, - Err(e) => { - yield Err(e); - return; - } - } - } - - if !proofs_buffer.is_empty() { - debug!("Sending final {} proofs", proofs_buffer.len()); - yield Ok(proofs_buffer); - } - }; - - Ok((Box::pin(stream), zkp_batch_size)) -} diff --git a/forester-utils/src/lib.rs b/forester-utils/src/lib.rs index f77fd9cb8a..75b602632c 100644 --- a/forester-utils/src/lib.rs +++ b/forester-utils/src/lib.rs @@ -4,12 +4,15 @@ pub mod account_zero_copy; pub mod address_merkle_tree_config; +pub mod address_staging_tree; pub mod error; pub mod forester_epoch; +pub mod indexed_tree_processor; pub mod instructions; pub mod rate_limiter; pub mod registry; pub mod rpc_pool; +pub mod staging_tree; pub mod utils; /// Parsed merkle tree data extracted from account diff --git a/forester-utils/src/staging_tree.rs b/forester-utils/src/staging_tree.rs new file mode 100644 index 0000000000..db82a00e0e --- /dev/null +++ b/forester-utils/src/staging_tree.rs @@ -0,0 +1,276 @@ +use light_hasher::Poseidon; +use light_merkle_tree_reference::MerkleTree; +use light_prover_client::proof_types::batch_update::BatchTreeUpdateResult; +use tracing::debug; + +use crate::error::ForesterUtilsError; + +/// Result of a batch update operation on a staging tree. 
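+/// Bundles what a caller needs to assemble circuit inputs for one batch:
+/// the pre-update leaves and their Merkle proofs, plus the root transition
+/// `old_root -> new_root` produced by applying the batch.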
+#[derive(Clone, Debug)] +pub struct BatchUpdateResult { + pub old_leaves: Vec<[u8; 32]>, + pub merkle_proofs: Vec>, + pub old_root: [u8; 32], + pub new_root: [u8; 32], +} + +impl From for BatchTreeUpdateResult { + fn from(result: BatchUpdateResult) -> Self { + BatchTreeUpdateResult { + old_leaves: result.old_leaves, + merkle_proofs: result.merkle_proofs, + old_root: result.old_root, + new_root: result.new_root, + } + } +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum BatchType { + Append, + Nullify, +} + +impl std::fmt::Display for BatchType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + BatchType::Append => write!(f, "APPEND"), + BatchType::Nullify => write!(f, "NULLIFY"), + } + } +} + +#[derive(Clone, Debug)] +pub struct StagingTree { + tree: MerkleTree, + current_root: [u8; 32], + /// Updates tracked as (leaf_index, new_leaf, batch_seq) + updates: Vec<(u64, [u8; 32], u64)>, + base_seq: u64, +} + +impl StagingTree { + pub fn current_root(&self) -> [u8; 32] { + self.current_root + } + + pub fn base_seq(&self) -> u64 { + self.base_seq + } + + pub fn get_leaf(&self, leaf_index: u64) -> [u8; 32] { + self.tree.layers[0] + .get(leaf_index as usize) + .copied() + .unwrap_or([0u8; 32]) + } + + fn ensure_layer_capacity(&mut self, level: usize, min_index: usize, context: &str) { + if level < self.tree.layers.len() && self.tree.layers[level].len() <= min_index { + let old_len = self.tree.layers[level].len(); + self.tree.ensure_layer_capacity(level, min_index); + debug!( + "Auto-expanded tree layer {}: {} -> {} nodes ({})", + level, + old_len, + self.tree.layers[level].len(), + context + ); + } + } + + fn do_tree_update( + &mut self, + leaf_index: u64, + new_leaf: [u8; 32], + ) -> Result<(), ForesterUtilsError> { + self.tree + .update(&new_leaf, leaf_index as usize) + .map_err(|e| { + ForesterUtilsError::StagingTree(format!( + "Failed to update leaf {}: {:?}", + leaf_index, e + )) + }) + } + + pub fn update_leaf( + &mut self, + leaf_index: u64, + new_leaf: [u8; 32], + batch_seq: u64, + ) -> Result<(), ForesterUtilsError> { + let leaf_idx = leaf_index as usize; + self.ensure_layer_capacity(0, leaf_idx, &format!("for index {}", leaf_idx)); + self.do_tree_update(leaf_index, new_leaf)?; + self.updates.push((leaf_index, new_leaf, batch_seq)); + self.current_root = self.tree.root(); + Ok(()) + } + + pub fn process_batch_updates( + &mut self, + leaf_indices: &[u64], + new_leaves: &[[u8; 32]], + batch_type: BatchType, + batch_idx: usize, + batch_seq: u64, + ) -> Result { + if leaf_indices.len() != new_leaves.len() { + return Err(ForesterUtilsError::StagingTree(format!( + "Mismatch: {} leaf indices but {} new leaves", + leaf_indices.len(), + new_leaves.len() + ))); + } + + let old_root = self.current_root(); + + if let Some(&max_leaf_idx) = leaf_indices.iter().max() { + self.ensure_layer_capacity( + 0, + max_leaf_idx as usize, + &format!( + "{} batch {} max index {}", + batch_type, batch_idx, max_leaf_idx + ), + ); + } + + let mut old_leaves = Vec::with_capacity(leaf_indices.len()); + let mut merkle_proofs = Vec::with_capacity(leaf_indices.len()); + + for (&leaf_idx, &new_leaf) in leaf_indices.iter().zip(new_leaves.iter()) { + let old_leaf = self.get_leaf(leaf_idx); + let proof = self.get_proof(leaf_idx)?; + old_leaves.push(old_leaf); + + let final_leaf = match batch_type { + BatchType::Nullify => new_leaf, + BatchType::Append => { + let is_old_leaf_zero = old_leaf.iter().all(|&byte| byte == 0); + if is_old_leaf_zero { + new_leaf + } else { + old_leaf + 
} + } + }; + + self.do_tree_update(leaf_idx, final_leaf)?; + self.updates.push((leaf_idx, final_leaf, batch_seq)); + + merkle_proofs.push(proof); + } + + let new_root = self.tree.root(); + self.current_root = new_root; + + debug!( + "{} batch {} root transition: {:?}[..4] -> {:?}[..4]", + batch_type, + batch_idx, + &old_root[..4], + &new_root[..4] + ); + + Ok(BatchUpdateResult { + old_leaves, + merkle_proofs, + old_root, + new_root, + }) + } + + pub fn get_proof(&self, leaf_index: u64) -> Result, ForesterUtilsError> { + self.tree + .get_proof_of_leaf(leaf_index as usize, true) + .map_err(|e| ForesterUtilsError::StagingTree(format!("Failed to get proof: {}", e))) + } + + pub fn get_updates(&self) -> &[(u64, [u8; 32], u64)] { + &self.updates + } + + pub fn clear_updates(&mut self) { + self.updates.clear(); + } + + pub fn new( + leaf_indices: &[u64], + leaves: &[[u8; 32]], + nodes: &[u64], + node_hashes: &[[u8; 32]], + initial_root: [u8; 32], + root_seq: u64, + height: usize, + ) -> Result { + debug!( + "StagingTree::new: {} leaves, {} deduplicated nodes, initial_root={:?}, root_seq={}, height={}", + leaves.len(), + nodes.len(), + &initial_root, + root_seq, + height + ); + let mut tree = MerkleTree::::new(height, 0); + for (&node_index, &node_hash) in nodes.iter().zip(node_hashes.iter()) { + // Skip nodes at root level - root is stored separately in tree.roots + let level = (node_index >> 56) as usize; + if level == height { + continue; + } + tree.insert_node(node_index, node_hash).map_err(|e| { + ForesterUtilsError::StagingTree(format!("Failed to insert node: {}", e)) + })?; + } + + for (&leaf_index, &leaf_hash) in leaf_indices.iter().zip(leaves.iter()) { + tree.insert_leaf(leaf_index as usize, leaf_hash); + } + tree.roots.push(initial_root); + + Ok(Self { + tree, + current_root: initial_root, + updates: Vec::new(), + base_seq: root_seq, + }) + } + + pub fn replay_pending_updates( + &mut self, + pending_updates: &[(u64, [u8; 32], u64)], + indexer_seq: u64, + ) -> (usize, usize, usize) { + let total = pending_updates.len(); + let mut replayed = 0; + let mut skipped_confirmed = 0; + + for &(leaf_idx, new_leaf, update_seq) in pending_updates { + // Skip updates from batches that have already been confirmed on-chain + if update_seq <= indexer_seq { + skipped_confirmed += 1; + continue; + } + + let current_leaf = self.get_leaf(leaf_idx); + let is_zero = current_leaf.iter().all(|&b| b == 0); + + if is_zero { + let leaf_idx_usize = leaf_idx as usize; + self.ensure_layer_capacity(0, leaf_idx_usize, "replay pending"); + if self.do_tree_update(leaf_idx, new_leaf).is_ok() { + self.updates.push((leaf_idx, new_leaf, update_seq)); + replayed += 1; + } + } + } + + if replayed > 0 { + self.current_root = self.tree.root(); + } + + (total, replayed, skipped_confirmed) + } +} diff --git a/forester/Cargo.toml b/forester/Cargo.toml index 5e57fd80f2..58b75c448d 100644 --- a/forester/Cargo.toml +++ b/forester/Cargo.toml @@ -18,6 +18,7 @@ light-compressed-account = { workspace = true, features = ["std"] } light-system-program-anchor = { workspace = true, features = ["cpi"] } light-hash-set = { workspace = true, features = ["solana"] } light-hasher = { workspace = true, features = ["poseidon"] } +light-indexed-array = { workspace = true } light-merkle-tree-reference = { workspace = true } light-prover-client = { workspace = true } light-registry = { workspace = true } @@ -61,6 +62,7 @@ itertools = "0.14.0" num-bigint = { workspace = true } kameo = "0.19" once_cell = "1.21.3" +async-channel = "2.3" [dev-dependencies] 
serial_test = { workspace = true } diff --git a/forester/package.json b/forester/package.json index cbe6f3613d..8d11663e2d 100644 --- a/forester/package.json +++ b/forester/package.json @@ -4,7 +4,7 @@ "license": "GPL-3.0", "scripts": { "build": "cargo build", - "test": "redis-start && TEST_MODE=local TEST_V1_STATE=true TEST_V2_STATE=true TEST_V1_ADDRESS=true TEST_V2_ADDRESS=true RUST_LOG=forester=debug,forester_utils=debug cargo test --package forester e2e_test -- --nocapture", + "test": "redis-start && TEST_MODE=local TEST_V1_STATE=false TEST_V2_STATE=false TEST_V1_ADDRESS=false TEST_V2_ADDRESS=true RUST_LOG=forester=debug,forester_utils=debug,light_prover_client=debug cargo test --package forester e2e_test -- --nocapture", "docker:build": "docker build --tag forester -f Dockerfile .." }, "devDependencies": { @@ -19,4 +19,4 @@ } } } -} +} \ No newline at end of file diff --git a/forester/src/cli.rs b/forester/src/cli.rs index a6c7893833..73b15825d9 100644 --- a/forester/src/cli.rs +++ b/forester/src/cli.rs @@ -59,6 +59,20 @@ pub struct StartArgs { #[arg(long, env = "FORESTER_PROVER_API_KEY")] pub prover_api_key: Option, + #[arg( + long, + env = "FORESTER_PROVER_POLLING_INTERVAL_MS", + help = "Prover polling interval in milliseconds (default: 1000)" + )] + pub prover_polling_interval_ms: Option, + + #[arg( + long, + env = "FORESTER_PROVER_MAX_WAIT_TIME_SECS", + help = "Maximum time to wait for prover response in seconds (default: 600)" + )] + pub prover_max_wait_time_secs: Option, + #[arg(long, env = "FORESTER_PAYER")] pub payer: Option, diff --git a/forester/src/config.rs b/forester/src/config.rs index 749ec1ae01..73853a2251 100644 --- a/forester/src/config.rs +++ b/forester/src/config.rs @@ -40,6 +40,8 @@ pub struct ExternalServicesConfig { pub prover_update_url: Option, pub prover_address_append_url: Option, pub prover_api_key: Option, + pub prover_polling_interval: Option, + pub prover_max_wait_time: Option, pub photon_api_key: Option, pub photon_grpc_url: Option, pub pushgateway_url: Option, @@ -171,7 +173,7 @@ impl Default for TransactionConfig { fn default() -> Self { Self { legacy_ixs_per_tx: 1, - max_concurrent_batches: 20, + max_concurrent_batches: 60, max_concurrent_sends: 50, cu_limit: 1_000_000, enable_priority_fees: false, @@ -241,6 +243,8 @@ impl ForesterConfig { .clone() .or_else(|| args.prover_url.clone()), prover_api_key: args.prover_api_key.clone(), + prover_polling_interval: args.prover_polling_interval_ms.map(Duration::from_millis), + prover_max_wait_time: args.prover_max_wait_time_secs.map(Duration::from_secs), photon_api_key: args.photon_api_key.clone(), photon_grpc_url: args.photon_grpc_url.clone(), pushgateway_url: args.push_gateway_url.clone(), @@ -329,6 +333,8 @@ impl ForesterConfig { prover_update_url: None, prover_address_append_url: None, prover_api_key: None, + prover_polling_interval: None, + prover_max_wait_time: None, photon_api_key: None, photon_grpc_url: None, pushgateway_url: args.push_gateway_url.clone(), diff --git a/forester/src/epoch_manager.rs b/forester/src/epoch_manager.rs index 9030614310..846fbe20c3 100644 --- a/forester/src/epoch_manager.rs +++ b/forester/src/epoch_manager.rs @@ -1,7 +1,7 @@ use std::{ collections::HashMap, sync::{ - atomic::{AtomicBool, AtomicUsize, Ordering}, + atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, Arc, }, time::Duration, @@ -55,7 +55,7 @@ use crate::{ send_transaction::send_batched_transactions, tx_builder::EpochManagerTransactions, }, - v2::{process_batched_operations, BatchContext}, + v2::{self, 
BatchContext, ProverConfig}, }, queue_helpers::QueueItemData, rollover::{ @@ -68,6 +68,11 @@ use crate::{ ForesterConfig, ForesterEpochInfo, Result, }; +/// Map of tree pubkey to (epoch, supervisor actor reference) +type StateSupervisorMap = Arc>)>>; +type AddressSupervisorMap = + Arc>)>>; + #[derive(Copy, Clone, Debug)] pub struct WorkReport { pub epoch: u64, @@ -101,6 +106,7 @@ pub struct EpochManager { config: Arc, protocol_config: Arc, rpc_pool: Arc>, + authority: Arc, work_report_sender: mpsc::Sender, processed_items_per_epoch_count: Arc>>, trees: Arc>>, @@ -110,6 +116,8 @@ pub struct EpochManager { tx_cache: Arc>, ops_cache: Arc>, queue_poller: Option>, + state_supervisors: StateSupervisorMap, + address_supervisors: AddressSupervisorMap, compressible_tracker: Option>, } @@ -119,6 +127,7 @@ impl Clone for EpochManager { config: self.config.clone(), protocol_config: self.protocol_config.clone(), rpc_pool: self.rpc_pool.clone(), + authority: self.authority.clone(), work_report_sender: self.work_report_sender.clone(), processed_items_per_epoch_count: self.processed_items_per_epoch_count.clone(), trees: self.trees.clone(), @@ -128,6 +137,8 @@ impl Clone for EpochManager { tx_cache: self.tx_cache.clone(), ops_cache: self.ops_cache.clone(), queue_poller: self.queue_poller.clone(), + state_supervisors: self.state_supervisors.clone(), + address_supervisors: self.address_supervisors.clone(), compressible_tracker: self.compressible_tracker.clone(), } } @@ -166,10 +177,12 @@ impl EpochManager { None }; + let authority = Arc::new(config.payer_keypair.insecure_clone()); Ok(Self { config, protocol_config, rpc_pool, + authority, work_report_sender, processed_items_per_epoch_count: Arc::new(Mutex::new(HashMap::new())), trees: Arc::new(Mutex::new(trees)), @@ -179,6 +192,8 @@ impl EpochManager { tx_cache, ops_cache, queue_poller, + state_supervisors: Arc::new(DashMap::new()), + address_supervisors: Arc::new(DashMap::new()), compressible_tracker, }) } @@ -228,6 +243,7 @@ impl EpochManager { while let Some(epoch) = rx.recv().await { debug!("Received new epoch: {}", epoch); + let self_clone = Arc::clone(&self); tokio::spawn(async move { if let Err(e) = self_clone.process_epoch(epoch).await { @@ -374,6 +390,31 @@ impl EpochManager { if last_epoch.is_none_or(|last| current_epoch > last) { debug!("New epoch detected: {}", current_epoch); + // Kill state supervisors and clear caches when a new epoch is detected + let supervisor_count = self.state_supervisors.len(); + if supervisor_count > 0 { + for entry in self.state_supervisors.iter() { + let (_, actor_ref) = entry.value(); + actor_ref.kill(); + } + self.state_supervisors.clear(); + info!( + "Killed and cleared {} state supervisor actors for new epoch {}", + supervisor_count, current_epoch + ); + } + let address_supervisor_count = self.address_supervisors.len(); + if address_supervisor_count > 0 { + for entry in self.address_supervisors.iter() { + let (_, actor_ref) = entry.value(); + actor_ref.kill(); + } + self.address_supervisors.clear(); + info!( + "Killed and cleared {} address supervisor actors for new epoch {}", + address_supervisor_count, current_epoch + ); + } let phases = get_epoch_phases(&self.protocol_config, current_epoch); if slot < phases.registration.end { debug!("Sending current epoch {} for processing", current_epoch); @@ -1160,11 +1201,15 @@ impl EpochManager { } TreeType::StateV2 | TreeType::AddressV2 => { if let Some(ref mut rx) = queue_update_rx { + let consecutive_end = tree_schedule + .get_consecutive_eligibility_end(slot_idx) + 
.unwrap_or(light_slot_details.end_solana_slot); self.process_light_slot_v2( epoch_info, epoch_pda, &tree_schedule.tree_accounts, &light_slot_details, + consecutive_end, rx, ) .await @@ -1235,10 +1280,11 @@ impl EpochManager { forester_slot_details: &ForesterSlot, ) -> Result<()> { info!( - "Processing slot {} ({}-{})", + "Processing slot {} ({}-{}) epoch {}", forester_slot_details.slot, forester_slot_details.start_solana_slot, - forester_slot_details.end_solana_slot + forester_slot_details.end_solana_slot, + epoch_info.epoch ); let mut rpc = self.rpc_pool.get_connection().await?; wait_until_slot_reached( @@ -1285,6 +1331,7 @@ impl EpochManager { epoch_pda, tree_accounts, forester_slot_details, + forester_slot_details.end_solana_slot, estimated_slot, None, ) @@ -1329,7 +1376,7 @@ impl EpochManager { #[instrument( level = "debug", - skip(self, epoch_info, epoch_pda, tree_accounts, forester_slot_details, queue_update_rx), + skip(self, epoch_info, epoch_pda, tree_accounts, forester_slot_details, consecutive_eligibility_end, queue_update_rx), fields(tree = %tree_accounts.merkle_tree) )] async fn process_light_slot_v2( @@ -1338,13 +1385,15 @@ impl EpochManager { epoch_pda: &ForesterEpochPda, tree_accounts: &TreeAccounts, forester_slot_details: &ForesterSlot, + consecutive_eligibility_end: u64, queue_update_rx: &mut mpsc::Receiver, ) -> Result<()> { info!( - "Processing V2 light slot {} ({}-{})", + "Processing V2 light slot {} ({}-{}, consecutive_end={})", forester_slot_details.slot, forester_slot_details.start_solana_slot, - forester_slot_details.end_solana_slot + forester_slot_details.end_solana_slot, + consecutive_eligibility_end ); let mut rpc = self.rpc_pool.get_connection().await?; @@ -1408,6 +1457,7 @@ impl EpochManager { epoch_pda, tree_accounts, forester_slot_details, + consecutive_eligibility_end, estimated_slot, Some(&update), ) @@ -1518,12 +1568,14 @@ impl EpochManager { Ok(true) } + #[allow(clippy::too_many_arguments)] async fn dispatch_tree_processing( &self, epoch_info: &Epoch, epoch_pda: &ForesterEpochPda, tree_accounts: &TreeAccounts, forester_slot_details: &ForesterSlot, + consecutive_eligibility_end: u64, current_solana_slot: u64, queue_update: Option<&QueueUpdateMessage>, ) -> Result { @@ -1540,8 +1592,13 @@ impl EpochManager { .await } TreeType::StateV2 | TreeType::AddressV2 => { - self.process_v2(epoch_info, tree_accounts, queue_update) - .await + self.process_v2( + epoch_info, + tree_accounts, + queue_update, + consecutive_eligibility_end, + ) + .await } } } @@ -1722,66 +1779,257 @@ impl EpochManager { } } - async fn process_v2( + fn build_batch_context( &self, epoch_info: &Epoch, tree_accounts: &TreeAccounts, - queue_update: Option<&QueueUpdateMessage>, - ) -> Result { + input_queue_hint: Option, + output_queue_hint: Option, + eligibility_end: Option, + ) -> BatchContext { let default_prover_url = "http://127.0.0.1:3001".to_string(); - - let (input_queue_hint, output_queue_hint) = if let Some(update) = queue_update { - match update.queue_type { - light_compressed_account::QueueType::InputStateV2 => { - (Some(update.queue_size), None) - } - light_compressed_account::QueueType::OutputStateV2 => { - (None, Some(update.queue_size)) - } - _ => (None, None), - } - } else { - (None, None) - }; - - let batch_context = BatchContext { + let eligibility_end = eligibility_end.unwrap_or(0); + BatchContext { rpc_pool: self.rpc_pool.clone(), - authority: self.config.payer_keypair.insecure_clone(), + authority: self.authority.clone(), derivation: self.config.derivation_pubkey, epoch: 
epoch_info.epoch, merkle_tree: tree_accounts.merkle_tree, output_queue: tree_accounts.queue, - prover_append_url: self - .config - .external_services - .prover_append_url - .clone() - .unwrap_or_else(|| default_prover_url.clone()), - prover_update_url: self - .config - .external_services - .prover_update_url - .clone() - .unwrap_or_else(|| default_prover_url.clone()), - prover_address_append_url: self - .config - .external_services - .prover_address_append_url - .clone() - .unwrap_or_else(|| default_prover_url.clone()), - prover_api_key: self.config.external_services.prover_api_key.clone(), - prover_polling_interval: Duration::from_secs(1), - prover_max_wait_time: Duration::from_secs(600), + prover_config: ProverConfig { + append_url: self + .config + .external_services + .prover_append_url + .clone() + .unwrap_or_else(|| default_prover_url.clone()), + update_url: self + .config + .external_services + .prover_update_url + .clone() + .unwrap_or_else(|| default_prover_url.clone()), + address_append_url: self + .config + .external_services + .prover_address_append_url + .clone() + .unwrap_or_else(|| default_prover_url.clone()), + api_key: self.config.external_services.prover_api_key.clone(), + polling_interval: self + .config + .external_services + .prover_polling_interval + .unwrap_or(Duration::from_secs(1)), + max_wait_time: self + .config + .external_services + .prover_max_wait_time + .unwrap_or(Duration::from_secs(600)), + }, ops_cache: self.ops_cache.clone(), epoch_phases: epoch_info.phases.clone(), slot_tracker: self.slot_tracker.clone(), input_queue_hint, output_queue_hint, - }; + num_proof_workers: self.config.transaction_config.max_concurrent_batches, + forester_eligibility_end_slot: Arc::new(AtomicU64::new(eligibility_end)), + } + } - process_batched_operations(batch_context, tree_accounts.tree_type) - .await - .map_err(|e| anyhow!("Failed to process V2 operations: {}", e)) + async fn get_or_create_state_supervisor( + &self, + epoch_info: &Epoch, + tree_accounts: &TreeAccounts, + ) -> Result>> { + use dashmap::mapref::entry::Entry; + + let entry = self.state_supervisors.entry(tree_accounts.merkle_tree); + + match entry { + Entry::Occupied(mut occupied) => { + let (stored_epoch, supervisor_ref) = occupied.get(); + if *stored_epoch == epoch_info.epoch { + Ok(supervisor_ref.clone()) + } else { + info!( + "Removing stale StateSupervisor for tree {} (epoch {} -> {})", + tree_accounts.merkle_tree, *stored_epoch, epoch_info.epoch + ); + // Don't pass forester_slot - StateSupervisor is long-lived across forester slots, + // so it should use the global active phase end for safety checks + let batch_context = + self.build_batch_context(epoch_info, tree_accounts, None, None, None); + let supervisor = v2::state::StateSupervisor::spawn(batch_context); + info!( + "Created StateSupervisor actor for tree {} (epoch {})", + tree_accounts.merkle_tree, epoch_info.epoch + ); + occupied.insert((epoch_info.epoch, supervisor.clone())); + Ok(supervisor) + } + } + Entry::Vacant(vacant) => { + // Don't pass forester_slot - StateSupervisor is long-lived across forester slots, + // so it should use the global active phase end for safety checks + let batch_context = + self.build_batch_context(epoch_info, tree_accounts, None, None, None); + let supervisor = v2::state::StateSupervisor::spawn(batch_context); + info!( + "Created StateSupervisor actor for tree {} (epoch {})", + tree_accounts.merkle_tree, epoch_info.epoch + ); + vacant.insert((epoch_info.epoch, supervisor.clone())); + Ok(supervisor) + } + } + } + + 
async fn get_or_create_address_supervisor( + &self, + epoch_info: &Epoch, + tree_accounts: &TreeAccounts, + ) -> Result>> { + use dashmap::mapref::entry::Entry; + + let entry = self.address_supervisors.entry(tree_accounts.merkle_tree); + + match entry { + Entry::Occupied(mut occupied) => { + let (stored_epoch, supervisor_ref) = occupied.get(); + if *stored_epoch == epoch_info.epoch { + Ok(supervisor_ref.clone()) + } else { + info!( + "Removing stale AddressSupervisor for tree {} (epoch {} -> {})", + tree_accounts.merkle_tree, *stored_epoch, epoch_info.epoch + ); + let batch_context = + self.build_batch_context(epoch_info, tree_accounts, None, None, None); + let supervisor = + v2::address::supervisor::AddressSupervisor::spawn(batch_context); + info!( + "Created AddressSupervisor actor for tree {} (epoch {})", + tree_accounts.merkle_tree, epoch_info.epoch + ); + occupied.insert((epoch_info.epoch, supervisor.clone())); + Ok(supervisor) + } + } + Entry::Vacant(vacant) => { + let batch_context = + self.build_batch_context(epoch_info, tree_accounts, None, None, None); + let supervisor = v2::address::supervisor::AddressSupervisor::spawn(batch_context); + info!( + "Created AddressSupervisor actor for tree {} (epoch {})", + tree_accounts.merkle_tree, epoch_info.epoch + ); + vacant.insert((epoch_info.epoch, supervisor.clone())); + Ok(supervisor) + } + } + } + + async fn process_v2( + &self, + epoch_info: &Epoch, + tree_accounts: &TreeAccounts, + queue_update: Option<&QueueUpdateMessage>, + consecutive_eligibility_end: u64, + ) -> Result { + match tree_accounts.tree_type { + TreeType::StateV2 => { + if let Some(update) = queue_update { + let supervisor = self + .get_or_create_state_supervisor(epoch_info, tree_accounts) + .await?; + + supervisor + .ask(v2::state::UpdateEligibility { + end_slot: consecutive_eligibility_end, + }) + .send() + .await + .map_err(|e| { + anyhow!( + "Failed to send UpdateEligibility to StateSupervisor for tree {}: {}", + tree_accounts.merkle_tree, + e + ) + })?; + + let work = v2::state::QueueWork { + queue_type: update.queue_type, + queue_size: update.queue_size, + }; + + Ok(supervisor + .ask(v2::state::ProcessQueueUpdate { queue_work: work }) + .send() + .await + .map_err(|e| { + anyhow!( + "Failed to send message to StateSupervisor for tree {}: {}", + tree_accounts.merkle_tree, + e + ) + })?) + } else { + Ok(0) + } + } + TreeType::AddressV2 => { + if let Some(update) = queue_update { + let supervisor = self + .get_or_create_address_supervisor(epoch_info, tree_accounts) + .await?; + + if let Err(e) = supervisor + .ask(v2::common::UpdateEligibility { + end_slot: consecutive_eligibility_end, + }) + .send() + .await + { + warn!( + "Failed to send UpdateEligibility to AddressSupervisor for tree {}: {}. Removing supervisor.", + tree_accounts.merkle_tree, e + ); + self.address_supervisors.remove(&tree_accounts.merkle_tree); + return Err(anyhow!("Failed to send UpdateEligibility: {}", e)); + } + + let work = v2::address::supervisor::AddressQueueWork { + queue_size: update.queue_size, + }; + + match supervisor + .ask(v2::address::supervisor::ProcessAddressQueueUpdate { work }) + .send() + .await + { + Ok(res) => Ok(res), + Err(e) => { + warn!( + "Failed to send ProcessAddressQueueUpdate to AddressSupervisor for tree {}: {}. 
Removing supervisor.", + tree_accounts.merkle_tree, e + ); + self.address_supervisors.remove(&tree_accounts.merkle_tree); + Err(anyhow!("Failed to send ProcessAddressQueueUpdate: {}", e)) + } + } + } else { + Ok(0) + } + } + _ => { + warn!( + "Unsupported tree type for V2 processing: {:?}", + tree_accounts.tree_type + ); + Ok(0) + } + } } async fn update_metrics_and_counts( @@ -2194,6 +2442,8 @@ mod tests { rpc_rate_limit: None, photon_rate_limit: None, send_tx_rate_limit: None, + prover_polling_interval: None, + prover_max_wait_time: None, }, retry_config: RetryConfig::default(), queue_config: Default::default(), diff --git a/forester/src/polling/queue_poller.rs b/forester/src/polling/queue_poller.rs index df36d8b8da..42bd374226 100644 --- a/forester/src/polling/queue_poller.rs +++ b/forester/src/polling/queue_poller.rs @@ -123,7 +123,7 @@ impl QueueInfoPoller { ); } Err(mpsc::error::TrySendError::Full(_)) => { - warn!( + debug!( "Tree {} channel full, dropping update (tree processing slower than updates)", info.tree ); diff --git a/forester/src/probe_type.rs b/forester/src/probe_type.rs new file mode 100644 index 0000000000..93b55c69c6 --- /dev/null +++ b/forester/src/probe_type.rs @@ -0,0 +1,9 @@ +#[cfg(test)] +mod tests { + use light_batched_merkle_tree::address_merkle_tree::AddressMerkleTreeAccount; + + #[test] + fn test_import() { + let _ = AddressMerkleTreeAccount::default(); + } +} diff --git a/forester/src/processor/v2/address.rs b/forester/src/processor/v2/address.rs deleted file mode 100644 index 22365275a0..0000000000 --- a/forester/src/processor/v2/address.rs +++ /dev/null @@ -1,60 +0,0 @@ -use anyhow::Error; -use borsh::BorshSerialize; -use forester_utils::instructions::address_batch_update::{ - get_address_update_instruction_stream, AddressUpdateConfig, -}; -use futures::stream::{Stream, StreamExt}; -use light_batched_merkle_tree::merkle_tree::InstructionDataAddressAppendInputs; -use light_client::rpc::Rpc; -use light_registry::account_compression_cpi::sdk::create_batch_update_address_tree_instruction; -use solana_program::instruction::Instruction; -use solana_sdk::signer::Signer; -use tracing::instrument; - -use super::common::{process_stream, BatchContext, ParsedMerkleTreeData}; -use crate::Result; - -async fn create_stream_future( - ctx: &BatchContext, - merkle_tree_data: ParsedMerkleTreeData, -) -> Result<( - impl Stream>> + Send, - u16, -)> -where - R: Rpc, -{ - let config = AddressUpdateConfig { - rpc_pool: ctx.rpc_pool.clone(), - merkle_tree_pubkey: ctx.merkle_tree, - prover_url: ctx.prover_address_append_url.clone(), - prover_api_key: ctx.prover_api_key.clone(), - polling_interval: ctx.prover_polling_interval, - max_wait_time: ctx.prover_max_wait_time, - }; - let (stream, size) = get_address_update_instruction_stream(config, merkle_tree_data) - .await - .map_err(Error::from)?; - let stream = stream.map(|item| item.map_err(Error::from)); - Ok((stream, size)) -} - -#[instrument(level = "debug", skip(context, merkle_tree_data), fields(tree = %context.merkle_tree))] -pub(crate) async fn process_batch( - context: &BatchContext, - merkle_tree_data: ParsedMerkleTreeData, -) -> Result { - let instruction_builder = |data: &InstructionDataAddressAppendInputs| -> Instruction { - let serialized_data = data.try_to_vec().unwrap(); - create_batch_update_address_tree_instruction( - context.authority.pubkey(), - context.derivation, - context.merkle_tree, - context.epoch, - serialized_data, - ) - }; - - let stream_future = create_stream_future(context, merkle_tree_data); - 
process_stream(context, stream_future, instruction_builder).await } diff --git a/forester/src/processor/v2/address/mod.rs b/forester/src/processor/v2/address/mod.rs new file mode 100644 index 0000000000..72838f2cad --- /dev/null +++ b/forester/src/processor/v2/address/mod.rs @@ -0,0 +1 @@ +pub mod supervisor; diff --git a/forester/src/processor/v2/address/supervisor.rs b/forester/src/processor/v2/address/supervisor.rs new file mode 100644 index 0000000000..840147f388 --- /dev/null +++ b/forester/src/processor/v2/address/supervisor.rs @@ -0,0 +1,427 @@ +use anyhow::anyhow; +use forester_utils::address_staging_tree::AddressStagingTree; +use kameo::{ + actor::{ActorRef, WeakActorRef}, + error::ActorStopReason, + message::Message, + Actor, +}; +use light_client::rpc::Rpc; +use tokio::sync::mpsc; +use tracing::{debug, error, info, trace, warn}; + +use crate::processor::v2::{ + common::UpdateEligibility, + state::{ + helpers::{fetch_address_batches, fetch_address_zkp_batch_size}, + proof_worker::{spawn_proof_workers, ProofInput, ProofJob, ProofResult}, + tx_sender::TxSender, + }, + BatchContext, +}; + +#[derive(Debug, Clone)] +pub struct AddressQueueWork { + pub queue_size: u64, +} + +#[derive(Debug, Clone)] +pub struct ProcessAddressQueueUpdate { + pub work: AddressQueueWork, +} + +struct WorkerPool { + job_tx: async_channel::Sender<ProofJob>, +} + +pub struct AddressSupervisor<R: Rpc> { + context: BatchContext<R>, + staging_tree: Option<AddressStagingTree>, + current_root: [u8; 32], + zkp_batch_size: u64, + worker_pool: Option<WorkerPool>, +} + +impl<R: Rpc> Actor for AddressSupervisor<R> { + type Args = BatchContext<R>; + type Error = anyhow::Error; + + async fn on_start( + context: Self::Args, + _actor_ref: ActorRef<Self>, + ) -> Result<Self, Self::Error> { + info!( + "AddressSupervisor actor starting for tree {}", + context.merkle_tree + ); + + let zkp_batch_size = fetch_address_zkp_batch_size(&context).await.map_err(|e| { + error!( + "Failed to fetch zkp_batch_size for tree {}: {}", + context.merkle_tree, e + ); + e + })?; + info!( + "AddressSupervisor fetched zkp_batch_size={} for tree {}", + zkp_batch_size, context.merkle_tree + ); + + Ok(Self { + context, + staging_tree: None, + current_root: [0u8; 32], + zkp_batch_size, + worker_pool: None, + }) + } + + async fn on_stop( + &mut self, + _actor_ref: WeakActorRef<Self>, + _reason: ActorStopReason, + ) -> Result<(), Self::Error> { + info!( + "AddressSupervisor actor stopping for tree {}", + self.context.merkle_tree + ); + Ok(()) + } +} + +impl<R: Rpc> Message<ProcessAddressQueueUpdate> for AddressSupervisor<R> { + type Reply = crate::Result<usize>; + + async fn handle( + &mut self, + msg: ProcessAddressQueueUpdate, + _ctx: &mut kameo::message::Context<Self, Self::Reply>, + ) -> Self::Reply { + self.process_queue_update(msg.work).await + } +} + +impl<R: Rpc> Message<UpdateEligibility> for AddressSupervisor<R> { + type Reply = (); + + async fn handle( + &mut self, + msg: UpdateEligibility, + _ctx: &mut kameo::message::Context<Self, Self::Reply>, + ) -> Self::Reply { + debug!( + "Updating eligibility end slot to {} for tree {}", + msg.end_slot, self.context.merkle_tree + ); + self.context + .forester_eligibility_end_slot + .store(msg.end_slot, std::sync::atomic::Ordering::Relaxed); + } +} + +impl<R: Rpc> AddressSupervisor<R> { + fn zkp_batch_size(&self) -> u64 { + self.zkp_batch_size + } + + fn ensure_worker_pool(&mut self) { + if self.worker_pool.is_none() { + let num_workers = self.context.num_proof_workers.max(1); + let job_tx = spawn_proof_workers(num_workers, self.context.prover_config.clone()); + self.worker_pool = Some(WorkerPool { job_tx }); + } + } + + fn reset_staging_tree(&mut self) { + info!( + "Resetting staging tree for tree {}",
self.context.merkle_tree + ); + self.staging_tree = None; + } + + fn build_staging_tree( + &mut self, + subtrees: &[[u8; 32]], + start_index: usize, + initial_root: [u8; 32], + ) -> crate::Result<()> { + self.staging_tree = Some(AddressStagingTree::from_subtrees_vec( + subtrees.to_vec(), + start_index, + initial_root, + )?); + self.current_root = initial_root; + debug!( + "Built staging tree from indexer (root={:?}[..4])", + &initial_root[..4] + ); + Ok(()) + } + + fn get_leaves_hashchain( + leaves_hash_chains: &[[u8; 32]], + batch_idx: usize, + ) -> crate::Result<[u8; 32]> { + leaves_hash_chains.get(batch_idx).copied().ok_or_else(|| { + anyhow!( + "Missing leaves_hash_chain for batch {} (available: {})", + batch_idx, + leaves_hash_chains.len() + ) + }) + } + + fn batch_range(&self, total_len: usize, start: usize) -> std::ops::Range { + let end = (start + self.zkp_batch_size as usize).min(total_len); + start..end + } + + fn create_job(seq: u64, inputs: ProofInput, result_tx: mpsc::Sender) -> ProofJob { + ProofJob { + seq, + inputs, + result_tx, + } + } + + async fn process_queue_update(&mut self, work: AddressQueueWork) -> crate::Result { + debug!( + "AddressSupervisor processing queue update for tree {}", + self.context.merkle_tree + ); + + let zkp_batch_size = self.zkp_batch_size(); + if work.queue_size < zkp_batch_size { + trace!( + "Queue size {} below zkp_batch_size {}, skipping", + work.queue_size, + zkp_batch_size + ); + return Ok(0); + } + + let max_batches = (work.queue_size / zkp_batch_size) as usize; + if max_batches == 0 { + return Ok(0); + } + + self.ensure_worker_pool(); + + let num_workers = self.context.num_proof_workers.max(1); + let (proof_tx, proof_rx) = mpsc::channel(num_workers * 2); + + // Spawn tx sender with the current root + let tx_sender_handle = TxSender::spawn( + self.context.clone(), + proof_rx, + self.zkp_batch_size(), + self.current_root, + ); + + let job_tx = self + .worker_pool + .as_ref() + .expect("worker pool should be initialized") + .job_tx + .clone(); + + // Build and send jobs one at a time + let jobs_sent = self + .enqueue_batches(max_batches, job_tx, proof_tx.clone()) + .await?; + + // Drop proof_tx to signal no more proofs are coming + drop(proof_tx); + + // Wait for all transactions to complete + let tx_processed = match tx_sender_handle.await { + Ok(res) => match res { + Ok(processed) => processed, + Err(e) => { + warn!("Tx sender error, resetting staging tree: {}", e); + self.reset_staging_tree(); + return Err(e); + } + }, + Err(e) => { + warn!("Tx sender join error, resetting staging tree: {}", e); + self.reset_staging_tree(); + return Err(anyhow!("Tx sender join error: {}", e)); + } + }; + + if tx_processed < jobs_sent * self.zkp_batch_size as usize { + debug!( + "Processed {} items but expected {}, some proofs may have failed", + tx_processed, + jobs_sent * self.zkp_batch_size as usize + ); + } + + Ok(tx_processed) + } + + async fn enqueue_batches( + &mut self, + max_batches: usize, + job_tx: async_channel::Sender, + result_tx: mpsc::Sender, + ) -> crate::Result { + let zkp_batch_size = self.zkp_batch_size() as usize; + let total_needed = max_batches.saturating_mul(zkp_batch_size); + let fetch_len = total_needed as u64; + + debug!( + "Fetching address batches: fetch_len={}, zkp_batch_size={}", + fetch_len, + self.zkp_batch_size() + ); + let address_queue = + fetch_address_batches(&self.context, None, fetch_len, self.zkp_batch_size()).await?; + + let Some(address_queue) = address_queue else { + debug!("fetch_address_batches returned None, no 
address queue data available"); + return Ok(0); + }; + + if address_queue.addresses.is_empty() { + debug!("Address queue is empty, returning"); + return Ok(0); + } + + // Validate we have required data + if address_queue.subtrees.is_empty() { + return Err(anyhow!( + "Address queue missing subtrees data (required for proof generation)" + )); + } + + // Calculate how many complete batches we can process + let available = address_queue.addresses.len(); + let num_slices = (available / zkp_batch_size).min(max_batches); + + // If we can't form any complete batches, return early + if num_slices == 0 { + debug!( + "Not enough addresses for a complete batch: have {}, need {}", + available, zkp_batch_size + ); + return Ok(0); + } + + // Validate we have hash chains for the batches we'll process + if address_queue.leaves_hash_chains.len() < num_slices { + return Err(anyhow!( + "Insufficient leaves_hash_chains: have {}, need {} for {} batches", + address_queue.leaves_hash_chains.len(), + num_slices, + num_slices + )); + } + + // Build or update staging tree + self.current_root = address_queue.initial_root; + info!( + "Synced from indexer: root {:?}[..4], start_index {}, {} subtrees", + &self.current_root[..4], + address_queue.start_index, + address_queue.subtrees.len() + ); + + self.build_staging_tree( + &address_queue.subtrees, + address_queue.start_index as usize, + address_queue.initial_root, + )?; + + let mut jobs_sent = 0usize; + + // Generate circuit inputs and send jobs sequentially + for batch_idx in 0..num_slices { + let start = batch_idx * zkp_batch_size; + if let Some(job) = self + .build_append_job(batch_idx, &address_queue, start, result_tx.clone()) + .await? + { + job_tx.send(job).await?; + jobs_sent += 1; + } else { + break; + } + } + + info!("Enqueued {} jobs for proof generation", jobs_sent); + Ok(jobs_sent) + } + + async fn build_append_job( + &mut self, + batch_idx: usize, + address_queue: &light_client::indexer::AddressQueueDataV2, + start: usize, + result_tx: mpsc::Sender, + ) -> crate::Result> { + let range = self.batch_range(address_queue.addresses.len(), start); + let addresses = address_queue.addresses[range.clone()].to_vec(); + let zkp_batch_size = addresses.len(); + + // Get data for this batch + let low_element_values = address_queue.low_element_values[range.clone()].to_vec(); + let low_element_next_values = address_queue.low_element_next_values[range.clone()].to_vec(); + let low_element_indices: Vec = address_queue.low_element_indices[range.clone()] + .iter() + .map(|&i| i as usize) + .collect(); + let low_element_next_indices: Vec = address_queue.low_element_next_indices + [range.clone()] + .iter() + .map(|&i| i as usize) + .collect(); + let low_element_proofs = address_queue.low_element_proofs[range].to_vec(); + + // Get pre-computed hash chain for this batch + let leaves_hashchain = + Self::get_leaves_hashchain(&address_queue.leaves_hash_chains, batch_idx)?; + + // Get mutable reference to staging tree + let staging_tree = self.staging_tree.as_mut().ok_or_else(|| { + anyhow!( + "Staging tree not initialized for append job (batch_idx={})", + batch_idx + ) + })?; + + // Process batch using AddressStagingTree which internally uses + // get_batch_address_append_circuit_inputs with proper changelog management + let result = staging_tree + .process_batch( + addresses, + low_element_values, + low_element_next_values, + low_element_indices, + low_element_next_indices, + low_element_proofs, + leaves_hashchain, + zkp_batch_size, + ) + .map_err(|e| anyhow!("Failed to process 
address batch: {}", e))?; + + let new_root = result.new_root; + debug!( + "Address batch {} root transition: {:?}[..4] -> {:?}[..4]", + batch_idx, + &result.old_root[..4], + &new_root[..4] + ); + + // Update current root + self.current_root = new_root; + + Ok(Some(Self::create_job( + batch_idx as u64, + ProofInput::AddressAppend(result.circuit_inputs), + result_tx, + ))) + } +} diff --git a/forester/src/processor/v2/common.rs b/forester/src/processor/v2/common.rs index 5e1906027d..4a6f6f1ffd 100644 --- a/forester/src/processor/v2/common.rs +++ b/forester/src/processor/v2/common.rs @@ -1,178 +1,108 @@ -use std::{future::Future, sync::Arc, time::Duration}; - -use borsh::BorshSerialize; -use forester_utils::{ - forester_epoch::EpochPhases, rpc_pool::SolanaRpcPool, utils::wait_for_indexer, +use std::{ + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + time::Duration, }; + +use forester_utils::{forester_epoch::EpochPhases, rpc_pool::SolanaRpcPool}; pub use forester_utils::{ParsedMerkleTreeData, ParsedQueueData}; -use futures::{pin_mut, stream::StreamExt, Stream}; -use light_batched_merkle_tree::{ - batch::BatchState, merkle_tree::BatchedMerkleTreeAccount, queue::BatchedQueueAccount, -}; use light_client::rpc::Rpc; -use light_compressed_account::TreeType; use light_registry::protocol_config::state::EpochState; use solana_sdk::{instruction::Instruction, pubkey::Pubkey, signature::Keypair, signer::Signer}; use tokio::sync::Mutex; -use tracing::{debug, error, info, trace}; +use tracing::{debug, info}; -use super::{address, state}; use crate::{ errors::ForesterError, processor::tx_cache::ProcessedHashCache, slot_tracker::SlotTracker, Result, }; -#[derive(Debug)] -pub enum BatchReadyState { - NotReady, - AddressReadyForAppend { - merkle_tree_data: ParsedMerkleTreeData, - }, - StateReadyForAppend { - merkle_tree_data: ParsedMerkleTreeData, - output_queue_data: ParsedQueueData, - }, - StateReadyForNullify { - merkle_tree_data: ParsedMerkleTreeData, - }, +const SLOTS_STOP_THRESHOLD: u64 = 1; + +#[derive(Debug, Clone)] +pub struct ProverConfig { + pub append_url: String, + pub update_url: String, + pub address_append_url: String, + pub api_key: Option, + pub polling_interval: Duration, + pub max_wait_time: Duration, +} + +#[derive(Debug, Clone)] +pub struct UpdateEligibility { + pub end_slot: u64, } #[derive(Debug)] pub struct BatchContext { pub rpc_pool: Arc>, - pub authority: Keypair, + pub authority: Arc, pub derivation: Pubkey, pub epoch: u64, pub merkle_tree: Pubkey, pub output_queue: Pubkey, - pub prover_append_url: String, - pub prover_update_url: String, - pub prover_address_append_url: String, - pub prover_api_key: Option, - pub prover_polling_interval: Duration, - pub prover_max_wait_time: Duration, + pub prover_config: ProverConfig, pub ops_cache: Arc>, pub epoch_phases: EpochPhases, pub slot_tracker: Arc, - /// input queue size from gRPC pub input_queue_hint: Option, - /// output queue size from gRPC pub output_queue_hint: Option, + pub num_proof_workers: usize, + pub forester_eligibility_end_slot: Arc, } -#[derive(Debug)] -pub struct BatchProcessor { - context: BatchContext, - tree_type: TreeType, -} - -/// Processes a stream of batched instruction data into transactions. 
-pub(crate) async fn process_stream( - context: &BatchContext, - stream_creator_future: FutC, - instruction_builder: impl Fn(&D) -> Instruction, -) -> Result -where - R: Rpc, - S: Stream>> + Send, - D: BorshSerialize, - FutC: Future> + Send, -{ - trace!("Executing batched stream processor (hybrid)"); - - let (batch_stream, zkp_batch_size) = stream_creator_future.await?; - - if zkp_batch_size == 0 { - trace!("ZKP batch size is 0, no work to do."); - return Ok(0); - } - - pin_mut!(batch_stream); - let mut total_instructions_processed = 0; - - while let Some(batch_result) = batch_stream.next().await { - let instruction_batch = batch_result?; - - if instruction_batch.is_empty() { - continue; +impl Clone for BatchContext { + fn clone(&self) -> Self { + Self { + rpc_pool: self.rpc_pool.clone(), + authority: self.authority.clone(), + derivation: self.derivation, + epoch: self.epoch, + merkle_tree: self.merkle_tree, + output_queue: self.output_queue, + prover_config: self.prover_config.clone(), + ops_cache: self.ops_cache.clone(), + epoch_phases: self.epoch_phases.clone(), + slot_tracker: self.slot_tracker.clone(), + input_queue_hint: self.input_queue_hint, + output_queue_hint: self.output_queue_hint, + num_proof_workers: self.num_proof_workers, + forester_eligibility_end_slot: self.forester_eligibility_end_slot.clone(), } - - let current_slot = context.slot_tracker.estimated_current_slot(); - let phase_end_slot = context.epoch_phases.active.end; - let slots_remaining = phase_end_slot.saturating_sub(current_slot); - - const MIN_SLOTS_FOR_TRANSACTION: u64 = 30; - if slots_remaining < MIN_SLOTS_FOR_TRANSACTION { - info!( - "Only {} slots remaining in active phase (need at least {}), stopping batch processing", - slots_remaining, MIN_SLOTS_FOR_TRANSACTION - ); - if !instruction_batch.is_empty() { - let instructions: Vec = - instruction_batch.iter().map(&instruction_builder).collect(); - let _ = send_transaction_batch(context, instructions).await; - } - break; - } - - let instructions: Vec = - instruction_batch.iter().map(&instruction_builder).collect(); - - match send_transaction_batch(context, instructions.clone()).await { - Ok(sig) => { - total_instructions_processed += instruction_batch.len(); - debug!( - "Successfully processed batch with {} instructions, signature: {}", - instruction_batch.len(), - sig - ); - - { - let rpc = context.rpc_pool.get_connection().await?; - wait_for_indexer(&*rpc) - .await - .map_err(|e| anyhow::anyhow!("Error waiting for indexer: {:?}", e))?; - } - } - Err(e) => { - if let Some(ForesterError::NotInActivePhase) = e.downcast_ref::() { - info!("Active phase ended while processing batches, stopping gracefully"); - break; - } else { - error!( - "Failed to process batch with {} instructions for tree {}: {:?}", - instructions.len(), - context.merkle_tree, - e - ); - return Err(e); - } - } - } - } - - if total_instructions_processed == 0 { - trace!("No instructions were processed from the stream."); - return Ok(0); } - - let total_items_processed = total_instructions_processed * zkp_batch_size as usize; - Ok(total_items_processed) } pub(crate) async fn send_transaction_batch( context: &BatchContext, instructions: Vec, ) -> Result { - // Check if we're still in the active phase before sending the transaction let current_slot = context.slot_tracker.estimated_current_slot(); let current_phase_state = context.epoch_phases.get_current_epoch_state(current_slot); if current_phase_state != EpochState::Active { - trace!( - "Skipping transaction send: not in active phase (current 
phase: {:?}, slot: {})", - current_phase_state, - current_slot + debug!( + "!! Skipping transaction send: not in active phase (current phase: {:?}, slot: {})", + current_phase_state, current_slot + ); + return Err(ForesterError::NotInActivePhase.into()); + } + + let forester_end = context + .forester_eligibility_end_slot + .load(Ordering::Acquire); + let eligibility_end_slot = if forester_end > 0 { + forester_end + } else { + context.epoch_phases.active.end + }; + let slots_remaining = eligibility_end_slot.saturating_sub(current_slot); + if slots_remaining < SLOTS_STOP_THRESHOLD { + debug!( + "Skipping transaction send: only {} slots remaining until eligibility ends", + slots_remaining ); return Err(ForesterError::NotInActivePhase.into()); } @@ -187,7 +117,7 @@ pub(crate) async fn send_transaction_batch( .create_and_send_transaction( &instructions, &context.authority.pubkey(), - &[&context.authority], + &[context.authority.as_ref()], ) .await?; @@ -209,397 +139,3 @@ pub(crate) async fn send_transaction_batch( Ok(signature.to_string()) } - -impl BatchProcessor { - pub fn new(context: BatchContext, tree_type: TreeType) -> Self { - Self { context, tree_type } - } - - pub async fn process(&self) -> Result { - trace!( - "Starting batch processing for tree type: {:?}", - self.tree_type - ); - let state = self.verify_batch_ready().await; - - match state { - BatchReadyState::AddressReadyForAppend { merkle_tree_data } => { - trace!( - "Processing address append for tree: {}", - self.context.merkle_tree - ); - - let batch_hash = format!( - "address_batch_{}_{}", - self.context.merkle_tree, self.context.epoch - ); - { - let mut cache = self.context.ops_cache.lock().await; - if cache.contains(&batch_hash) { - debug!("Skipping already processed address batch: {}", batch_hash); - return Ok(0); - } - cache.add(&batch_hash); - } - - let result = address::process_batch(&self.context, merkle_tree_data).await; - - if let Err(ref e) = result { - error!( - "Address append failed for tree {}: {:?}", - self.context.merkle_tree, e - ); - } - - let mut cache = self.context.ops_cache.lock().await; - cache.cleanup_by_key(&batch_hash); - trace!("Cache cleaned up for batch: {}", batch_hash); - - result - } - BatchReadyState::StateReadyForAppend { - merkle_tree_data, - output_queue_data, - } => { - trace!( - "Process state append for tree: {}", - self.context.merkle_tree - ); - let result = self - .process_state_append_hybrid(merkle_tree_data, output_queue_data) - .await; - if let Err(ref e) = result { - error!( - "State append failed for tree {}: {:?}", - self.context.merkle_tree, e - ); - } - result - } - BatchReadyState::StateReadyForNullify { merkle_tree_data } => { - trace!( - "Processing batch for nullify, tree: {}", - self.context.merkle_tree - ); - let result = self.process_state_nullify_hybrid(merkle_tree_data).await; - if let Err(ref e) = result { - error!( - "State nullify failed for tree {}: {:?}", - self.context.merkle_tree, e - ); - } - result - } - BatchReadyState::NotReady => { - trace!( - "Batch not ready for processing, tree: {}", - self.context.merkle_tree - ); - Ok(0) - } - } - } - - async fn verify_batch_ready(&self) -> BatchReadyState { - let rpc = match self.context.rpc_pool.get_connection().await { - Ok(rpc) => rpc, - Err(_) => return BatchReadyState::NotReady, - }; - - let merkle_tree_account = rpc - .get_account(self.context.merkle_tree) - .await - .ok() - .flatten(); - let output_queue_account = if self.tree_type == TreeType::StateV2 { - rpc.get_account(self.context.output_queue) - .await - 
.ok() - .flatten() - } else { - None - }; - - let (merkle_tree_data, input_ready) = if let Some(mut account) = merkle_tree_account { - match self.parse_merkle_tree_account(&mut account) { - Ok((data, ready)) => (Some(data), ready), - Err(_) => (None, false), - } - } else { - (None, false) - }; - - let (output_queue_data, output_ready) = if self.tree_type == TreeType::StateV2 { - if let Some(mut account) = output_queue_account { - match self.parse_output_queue_account(&mut account) { - Ok((data, ready)) => (Some(data), ready), - Err(_) => (None, false), - } - } else { - (None, false) - } - } else { - (None, false) - }; - - trace!( - "self.tree_type: {}, input_ready: {}, output_ready: {}", - self.tree_type, - input_ready, - output_ready - ); - - if self.tree_type == TreeType::AddressV2 { - return if input_ready { - if let Some(mt_data) = merkle_tree_data { - BatchReadyState::AddressReadyForAppend { - merkle_tree_data: mt_data, - } - } else { - BatchReadyState::NotReady - } - } else { - BatchReadyState::NotReady - }; - } - - // For State tree type, balance appends and nullifies operations - // based on the queue states - match (input_ready, output_ready) { - (true, true) => { - if let (Some(mt_data), Some(oq_data)) = (merkle_tree_data, output_queue_data) { - // If both queues are ready, check their fill levels - let input_fill = Self::calculate_completion_from_parsed( - mt_data.num_inserted_zkps, - mt_data.current_zkp_batch_index, - ); - let output_fill = Self::calculate_completion_from_parsed( - oq_data.num_inserted_zkps, - oq_data.current_zkp_batch_index, - ); - - trace!( - "Input queue fill: {:.2}, Output queue fill: {:.2}", - input_fill, - output_fill - ); - if input_fill > output_fill { - BatchReadyState::StateReadyForNullify { - merkle_tree_data: mt_data, - } - } else { - BatchReadyState::StateReadyForAppend { - merkle_tree_data: mt_data, - output_queue_data: oq_data, - } - } - } else { - BatchReadyState::NotReady - } - } - (true, false) => { - if let Some(mt_data) = merkle_tree_data { - BatchReadyState::StateReadyForNullify { - merkle_tree_data: mt_data, - } - } else { - BatchReadyState::NotReady - } - } - (false, true) => { - if let (Some(mt_data), Some(oq_data)) = (merkle_tree_data, output_queue_data) { - BatchReadyState::StateReadyForAppend { - merkle_tree_data: mt_data, - output_queue_data: oq_data, - } - } else { - BatchReadyState::NotReady - } - } - (false, false) => BatchReadyState::NotReady, - } - } - - async fn process_state_nullify_hybrid( - &self, - merkle_tree_data: ParsedMerkleTreeData, - ) -> Result { - let zkp_batch_size = merkle_tree_data.zkp_batch_size as usize; - - let batch_hash = format!( - "state_nullify_hybrid_{}_{}", - self.context.merkle_tree, self.context.epoch - ); - - { - let mut cache = self.context.ops_cache.lock().await; - if cache.contains(&batch_hash) { - trace!( - "Skipping already processed state nullify batch (hybrid): {}", - batch_hash - ); - return Ok(0); - } - cache.add(&batch_hash); - } - - state::perform_nullify(&self.context, merkle_tree_data).await?; - - trace!( - "State nullify operation (hybrid) completed for tree: {}", - self.context.merkle_tree - ); - let mut cache = self.context.ops_cache.lock().await; - cache.cleanup_by_key(&batch_hash); - trace!("Cache cleaned up for batch: {}", batch_hash); - - Ok(zkp_batch_size) - } - - async fn process_state_append_hybrid( - &self, - merkle_tree_data: ParsedMerkleTreeData, - output_queue_data: ParsedQueueData, - ) -> Result { - let zkp_batch_size = output_queue_data.zkp_batch_size as usize; - - let 
batch_hash = format!( - "state_append_hybrid_{}_{}", - self.context.merkle_tree, self.context.epoch - ); - { - let mut cache = self.context.ops_cache.lock().await; - if cache.contains(&batch_hash) { - trace!( - "Skipping already processed state append batch (hybrid): {}", - batch_hash - ); - return Ok(0); - } - cache.add(&batch_hash); - } - state::perform_append(&self.context, merkle_tree_data, output_queue_data).await?; - trace!( - "State append operation (hybrid) completed for tree: {}", - self.context.merkle_tree - ); - - let mut cache = self.context.ops_cache.lock().await; - cache.cleanup_by_key(&batch_hash); - - Ok(zkp_batch_size) - } - - /// Parse merkle tree account and check if batch is ready - fn parse_merkle_tree_account( - &self, - account: &mut solana_sdk::account::Account, - ) -> Result<(ParsedMerkleTreeData, bool)> { - let merkle_tree = match self.tree_type { - TreeType::AddressV2 => BatchedMerkleTreeAccount::address_from_bytes( - account.data.as_mut_slice(), - &self.context.merkle_tree.into(), - ), - TreeType::StateV2 => BatchedMerkleTreeAccount::state_from_bytes( - account.data.as_mut_slice(), - &self.context.merkle_tree.into(), - ), - _ => return Err(ForesterError::InvalidTreeType(self.tree_type).into()), - }?; - - let batch_index = merkle_tree.queue_batches.pending_batch_index; - let batch = merkle_tree - .queue_batches - .batches - .get(batch_index as usize) - .ok_or_else(|| anyhow::anyhow!("Batch not found"))?; - - let num_inserted_zkps = batch.get_num_inserted_zkps(); - let current_zkp_batch_index = batch.get_current_zkp_batch_index(); - - let mut leaves_hash_chains = Vec::new(); - for i in num_inserted_zkps..current_zkp_batch_index { - leaves_hash_chains - .push(merkle_tree.hash_chain_stores[batch_index as usize][i as usize]); - } - - debug!( - "Extracted {} hash chains from on-chain merkle tree. 
batch_index={}, num_inserted_zkps={}, current_zkp_batch_index={}", - leaves_hash_chains.len(), - batch_index, - num_inserted_zkps, - current_zkp_batch_index - ); - if !leaves_hash_chains.is_empty() { - debug!("First hash chain: {:?}", leaves_hash_chains.first()); - debug!("Last hash chain: {:?}", leaves_hash_chains.last()); - } - - let parsed_data = ParsedMerkleTreeData { - next_index: merkle_tree.next_index, - current_root: *merkle_tree.root_history.last().unwrap(), - root_history: merkle_tree.root_history.to_vec(), - zkp_batch_size: batch.zkp_batch_size as u16, - pending_batch_index: batch_index as u32, - num_inserted_zkps, - current_zkp_batch_index, - batch_start_index: batch.start_index, - leaves_hash_chains, - }; - - let is_ready = batch.get_state() != BatchState::Inserted - && batch.get_current_zkp_batch_index() > batch.get_num_inserted_zkps(); - - Ok((parsed_data, is_ready)) - } - - /// Parse output queue account and check if batch is ready - fn parse_output_queue_account( - &self, - account: &mut solana_sdk::account::Account, - ) -> Result<(ParsedQueueData, bool)> { - let output_queue = BatchedQueueAccount::output_from_bytes(account.data.as_mut_slice())?; - - let batch_index = output_queue.batch_metadata.pending_batch_index; - let batch = output_queue - .batch_metadata - .batches - .get(batch_index as usize) - .ok_or_else(|| anyhow::anyhow!("Batch not found"))?; - - let num_inserted_zkps = batch.get_num_inserted_zkps(); - let current_zkp_batch_index = batch.get_current_zkp_batch_index(); - - let mut leaves_hash_chains = Vec::new(); - for i in num_inserted_zkps..current_zkp_batch_index { - leaves_hash_chains - .push(output_queue.hash_chain_stores[batch_index as usize][i as usize]); - } - - let parsed_data = ParsedQueueData { - zkp_batch_size: output_queue.batch_metadata.zkp_batch_size as u16, - pending_batch_index: batch_index as u32, - num_inserted_zkps, - current_zkp_batch_index, - leaves_hash_chains, - }; - - let is_ready = batch.get_state() != BatchState::Inserted - && batch.get_current_zkp_batch_index() > batch.get_num_inserted_zkps(); - - Ok((parsed_data, is_ready)) - } - - /// Calculate completion percentage from parsed data - fn calculate_completion_from_parsed( - num_inserted_zkps: u64, - current_zkp_batch_index: u64, - ) -> f64 { - let total = current_zkp_batch_index; - if total == 0 { - return 0.0; - } - let remaining = total - num_inserted_zkps; - remaining as f64 / total as f64 - } -} diff --git a/forester/src/processor/v2/mod.rs b/forester/src/processor/v2/mod.rs index 6e660ba4e4..f2ab01fa19 100644 --- a/forester/src/processor/v2/mod.rs +++ b/forester/src/processor/v2/mod.rs @@ -1,8 +1,7 @@ -mod address; -mod common; -mod state; +pub mod address; +pub mod common; +pub mod state; -use common::BatchProcessor; use light_client::rpc::Rpc; use tracing::{instrument, trace}; @@ -22,9 +21,18 @@ pub async fn process_batched_operations( tree_type: TreeType, ) -> Result { trace!("process_batched_operations"); - let processor = BatchProcessor::new(context, tree_type); - processor.process().await + match tree_type { + TreeType::AddressV2 => { + trace!("AddressV2 processing should be handled through AddressSupervisor actor"); + Ok(0) + } + TreeType::StateV2 => { + trace!("StateV2 processing should be handled through StateSupervisor actor"); + Ok(0) + } + _ => Ok(0), + } } -pub use common::BatchContext; +pub use common::{BatchContext, ProverConfig}; use light_compressed_account::TreeType; diff --git a/forester/src/processor/v2/state.rs b/forester/src/processor/v2/state.rs deleted 
file mode 100644 index 2d2ea9aa12..0000000000 --- a/forester/src/processor/v2/state.rs +++ /dev/null @@ -1,123 +0,0 @@ -use anyhow::{Error, Ok}; -use borsh::BorshSerialize; -use forester_utils::instructions::{ - state_batch_append::get_append_instruction_stream, - state_batch_nullify::get_nullify_instruction_stream, -}; -use futures::stream::{Stream, StreamExt}; -use light_batched_merkle_tree::merkle_tree::{ - InstructionDataBatchAppendInputs, InstructionDataBatchNullifyInputs, -}; -use light_client::rpc::Rpc; -use light_registry::account_compression_cpi::sdk::{ - create_batch_append_instruction, create_batch_nullify_instruction, -}; -use solana_program::instruction::Instruction; -use solana_sdk::signer::Signer; -use tracing::instrument; - -use super::common::{process_stream, BatchContext, ParsedMerkleTreeData, ParsedQueueData}; -use crate::Result; - -async fn create_nullify_stream_future( - ctx: &BatchContext, - merkle_tree_data: ParsedMerkleTreeData, -) -> Result<( - impl Stream>> + Send, - u16, -)> -where - R: Rpc, -{ - let (stream, size) = get_nullify_instruction_stream( - ctx.rpc_pool.clone(), - ctx.merkle_tree, - ctx.prover_update_url.clone(), - ctx.prover_api_key.clone(), - ctx.prover_polling_interval, - ctx.prover_max_wait_time, - merkle_tree_data, - ) - .await - .map_err(Error::from)?; - let stream = stream.map(|item| item.map_err(Error::from)); - Ok((stream, size)) -} - -async fn create_append_stream_future( - ctx: &BatchContext, - merkle_tree_data: ParsedMerkleTreeData, - output_queue_data: ParsedQueueData, -) -> Result<( - impl Stream>> + Send, - u16, -)> -where - R: Rpc, -{ - let (stream, size) = get_append_instruction_stream( - ctx.rpc_pool.clone(), - ctx.merkle_tree, - ctx.prover_append_url.clone(), - ctx.prover_api_key.clone(), - ctx.prover_polling_interval, - ctx.prover_max_wait_time, - merkle_tree_data, - output_queue_data, - ) - .await - .map_err(Error::from)?; - let stream = stream.map(|item| item.map_err(Error::from)); - Ok((stream, size)) -} - -#[instrument( - level = "debug", - skip(context, merkle_tree_data), - fields(merkle_tree = ?context.merkle_tree) -)] -pub(crate) async fn perform_nullify( - context: &BatchContext, - merkle_tree_data: ParsedMerkleTreeData, -) -> Result<()> { - let instruction_builder = |data: &InstructionDataBatchNullifyInputs| -> Instruction { - create_batch_nullify_instruction( - context.authority.pubkey(), - context.derivation, - context.merkle_tree, - context.epoch, - data.try_to_vec().unwrap(), - ) - }; - - let stream_future = create_nullify_stream_future(context, merkle_tree_data); - - process_stream(context, stream_future, instruction_builder).await?; - Ok(()) -} - -#[instrument( - level = "debug", - skip(context, merkle_tree_data, output_queue_data), - fields(merkle_tree = ?context.merkle_tree) -)] -pub(crate) async fn perform_append( - context: &BatchContext, - merkle_tree_data: ParsedMerkleTreeData, - output_queue_data: ParsedQueueData, -) -> Result<()> { - let instruction_builder = |data: &InstructionDataBatchAppendInputs| -> Instruction { - create_batch_append_instruction( - context.authority.pubkey(), - context.derivation, - context.merkle_tree, - context.output_queue, - context.epoch, - data.try_to_vec().unwrap(), - ) - }; - - let stream_future = create_append_stream_future(context, merkle_tree_data, output_queue_data); - process_stream(context, stream_future, instruction_builder).await?; - Ok(()) -} diff --git a/forester/src/processor/v2/state/helpers.rs b/forester/src/processor/v2/state/helpers.rs new file mode 100644 index 
0000000000..8706275e39 --- /dev/null +++ b/forester/src/processor/v2/state/helpers.rs @@ -0,0 +1,162 @@ +use anyhow::anyhow; +use light_batched_merkle_tree::merkle_tree::BatchedMerkleTreeAccount; +use light_client::{ + indexer::{Indexer, QueueElementsV2Options}, + rpc::Rpc, +}; +use light_compressed_account::Pubkey; +use tracing::warn; + +use crate::processor::v2::BatchContext; + +/// Fetches zkp_batch_size from on-chain merkle tree account (called once at startup) +pub async fn fetch_zkp_batch_size<R: Rpc>(context: &BatchContext<R>) -> crate::Result<u64> { + let rpc = context.rpc_pool.get_connection().await?; + let mut account = rpc + .get_account(context.merkle_tree) + .await? + .ok_or_else(|| anyhow!("Merkle tree account not found"))?; + + let tree = BatchedMerkleTreeAccount::state_from_bytes( + account.data.as_mut_slice(), + &context.merkle_tree.into(), + )?; + + let batch_index = tree.queue_batches.pending_batch_index; + let batch = tree + .queue_batches + .batches + .get(batch_index as usize) + .ok_or_else(|| anyhow!("Batch not found"))?; + + Ok(batch.zkp_batch_size) +} + +pub async fn fetch_address_zkp_batch_size<R: Rpc>(context: &BatchContext<R>) -> crate::Result<u64> { + let rpc = context.rpc_pool.get_connection().await?; + let mut account = rpc + .get_account(context.merkle_tree) + .await? + .ok_or_else(|| anyhow!("Merkle tree account not found"))?; + + let merkle_tree_pubkey = Pubkey::from(context.merkle_tree.to_bytes()); + let tree = BatchedMerkleTreeAccount::address_from_bytes(&mut account.data, &merkle_tree_pubkey) + .map_err(|e| anyhow!("Failed to deserialize address tree: {}", e))?; + + let batch_index = tree.queue_batches.pending_batch_index; + let batch = tree + .queue_batches + .batches + .get(batch_index as usize) + .ok_or_else(|| anyhow!("Batch not found"))?; + + Ok(batch.zkp_batch_size) +} + +pub async fn fetch_batches<R: Rpc>( + context: &BatchContext<R>, + output_start_index: Option<u64>, + input_start_index: Option<u64>, + fetch_len: u64, + zkp_batch_size: u64, +) -> crate::Result<Option<light_client::indexer::StateQueueDataV2>> { + let fetch_len_u16: u16 = match fetch_len.try_into() { + Ok(v) => v, + Err(_) => { + warn!( + "fetch_len {} exceeds u16::MAX, clamping to {}", + fetch_len, + u16::MAX + ); + u16::MAX + } + }; + let zkp_batch_size_u16: u16 = match zkp_batch_size.try_into() { + Ok(v) => v, + Err(_) => { + warn!( + "zkp_batch_size {} exceeds u16::MAX, clamping to {}", + zkp_batch_size, + u16::MAX + ); + u16::MAX + } + }; + + let mut rpc = context.rpc_pool.get_connection().await?; + let indexer = rpc.indexer_mut()?; + let options = QueueElementsV2Options::default() + .with_output_queue(output_start_index, Some(fetch_len_u16)) + .with_output_queue_batch_size(Some(zkp_batch_size_u16)) + .with_input_queue(input_start_index, Some(fetch_len_u16)) + .with_input_queue_batch_size(Some(zkp_batch_size_u16)); + + let res = indexer + .get_queue_elements(context.merkle_tree.to_bytes(), options, None) + .await?; + + Ok(res.value.state_queue) +} + +pub async fn fetch_address_batches<R: Rpc>( + context: &BatchContext<R>, + output_start_index: Option<u64>, + fetch_len: u64, + zkp_batch_size: u64, +) -> crate::Result<Option<light_client::indexer::AddressQueueDataV2>> { + let fetch_len_u16: u16 = match fetch_len.try_into() { + Ok(v) => v, + Err(_) => { + warn!( + "fetch_len {} exceeds u16::MAX, clamping to {}", + fetch_len, + u16::MAX + ); + u16::MAX + } + }; + let zkp_batch_size_u16: u16 = match zkp_batch_size.try_into() { + Ok(v) => v, + Err(_) => { + warn!( + "zkp_batch_size {} exceeds u16::MAX, clamping to {}", + zkp_batch_size, + u16::MAX + ); + u16::MAX + } + }; + + let mut rpc = context.rpc_pool.get_connection().await?; + let indexer
= rpc.indexer_mut()?; + + let options = QueueElementsV2Options::default() + .with_address_queue(output_start_index, Some(fetch_len_u16)) + .with_address_queue_batch_size(Some(zkp_batch_size_u16)); + + tracing::debug!( + "fetch_address_batches: tree={}, start={:?}, len={}, zkp_batch_size={}", + context.merkle_tree, + output_start_index, + fetch_len_u16, + zkp_batch_size_u16 + ); + + let res = indexer + .get_queue_elements(context.merkle_tree.to_bytes(), options, None) + .await?; + + if let Some(ref aq) = res.value.address_queue { + tracing::debug!( + "fetch_address_batches response: address_queue present = true, addresses={}, subtrees={}, leaves_hash_chains={}, start_index={}", + aq.addresses.len(), + aq.subtrees.len(), + aq.leaves_hash_chains.len(), + aq.start_index + ); + } else { + tracing::debug!("fetch_address_batches response: address_queue present = false"); + } + + Ok(res.value.address_queue) +} diff --git a/forester/src/processor/v2/state/mod.rs b/forester/src/processor/v2/state/mod.rs new file mode 100644 index 0000000000..004296d171 --- /dev/null +++ b/forester/src/processor/v2/state/mod.rs @@ -0,0 +1,8 @@ +pub mod helpers; +pub mod proof_worker; +mod supervisor; +pub mod tx_sender; + +pub use supervisor::{ProcessQueueUpdate, QueueWork, StateSupervisor}; + +pub use crate::processor::v2::common::UpdateEligibility; diff --git a/forester/src/processor/v2/state/proof_worker.rs b/forester/src/processor/v2/state/proof_worker.rs new file mode 100644 index 0000000000..20a7a4d2ad --- /dev/null +++ b/forester/src/processor/v2/state/proof_worker.rs @@ -0,0 +1,154 @@ +use async_channel::Receiver; +use light_batched_merkle_tree::merkle_tree::{ + InstructionDataBatchAppendInputs, InstructionDataBatchNullifyInputs, +}; +use light_prover_client::{ + proof_client::ProofClient, + proof_types::{ + batch_address_append::BatchAddressAppendInputs, batch_append::BatchAppendsCircuitInputs, + batch_update::BatchUpdateCircuitInputs, + }, +}; +use tokio::sync::mpsc; +use tracing::{debug, info, trace, warn}; + +use crate::processor::v2::{state::tx_sender::BatchInstruction, ProverConfig}; + +#[derive(Debug)] +pub enum ProofInput { + Append(BatchAppendsCircuitInputs), + Nullify(BatchUpdateCircuitInputs), + AddressAppend(BatchAddressAppendInputs), +} + +pub struct ProofJob { + pub(crate) seq: u64, + pub(crate) inputs: ProofInput, + pub(crate) result_tx: mpsc::Sender, +} + +#[derive(Debug)] +pub struct ProofResult { + pub(crate) seq: u64, + pub(crate) instruction: BatchInstruction, +} + +pub fn spawn_proof_workers( + num_workers: usize, + config: ProverConfig, +) -> async_channel::Sender { + // Enforce minimum of 1 worker to prevent zero-capacity channels and no workers + let num_workers = if num_workers == 0 { + warn!("spawn_proof_workers called with num_workers=0, using 1 instead"); + 1 + } else { + num_workers + }; + + let channel_capacity = num_workers * 2; + let (job_tx, job_rx) = async_channel::bounded::(channel_capacity); + + for worker_id in 0..num_workers { + let job_rx = job_rx.clone(); + let config = config.clone(); + tokio::spawn(async move { run_proof_worker(worker_id, job_rx, config).await }); + } + + info!("Spawned {} proof workers", num_workers); + job_tx +} + +async fn run_proof_worker( + worker_id: usize, + job_rx: Receiver, + config: ProverConfig, +) -> crate::Result<()> { + let append_client = ProofClient::with_config( + config.append_url, + config.polling_interval, + config.max_wait_time, + config.api_key.clone(), + ); + let nullify_client = ProofClient::with_config( + config.update_url, + 
config.polling_interval, + config.max_wait_time, + config.api_key, + ); + + trace!("ProofWorker {} started", worker_id); + + while let Ok(job) = job_rx.recv().await { + debug!("ProofWorker {} processing job seq={}", worker_id, job.seq); + + let result = match job.inputs { + ProofInput::Append(inputs) => { + match append_client.generate_batch_append_proof(inputs).await { + Ok((proof, new_root)) => ProofResult { + seq: job.seq, + instruction: BatchInstruction::Append(vec![ + InstructionDataBatchAppendInputs { + new_root, + compressed_proof: proof.into(), + }, + ]), + }, + Err(e) => { + warn!("ProofWorker {} append proof failed: {}", worker_id, e); + continue; + } + } + } + ProofInput::Nullify(inputs) => { + match nullify_client.generate_batch_update_proof(inputs).await { + Ok((proof, new_root)) => ProofResult { + seq: job.seq, + instruction: BatchInstruction::Nullify(vec![ + InstructionDataBatchNullifyInputs { + new_root, + compressed_proof: proof.into(), + }, + ]), + }, + Err(e) => { + warn!("ProofWorker {} nullify proof failed: {}", worker_id, e); + continue; + } + } + } + ProofInput::AddressAppend(inputs) => { + match append_client.generate_batch_address_append_proof(inputs).await { + Ok((proof, new_root)) => ProofResult { + seq: job.seq, + instruction: BatchInstruction::AddressAppend(vec![ + light_batched_merkle_tree::merkle_tree::InstructionDataAddressAppendInputs { + new_root, + compressed_proof: proof.into(), + }, + ]), + }, + Err(e) => { + warn!( + "ProofWorker {} address append proof failed: {}", + worker_id, e + ); + continue; + } + } + } + }; + + // Send result via the job's own channel - if it's closed, just continue to next job + if job.result_tx.send(result).await.is_err() { + debug!( + "ProofWorker {} result channel closed for job seq={}, continuing", + worker_id, job.seq + ); + } else { + debug!("ProofWorker {} completed job seq={}", worker_id, job.seq); + } + } + + trace!("ProofWorker {} shutting down", worker_id); + Ok(()) +} diff --git a/forester/src/processor/v2/state/supervisor.rs b/forester/src/processor/v2/state/supervisor.rs new file mode 100644 index 0000000000..b4b618fccf --- /dev/null +++ b/forester/src/processor/v2/state/supervisor.rs @@ -0,0 +1,537 @@ +use anyhow::anyhow; +use forester_utils::staging_tree::{BatchType, StagingTree}; +use kameo::{ + actor::{ActorRef, WeakActorRef}, + error::ActorStopReason, + message::Message, + Actor, +}; +use light_batched_merkle_tree::constants::DEFAULT_BATCH_STATE_TREE_HEIGHT; +use light_client::rpc::Rpc; +use light_compressed_account::QueueType; +use light_prover_client::proof_types::{ + batch_append::BatchAppendsCircuitInputs, batch_update::BatchUpdateCircuitInputs, +}; +use light_registry::protocol_config::state::EpochState; +use tokio::sync::mpsc; +use tracing::{debug, info, trace, warn}; + +use crate::processor::v2::{ + state::{ + helpers::{fetch_batches, fetch_zkp_batch_size}, + proof_worker::{spawn_proof_workers, ProofInput, ProofJob, ProofResult}, + tx_sender::TxSender, + UpdateEligibility, + }, + BatchContext, +}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum Phase { + Append, + Nullify, +} + +#[derive(Debug, Clone)] +pub struct QueueWork { + pub queue_type: QueueType, + pub queue_size: u64, +} + +pub struct ProcessQueueUpdate { + pub queue_work: QueueWork, +} + +struct WorkerPool { + job_tx: async_channel::Sender, +} + +pub struct StateSupervisor { + context: BatchContext, + staging_tree: Option, + current_root: [u8; 32], + next_index: u64, + zkp_batch_size: u64, + seq: u64, + worker_pool: Option, +} + +impl 
Actor for StateSupervisor { + type Args = BatchContext; + type Error = anyhow::Error; + + async fn on_start( + context: Self::Args, + _actor_ref: ActorRef, + ) -> Result { + info!( + "StateSupervisor actor starting for tree {}", + context.merkle_tree + ); + + // Fetch zkp_batch_size once from on-chain (this is static per tree) + let zkp_batch_size = fetch_zkp_batch_size(&context).await?; + info!( + "StateSupervisor fetched zkp_batch_size={} for tree {}", + zkp_batch_size, context.merkle_tree + ); + + Ok(Self { + context, + staging_tree: None, + current_root: [0u8; 32], + next_index: 0, + zkp_batch_size, + seq: 0, + worker_pool: None, + }) + } + + async fn on_stop( + &mut self, + _actor_ref: WeakActorRef, + _reason: ActorStopReason, + ) -> Result<(), Self::Error> { + info!( + "StateSupervisor actor stopping for tree {}", + self.context.merkle_tree + ); + Ok(()) + } +} + +impl Message for StateSupervisor { + type Reply = crate::Result; + + async fn handle( + &mut self, + msg: ProcessQueueUpdate, + _ctx: &mut kameo::message::Context, + ) -> Self::Reply { + self.process_queue_update(msg.queue_work).await + } +} + +impl Message for StateSupervisor { + type Reply = (); + + async fn handle( + &mut self, + msg: UpdateEligibility, + _ctx: &mut kameo::message::Context, + ) -> Self::Reply { + debug!( + "Updating eligibility end slot to {} for tree {}", + msg.end_slot, self.context.merkle_tree + ); + self.context + .forester_eligibility_end_slot + .store(msg.end_slot, std::sync::atomic::Ordering::Relaxed); + } +} + +impl StateSupervisor { + fn zkp_batch_size(&self) -> u64 { + self.zkp_batch_size + } + + /// Gets the leaves hashchain for a batch, returning an error if not found. + fn get_leaves_hashchain( + leaves_hash_chains: &[[u8; 32]], + batch_idx: usize, + ) -> crate::Result<[u8; 32]> { + leaves_hash_chains.get(batch_idx).copied().ok_or_else(|| { + anyhow!( + "Missing leaves_hash_chain for batch {} (available: {})", + batch_idx, + leaves_hash_chains.len() + ) + }) + } + + /// Computes the slice range for a batch given total length and start index. + fn batch_range(&self, total_len: usize, start: usize) -> std::ops::Range { + let end = (start + self.zkp_batch_size as usize).min(total_len); + start..end + } + + /// Finalizes a proof job by updating state and returning the job. 
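For orientation, the slicing helpers above carve the fetched queue into fixed zkp-sized batches, and finish_job (next) stamps each job with a monotonically increasing seq so TxSender can submit in order. A worked example of the range math under assumed numbers (zkp_batch_size = 10, 25 items available):

fn batch_range(zkp_batch_size: u64, total_len: usize, start: usize) -> std::ops::Range<usize> {
    let end = (start + zkp_batch_size as usize).min(total_len);
    start..end
}

fn main() {
    assert_eq!(batch_range(10, 25, 0), 0..10);
    assert_eq!(batch_range(10, 25, 10), 10..20);
    // 25 / 10 = 2 full batches, so start = 20 is never reached in practice;
    // the min() clamp is a guard against a short final slice.
    assert_eq!(batch_range(10, 25, 20), 20..25);
}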
+ fn finish_job( + &mut self, + new_root: [u8; 32], + inputs: ProofInput, + result_tx: mpsc::Sender, + ) -> Option { + self.current_root = new_root; + let job_seq = self.seq; + self.seq += 1; + Some(ProofJob { + seq: job_seq, + inputs, + result_tx, + }) + } + + fn ensure_worker_pool(&mut self) { + if self.worker_pool.is_none() { + let num_workers = self.context.num_proof_workers.max(1); + let job_tx = spawn_proof_workers(num_workers, self.context.prover_config.clone()); + + info!( + "StateSupervisor spawned {} persistent proof workers for tree {}", + num_workers, self.context.merkle_tree + ); + + self.worker_pool = Some(WorkerPool { job_tx }); + } + } + + async fn process_queue_update(&mut self, queue_work: QueueWork) -> crate::Result { + debug!( + "StateSupervisor processing queue update for tree {} (hint: {} items)", + self.context.merkle_tree, queue_work.queue_size + ); + + // Check if we're still in the active phase before processing + let current_slot = self.context.slot_tracker.estimated_current_slot(); + let current_phase = self + .context + .epoch_phases + .get_current_epoch_state(current_slot); + + if current_phase != EpochState::Active { + debug!( + "Skipping queue update: not in active phase (current: {:?}, slot: {}, epoch: {})", + current_phase, current_slot, self.context.epoch + ); + return Ok(0); + } + + let zkp_batch_size = self.zkp_batch_size(); + if queue_work.queue_size < zkp_batch_size { + trace!( + "Queue size {} below zkp_batch_size {}, skipping", + queue_work.queue_size, + zkp_batch_size + ); + return Ok(0); + } + + let phase = match queue_work.queue_type { + QueueType::OutputStateV2 => Phase::Append, + QueueType::InputStateV2 => Phase::Nullify, + other => { + warn!("Unsupported queue type for state processing: {:?}", other); + return Ok(0); + } + }; + + let max_batches = (queue_work.queue_size / zkp_batch_size) as usize; + if max_batches == 0 { + return Ok(0); + } + + self.ensure_worker_pool(); + + let num_workers = self.context.num_proof_workers.max(1); + + let (proof_tx, proof_rx) = mpsc::channel(num_workers * 2); + + // Reset seq counter - TxSender always expects seq to start at 0 + self.seq = 0; + + let tx_sender_handle = TxSender::spawn( + self.context.clone(), + proof_rx, + self.zkp_batch_size(), + self.current_root, + ); + + let job_tx = self + .worker_pool + .as_ref() + .expect("worker pool should be initialized") + .job_tx + .clone(); + let jobs_sent = self + .enqueue_batches(phase, max_batches, job_tx, proof_tx) + .await?; + + let tx_processed = match tx_sender_handle.await { + Ok(res) => match res { + Ok(processed) => processed, + Err(e) => { + warn!("Tx sender error, resetting staging tree: {}", e); + self.reset_staging_tree(); + return Err(e); + } + }, + Err(e) => { + warn!("Tx sender join error, resetting staging tree: {}", e); + self.reset_staging_tree(); + return Err(anyhow!("Tx sender join error: {}", e)); + } + }; + + if tx_processed < jobs_sent * self.zkp_batch_size as usize { + debug!( + "Processed {} items but sent {} jobs (expected {}), some proofs may have failed", + tx_processed, + jobs_sent, + jobs_sent * self.zkp_batch_size as usize + ); + } + + Ok(tx_processed) + } + + fn reset_staging_tree(&mut self) { + info!( + "Resetting staging tree for tree {}", + self.context.merkle_tree + ); + self.staging_tree = None; + } + + fn build_staging_tree( + &mut self, + leaf_indices: &[u64], + leaves: &[[u8; 32]], + nodes: &[u64], + node_hashes: &[[u8; 32]], + initial_root: [u8; 32], + root_seq: u64, + ) -> crate::Result<()> { + self.staging_tree = 
Some(StagingTree::new( + leaf_indices, + leaves, + nodes, + node_hashes, + initial_root, + root_seq, + DEFAULT_BATCH_STATE_TREE_HEIGHT as usize, + )?); + debug!("Built staging tree from indexer (seq={})", root_seq); + Ok(()) + } + + async fn enqueue_batches( + &mut self, + phase: Phase, + max_batches: usize, + job_tx: async_channel::Sender, + result_tx: mpsc::Sender, + ) -> crate::Result { + let zkp_batch_size = self.zkp_batch_size() as usize; + let total_needed = max_batches.saturating_mul(zkp_batch_size); + let fetch_len = total_needed as u64; + + let state_queue = + fetch_batches(&self.context, None, None, fetch_len, self.zkp_batch_size()).await?; + + let Some(state_queue) = state_queue else { + return Ok(0); + }; + + let mut jobs_sent = 0usize; + + match phase { + Phase::Append => { + let Some(output_batch) = state_queue.output_queue.as_ref() else { + return Ok(0); + }; + if output_batch.leaf_indices.is_empty() { + return Ok(0); + } + + self.current_root = state_queue.initial_root; + self.next_index = output_batch.next_index; + info!( + "Synced from indexer: root {:?}[..4], next_index {}", + &self.current_root[..4], + self.next_index + ); + + self.build_staging_tree( + &output_batch.leaf_indices, + &output_batch.old_leaves, + &state_queue.nodes, + &state_queue.node_hashes, + state_queue.initial_root, + state_queue.root_seq, + )?; + + let available = output_batch.leaf_indices.len(); + let num_slices = (available / zkp_batch_size).min(max_batches); + + for batch_idx in 0..num_slices { + let start = batch_idx * zkp_batch_size; + if let Some(job) = self + .build_append_job(batch_idx, &state_queue, start, result_tx.clone()) + .await? + { + job_tx.send(job).await?; + jobs_sent += 1; + } else { + break; + } + } + } + Phase::Nullify => { + let Some(input_batch) = state_queue.input_queue.as_ref() else { + return Ok(0); + }; + if input_batch.leaf_indices.is_empty() { + return Ok(0); + } + + self.current_root = state_queue.initial_root; + info!( + "Synced from indexer: root {:?}[..4]", + &self.current_root[..4] + ); + + self.build_staging_tree( + &input_batch.leaf_indices, + &input_batch.current_leaves, + &state_queue.nodes, + &state_queue.node_hashes, + state_queue.initial_root, + state_queue.root_seq, + )?; + + let available = input_batch.leaf_indices.len(); + let num_slices = (available / zkp_batch_size).min(max_batches); + + for batch_idx in 0..num_slices { + let start = batch_idx * zkp_batch_size; + if let Some(job) = self + .build_nullify_job(batch_idx, &state_queue, start, result_tx.clone()) + .await? 
+ { + job_tx.send(job).await?; + jobs_sent += 1; + } else { + break; + } + } + } + } + + drop(result_tx); + + info!("Enqueued {} jobs for proof generation", jobs_sent); + Ok(jobs_sent) + } + + async fn build_append_job( + &mut self, + batch_idx: usize, + state_queue: &light_client::indexer::StateQueueDataV2, + start: usize, + result_tx: mpsc::Sender<ProofResult>, + ) -> crate::Result<Option<ProofJob>> { + let batch = state_queue + .output_queue + .as_ref() + .ok_or_else(|| anyhow!("Output queue not present in state queue"))?; + + let range = self.batch_range(batch.account_hashes.len(), start); + let leaves = batch.account_hashes[range.clone()].to_vec(); + let leaf_indices = batch.leaf_indices[range].to_vec(); + + let hashchain_idx = start / self.zkp_batch_size as usize; + let batch_seq = state_queue.root_seq + (batch_idx as u64) + 1; + + let staging = self.staging_tree.as_mut().ok_or_else(|| { + anyhow!( + "Staging tree not initialized for append job (batch_idx={})", + batch_idx + ) + })?; + let result = staging.process_batch_updates( + &leaf_indices, + &leaves, + BatchType::Append, + batch_idx, + batch_seq, + )?; + let new_root = result.new_root; + + let leaves_hashchain = + Self::get_leaves_hashchain(&batch.leaves_hash_chains, hashchain_idx)?; + let start_index = leaf_indices.first().copied().unwrap_or(0) as u32; + + let circuit_inputs = + BatchAppendsCircuitInputs::new::<{ DEFAULT_BATCH_STATE_TREE_HEIGHT as usize }>( + result.into(), + start_index, + leaves.clone(), + leaves_hashchain, + self.zkp_batch_size as u32, + ) + .map_err(|e| anyhow!("Failed to build append inputs: {}", e))?; + + self.next_index = self.next_index.saturating_add(self.zkp_batch_size); + Ok(self.finish_job(new_root, ProofInput::Append(circuit_inputs), result_tx)) + } + + async fn build_nullify_job( + &mut self, + batch_idx: usize, + state_queue: &light_client::indexer::StateQueueDataV2, + start: usize, + result_tx: mpsc::Sender<ProofResult>, + ) -> crate::Result<Option<ProofJob>> { + let batch = state_queue + .input_queue + .as_ref() + .ok_or_else(|| anyhow!("Input queue not present in state queue"))?; + + let range = self.batch_range(batch.account_hashes.len(), start); + let account_hashes = batch.account_hashes[range.clone()].to_vec(); + let tx_hashes = batch.tx_hashes[range.clone()].to_vec(); + let nullifiers = batch.nullifiers[range.clone()].to_vec(); + let leaf_indices = batch.leaf_indices[range].to_vec(); + let hashchain_idx = start / self.zkp_batch_size as usize; + let batch_seq = state_queue.root_seq + (batch_idx as u64) + 1; + + let staging = self.staging_tree.as_mut().ok_or_else(|| { + anyhow!( + "Staging tree not initialized for nullify job (batch_idx={})", + batch_idx + ) + })?; + let result = staging.process_batch_updates( + &leaf_indices, + &nullifiers, + BatchType::Nullify, + batch_idx, + batch_seq, + )?; + info!( + "nullify batch {} root[..4] {:?} => {:?}", + batch_idx, + &result.old_root[..4], + &result.new_root[..4] + ); + + let new_root = result.new_root; + let leaves_hashchain = + Self::get_leaves_hashchain(&batch.leaves_hash_chains, hashchain_idx)?; + let path_indices: Vec = leaf_indices.iter().map(|idx| *idx as u32).collect(); + + let circuit_inputs = + BatchUpdateCircuitInputs::new::<{ DEFAULT_BATCH_STATE_TREE_HEIGHT as usize }>( + result.into(), + tx_hashes, + account_hashes, + leaves_hashchain, + path_indices, + self.zkp_batch_size as u32, + ) + .map_err(|e| anyhow!("Failed to build nullify inputs: {}", e))?; + + Ok(self.finish_job(new_root, ProofInput::Nullify(circuit_inputs), result_tx)) + } +} diff --git 
a/forester/src/processor/v2/state/tx_sender.rs b/forester/src/processor/v2/state/tx_sender.rs new file mode 100644 index 0000000000..d15ec7285b --- /dev/null +++ b/forester/src/processor/v2/state/tx_sender.rs @@ -0,0 +1,151 @@ +use std::collections::BTreeMap; + +use borsh::BorshSerialize; +use light_batched_merkle_tree::merkle_tree::{ + InstructionDataBatchAppendInputs, InstructionDataBatchNullifyInputs, +}; +use light_client::rpc::Rpc; +use light_registry::account_compression_cpi::sdk::{ + create_batch_append_instruction, create_batch_nullify_instruction, + create_batch_update_address_tree_instruction, +}; +use solana_sdk::signature::Signer; +use tokio::{sync::mpsc, task::JoinHandle}; +use tracing::{info, warn}; + +use crate::{ + errors::ForesterError, + processor::v2::{ + common::send_transaction_batch, state::proof_worker::ProofResult, BatchContext, + }, +}; + +#[derive(Debug)] +pub enum BatchInstruction { + Append(Vec<InstructionDataBatchAppendInputs>), + Nullify(Vec<InstructionDataBatchNullifyInputs>), + AddressAppend(Vec<InstructionDataBatchNullifyInputs>), +} + +pub struct TxSender<R: Rpc> { + context: BatchContext<R>, + expected_seq: u64, + buffer: BTreeMap<u64, BatchInstruction>, + zkp_batch_size: u64, + last_seen_root: [u8; 32], +} + +impl<R: Rpc> TxSender<R> { + pub(crate) fn spawn( + context: BatchContext<R>, + proof_rx: mpsc::Receiver<ProofResult>, + zkp_batch_size: u64, + last_seen_root: [u8; 32], + ) -> JoinHandle<crate::Result<usize>> { + let sender = Self { + context, + expected_seq: 0, + buffer: BTreeMap::new(), + zkp_batch_size, + last_seen_root, + }; + + tokio::spawn(async move { sender.run(proof_rx).await }) + } + + async fn run(mut self, mut proof_rx: mpsc::Receiver<ProofResult>) -> crate::Result<usize> { + let mut processed = 0usize; + + while let Some(result) = proof_rx.recv().await { + self.buffer.insert(result.seq, result.instruction); + + while let Some(instr) = self.buffer.remove(&self.expected_seq) { + let (instructions, expected_root) = match &instr { + BatchInstruction::Append(proofs) => { + let ix = proofs + .iter() + .map(|data| { + Ok(create_batch_append_instruction( + self.context.authority.pubkey(), + self.context.derivation, + self.context.merkle_tree, + self.context.output_queue, + self.context.epoch, + data.try_to_vec()?, + )) + }) + .collect::<crate::Result<Vec<_>>>()?; + (ix, proofs.last().map(|p| p.new_root)) + } + BatchInstruction::Nullify(proofs) => { + let ix = proofs + .iter() + .map(|data| { + Ok(create_batch_nullify_instruction( + self.context.authority.pubkey(), + self.context.derivation, + self.context.merkle_tree, + self.context.epoch, + data.try_to_vec()?, + )) + }) + .collect::<crate::Result<Vec<_>>>()?; + (ix, proofs.last().map(|p| p.new_root)) + } + BatchInstruction::AddressAppend(proofs) => { + let ix = proofs + .iter() + .map(|data| { + Ok(create_batch_update_address_tree_instruction( + self.context.authority.pubkey(), + self.context.derivation, + self.context.merkle_tree, + self.context.epoch, + data.try_to_vec()?, + )) + }) + .collect::<crate::Result<Vec<_>>>()?; + (ix, proofs.last().map(|p| p.new_root)) + } + }; + + let instr_type = match &instr { + BatchInstruction::Append(_) => "Append", + BatchInstruction::Nullify(_) => "Nullify", + BatchInstruction::AddressAppend(_) => "AddressAppend", + }; + + match send_transaction_batch(&self.context, instructions).await { + Ok(sig) => { + if let Some(root) = expected_root { + self.last_seen_root = root; + } + processed += self.zkp_batch_size as usize; + info!( + "tx sent: {} type={} root={:?} seq={} epoch={}", + sig, + instr_type, + self.last_seen_root, + self.expected_seq, + self.context.epoch + ); + self.expected_seq += 1; + } + Err(e) => { + warn!("tx error {} epoch {}", e, self.context.epoch); + return if let Some(ForesterError::NotInActivePhase) = 
+ e.downcast_ref::<ForesterError>() + { + warn!("Active phase ended while sending tx, stopping sender loop"); + Ok(processed) + } else { + Err(e) + }; + } + } + } + } + + Ok(processed) + } +} diff --git a/forester/tests/e2e_test.rs b/forester/tests/e2e_test.rs index ff7f8b4b25..6d0c8fffc6 100644 --- a/forester/tests/e2e_test.rs +++ b/forester/tests/e2e_test.rs @@ -212,6 +212,8 @@ async fn e2e_test() { prover_update_url: None, prover_address_append_url: None, prover_api_key: get_prover_api_key(), + prover_polling_interval: None, + prover_max_wait_time: None, photon_api_key: get_photon_api_key(), photon_grpc_url: get_photon_grpc_url(), pushgateway_url: None, @@ -455,7 +457,10 @@ async fn e2e_test() { compressible_account_subscriber ); - execute_test_transactions( + let iterations: usize = 50; + + let test_iterations = execute_test_transactions( + iterations, &mut rpc, rng, &env, @@ -470,7 +475,12 @@ async fn e2e_test() { ) .await; - wait_for_work_report(&mut work_report_receiver, &state_tree_params).await; + wait_for_work_report( + &mut work_report_receiver, + &state_tree_params, + test_iterations, + ) + .await; // Verify root changes based on enabled tests if is_v1_state_test_enabled() { @@ -741,17 +751,6 @@ async fn verify_root_changed( ); } -async fn get_state_v2_batch_size<R: Rpc>(rpc: &mut R, merkle_tree_pubkey: &Pubkey) -> u64 { - let mut merkle_tree_account = rpc.get_account(*merkle_tree_pubkey).await.unwrap().unwrap(); - let merkle_tree = BatchedMerkleTreeAccount::state_from_bytes( - merkle_tree_account.data.as_mut_slice(), - &merkle_tree_pubkey.into(), - ) - .unwrap(); - - merkle_tree.get_metadata().queue_batches.batch_size -} - async fn setup_forester_pipeline( config: &ForesterConfig, ) -> ( @@ -788,14 +787,10 @@ async fn wait_for_work_report( work_report_receiver: &mut mpsc::Receiver<WorkReport>, tree_params: &InitStateTreeAccountsInstructionData, + expected_minimum_processed_items: usize, ) { let batch_size = tree_params.output_queue_zkp_batch_size as usize; - // With increased test size, expect more processed items - let minimum_processed_items: usize = if is_v2_state_test_enabled() { - (tree_params.output_queue_batch_size as usize) * 4 // Expect at least 4 batches worth - } else { - tree_params.output_queue_batch_size as usize - }; + let mut total_processed_items: usize = 0; let timeout_duration = Duration::from_secs(DEFAULT_TIMEOUT_SECONDS); println!("Batch size: {}", batch_size); println!( "Minimum required processed items: {}", - minimum_processed_items + expected_minimum_processed_items ); let start_time = tokio::time::Instant::now(); - while total_processed_items < minimum_processed_items { + while total_processed_items < expected_minimum_processed_items { match timeout( timeout_duration.saturating_sub(start_time.elapsed()), work_report_receiver.recv(), ) .await { Ok(Some(report)) => { println!("Received work report: {:?}", report); total_processed_items += report.processed_items; + + if total_processed_items >= expected_minimum_processed_items { + println!("Received required number of processed items."); + break; + } } Ok(None) => { println!("Work report channel closed unexpectedly"); break; } Err(_) => { println!("Timed out waiting for work report"); break; } } } println!("Total processed items: {}", total_processed_items); assert!( - total_processed_items >= minimum_processed_items, + total_processed_items >= expected_minimum_processed_items, "Processed fewer items ({}) than required ({})", 
total_processed_items, - minimum_processed_items + expected_minimum_processed_items ); } #[allow(clippy::too_many_arguments)] async fn execute_test_transactions( + iterations: usize, rpc: &mut R, rng: &mut StdRng, env: &TestAccounts, @@ -851,14 +852,7 @@ async fn execute_test_transactions( sender_batched_token_counter: &mut u64, address_v1_counter: &mut u64, address_v2_counter: &mut u64, -) { - let mut iterations = 4; - if is_v2_state_test_enabled() { - let batch_size = - get_state_v2_batch_size(rpc, &env.v2_state_trees[0].merkle_tree).await as usize; - iterations = batch_size * 2; - } - +) -> usize { println!("Executing {} test transactions", iterations); println!("==========================================="); for i in 0..iterations { @@ -963,6 +957,8 @@ async fn execute_test_transactions( println!("{} v2 address create: {:?}", i, sig_v2_addr); } } + + iterations } async fn mint_to( diff --git a/forester/tests/legacy/test_utils.rs b/forester/tests/legacy/test_utils.rs index ad0d7a6666..35c7e0e3fa 100644 --- a/forester/tests/legacy/test_utils.rs +++ b/forester/tests/legacy/test_utils.rs @@ -75,7 +75,14 @@ pub fn forester_config() -> ForesterConfig { ws_rpc_url: Some("ws://localhost:8900".to_string()), indexer_url: Some("http://localhost:8784".to_string()), prover_url: Some("http://localhost:3001".to_string()), + prover_append_url: None, + prover_update_url: None, + prover_address_append_url: None, + prover_api_key: None, + prover_polling_interval: None, + prover_max_wait_time: None, photon_api_key: None, + photon_grpc_url: None, pushgateway_url: None, pagerduty_routing_key: None, rpc_rate_limit: None, @@ -111,6 +118,7 @@ pub fn forester_config() -> ForesterConfig { derivation_pubkey: test_accounts.protocol.forester.pubkey(), address_tree_data: vec![], state_tree_data: vec![], + compressible_config: None, } } diff --git a/forester/tests/priority_fee_test.rs b/forester/tests/priority_fee_test.rs index 901961e455..5656781122 100644 --- a/forester/tests/priority_fee_test.rs +++ b/forester/tests/priority_fee_test.rs @@ -42,6 +42,8 @@ async fn test_priority_fee_request() { prover_update_url: None, prover_address_append_url: None, prover_api_key: None, + prover_polling_interval_ms: None, + prover_max_wait_time_secs: None, payer: Some( std::env::var("FORESTER_PAYER").expect("FORESTER_PAYER must be set in environment"), ), diff --git a/forester/tests/test_utils.rs b/forester/tests/test_utils.rs index c572c85ab8..1471cfe0cc 100644 --- a/forester/tests/test_utils.rs +++ b/forester/tests/test_utils.rs @@ -88,6 +88,8 @@ pub fn forester_config() -> ForesterConfig { prover_update_url: None, prover_address_append_url: None, prover_api_key: None, + prover_polling_interval: None, + prover_max_wait_time: None, photon_api_key: None, photon_grpc_url: None, pushgateway_url: None, diff --git a/program-tests/merkle-tree/src/lib.rs b/program-tests/merkle-tree/src/lib.rs index b60c5b8032..88416a4cea 100644 --- a/program-tests/merkle-tree/src/lib.rs +++ b/program-tests/merkle-tree/src/lib.rs @@ -18,6 +18,8 @@ pub enum ReferenceMerkleTreeError { IndexedArray(#[from] IndexedArrayError), #[error("RootHistoryArrayLenNotSet")] RootHistoryArrayLenNotSet, + #[error("Level {level} exceeds tree height {height}")] + InvalidLevel { level: usize, height: usize }, } #[derive(Debug, Clone)] @@ -373,6 +375,52 @@ where .cloned() .ok_or(ReferenceMerkleTreeError::LeafDoesNotExist(index)) } + + /// Insert a node at a specific level and position without recomputing the tree. + /// Used for reconstructing tree state from stored nodes. 
+ /// + /// The `node_index` encodes both level and position: + /// - Upper 8 bits (>> 56): level in the tree + /// - Lower 56 bits (& 0x00FFFFFFFFFFFFFF): position within that level + pub fn insert_node( + &mut self, + node_index: u64, + hash: [u8; 32], + ) -> Result<(), ReferenceMerkleTreeError> { + let level = (node_index >> 56) as usize; + let position = (node_index & 0x00FFFFFFFFFFFFFF) as usize; + + if level >= self.layers.len() { + return Err(ReferenceMerkleTreeError::InvalidLevel { + level, + height: self.layers.len(), + }); + } + + if self.layers[level].len() <= position { + self.layers[level].resize(position + 1, H::zero_bytes()[level]); + } + + self.layers[level][position] = hash; + Ok(()) + } + + /// Insert a leaf at a specific index without recomputing the tree. + /// Used for reconstructing tree state from stored leaves. + pub fn insert_leaf(&mut self, leaf_index: usize, hash: [u8; 32]) { + if self.layers[0].len() <= leaf_index { + self.layers[0].resize(leaf_index + 1, H::zero_bytes()[0]); + } + self.layers[0][leaf_index] = hash; + } + + /// Ensure a layer has capacity for at least `min_index + 1` elements. + /// Resizes with zero bytes if needed. + pub fn ensure_layer_capacity(&mut self, level: usize, min_index: usize) { + if level < self.layers.len() && self.layers[level].len() <= min_index { + self.layers[level].resize(min_index + 1, H::zero_bytes()[level]); + } + } } #[cfg(test)] diff --git a/program-tests/utils/src/e2e_test_env.rs b/program-tests/utils/src/e2e_test_env.rs index 09ea74ed6a..6f6fa1b7ea 100644 --- a/program-tests/utils/src/e2e_test_env.rs +++ b/program-tests/utils/src/e2e_test_env.rs @@ -745,16 +745,18 @@ where .indexer .get_queue_elements( merkle_tree_pubkey.to_bytes(), - None, - Some(batch.batch_size as u16), - None, - None, + light_client::indexer::QueueElementsV2Options::default() + .with_address_queue(None, Some(batch.batch_size as u16)), None, ) .await .unwrap(); - let addresses = - addresses.value.output_queue_elements.unwrap_or_default().iter().map(|x| x.account_hash).collect::<Vec<_>>(); + let addresses = addresses + .value + .address_queue + .as_ref() + .map(|aq| aq.addresses.clone()) + .unwrap_or_default(); // // local_leaves_hash_chain is only used for a test assertion. 
// let local_nullifier_hash_chain = create_hash_chain_from_array(&addresses); // assert_eq!(leaves_hash_chain, local_nullifier_hash_chain); diff --git a/program-tests/utils/src/test_batch_forester.rs b/program-tests/utils/src/test_batch_forester.rs index f737207ad9..102c632d77 100644 --- a/program-tests/utils/src/test_batch_forester.rs +++ b/program-tests/utils/src/test_batch_forester.rs @@ -167,7 +167,7 @@ pub async fn create_append_batch_ix_data( let proof_client = ProofClient::local(); let inputs_json = BatchAppendInputsJson::from_inputs(&circuit_inputs).to_string(); - match proof_client.generate_proof(inputs_json, "append").await { + match proof_client.generate_proof(inputs_json).await { Ok(compressed_proof) => ( compressed_proof, bigint_to_be_bytes_array::<32>(&circuit_inputs.new_root.to_biguint().unwrap()) @@ -300,7 +300,7 @@ pub async fn get_batched_nullify_ix_data( assert_eq!(circuit_inputs_new_root, new_root); - let proof = match proof_client.generate_proof(inputs_json, "update").await { + let proof = match proof_client.generate_proof(inputs_json).await { Ok(compressed_proof) => compressed_proof, Err(e) => { println!("Failed to generate proof: {:?}", e); @@ -653,21 +653,18 @@ pub async fn create_batch_update_address_tree_instruction_data_with_proof>(); + .address_queue + .as_ref() + .map(|aq| aq.addresses.clone()) + .unwrap_or_default(); // // local_leaves_hash_chain is only used for a test assertion. // let local_nullifier_hash_chain = create_hash_chain_from_slice(addresses.as_slice()).unwrap(); // assert_eq!(leaves_hash_chain, local_nullifier_hash_chain); @@ -736,10 +733,7 @@ pub async fn create_batch_update_address_tree_instruction_data_with_proof let circuit_inputs_new_root = bigint_to_be_bytes_array::<32>(&inputs.new_root).unwrap(); let inputs_json = to_json(&inputs); - match proof_client - .generate_proof(inputs_json, "address-append") - .await - { + match proof_client.generate_proof(inputs_json).await { Ok(compressed_proof) => { let instruction_data = InstructionDataBatchNullifyInputs { new_root: circuit_inputs_new_root, diff --git a/prover/client/Cargo.toml b/prover/client/Cargo.toml index 0123c73582..c98917d7ff 100644 --- a/prover/client/Cargo.toml +++ b/prover/client/Cargo.toml @@ -12,6 +12,7 @@ devenv = [] [dependencies] +light-compressed-account = { workspace = true, default-features = false } light-hasher = { workspace = true, features = ["poseidon"] } light-indexed-array = { workspace = true } light-sparse-merkle-tree = { workspace = true } diff --git a/prover/client/src/proof.rs b/prover/client/src/proof.rs index 66c86ed19d..f6b8f47ba3 100644 --- a/prover/client/src/proof.rs +++ b/prover/client/src/proof.rs @@ -5,6 +5,7 @@ type G1 = ark_bn254::g1::G1Affine; use std::ops::Neg; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress, Validate}; +use light_compressed_account::instruction_data::compressed_proof::CompressedProof; use num_traits::Num; use solana_bn254::compression::prelude::{ alt_bn128_g1_compress, alt_bn128_g1_decompress, alt_bn128_g2_compress, alt_bn128_g2_decompress, @@ -16,13 +17,23 @@ pub struct ProofResult { pub public_inputs: Vec<[u8; 32]>, } -#[derive(Debug)] +#[derive(Debug, Clone, Copy)] pub struct ProofCompressed { pub a: [u8; 32], pub b: [u8; 64], pub c: [u8; 32], } +impl From<ProofCompressed> for CompressedProof { + fn from(proof: ProofCompressed) -> Self { + CompressedProof { + a: proof.a, + b: proof.b, + c: proof.c, + } + } +} + impl ProofCompressed { pub fn try_decompress(&self) -> Result { let proof_a = alt_bn128_g1_decompress(&self.a)?; diff --git a/prover/client/src/proof_client.rs 
b/prover/client/src/proof_client.rs index a4f0de334d..fe1d708baa 100644 --- a/prover/client/src/proof_client.rs +++ b/prover/client/src/proof_client.rs @@ -3,7 +3,7 @@ use std::time::{Duration, Instant}; use reqwest::Client; use serde::Deserialize; use tokio::time::sleep; -use tracing::{debug, error, info, warn}; +use tracing::{debug, error, info, trace, warn}; use crate::{ constants::PROVE_PATH, @@ -84,7 +84,6 @@ impl ProofClient { pub async fn generate_proof( &self, inputs_json: String, - circuit_type: &str, ) -> Result<ProofCompressed, ProverClientError> { let start_time = Instant::now(); let mut retries = 0; @@ -98,10 +97,7 @@ impl ProofClient { ))); } - match self - .try_generate_proof(&inputs_json, circuit_type, retries + 1, elapsed) - .await - { + match self.try_generate_proof(&inputs_json, elapsed).await { Ok(proof) => return Ok(proof), Err(err) if self.should_retry(&err, retries, elapsed) => { retries += 1; @@ -135,15 +131,8 @@ impl ProofClient { async fn try_generate_proof( &self, inputs_json: &str, - circuit_type: &str, - attempt: u32, elapsed: Duration, ) -> Result<ProofCompressed, ProverClientError> { - debug!( - "Generating proof for circuit type: {} (attempt {}, elapsed: {:?})", - circuit_type, attempt, elapsed - ); - let response = self.send_proof_request(inputs_json).await?; let status_code = response.status(); let response_text = response.text().await.map_err(|e| { @@ -183,9 +172,6 @@ impl ProofClient { } fn log_response(&self, status_code: reqwest::StatusCode, response_text: &str) { - debug!("Response status: {}", status_code); - debug!("Response text: {}", response_text); - if !status_code.is_success() { error!("HTTP error: status={}, body={}", status_code, response_text); } @@ -198,12 +184,8 @@ impl ProofClient { start_elapsed: Duration, ) -> Result<ProofCompressed, ProverClientError> { match status_code { - reqwest::StatusCode::OK => { - debug!("Received synchronous proof response"); - self.parse_proof_from_json(response_text) - } + reqwest::StatusCode::OK => self.parse_proof_from_json(response_text), reqwest::StatusCode::ACCEPTED => { - debug!("Received asynchronous job response"); - let job_response = self.parse_job_response(response_text)?; + let job_response = self.parse_job_response(response_text)?; self.handle_async_job(job_response, start_elapsed).await } @@ -255,6 +237,13 @@ impl ProofClient { fn should_retry(&self, error: &ProverClientError, retries: u32, elapsed: Duration) -> bool { let error_str = error.to_string(); + + let is_constraint_error = + error_str.contains("constraint") || error_str.contains("is not satisfied"); + if is_constraint_error { + return false; + } + + let is_retryable_error = error_str.contains("job_not_found") || error_str.contains("connection") || error_str.contains("timeout") @@ -264,7 +253,7 @@ impl ProofClient { let should_retry = retries < MAX_RETRIES && is_retryable_error && elapsed < self.max_wait_time; - debug!( + trace!( "Retry check: retries={}/{}, is_retryable_error={}, elapsed={:?}/{:?}, should_retry={}, error={}", retries, MAX_RETRIES, is_retryable_error, elapsed, self.max_wait_time, should_retry, error_str ); @@ -297,9 +286,12 @@ impl ProofClient { ))); } - debug!( + trace!( "Poll #{} for job {} at total elapsed time {:?} (polling: {:?})", - poll_count, job_id, total_elapsed, poll_elapsed + poll_count, + job_id, + total_elapsed, + poll_elapsed ); match self.poll_job_status(&status_url, job_id, poll_count).await { @@ -336,9 +328,12 @@ impl ProofClient { Err(err) if self.is_transient_polling_error(&err) => { transient_error_count += 1; - debug!( + trace!( "Transient polling error for job {}: attempt {}/{}, error: {}", - job_id, transient_error_count, MAX_RETRIES, err + job_id, + 
transient_error_count, + MAX_RETRIES, + err ); if transient_error_count >= MAX_RETRIES { @@ -394,7 +389,7 @@ impl ProofClient { let status_code = response.status(); let response_text = response.text().await.unwrap_or_default(); - debug!( + trace!( "Poll #{} for job {}: status={}, body_len={}", poll_count, job_id, @@ -428,7 +423,7 @@ impl ProofClient { elapsed: Duration, poll_count: u32, ) -> Result<Option<ProofCompressed>, ProverClientError> { - info!( + trace!( "Poll #{} for job {}: status='{}', message='{}'", poll_count, job_id, @@ -459,9 +454,13 @@ impl ProofClient { ))) } "processing" | "queued" => { - debug!( + trace!( "Job {} still {} after {:?} (poll #{}), waiting {:?} before next check", - job_id, status_response.status, elapsed, poll_count, self.polling_interval + job_id, + status_response.status, + elapsed, + poll_count, + self.polling_interval ); Ok(None) } @@ -482,7 +481,7 @@ impl ProofClient { ) -> Result<ProofCompressed, ProverClientError> { match result { Some(result) => { - debug!("Job {} has result, parsing proof JSON", job_id); + trace!("Job {} has result, parsing proof JSON", job_id); let proof_json = serde_json::to_string(&result).map_err(|e| { error!("Failed to serialize result for job {}: {}", job_id, e); ProverClientError::ProverServerError("Cannot serialize result".to_string()) @@ -528,7 +527,7 @@ impl ProofClient { ) -> Result<(ProofCompressed, [u8; 32]), ProverClientError> { let new_root = light_hasher::bigint::bigint_to_be_bytes_array::<32>(&inputs.new_root)?; let inputs_json = to_json(&inputs); - let proof = self.generate_proof(inputs_json, "address-append").await?; + let proof = self.generate_proof(inputs_json).await?; Ok((proof, new_root)) } @@ -540,19 +539,19 @@ impl ProofClient { &circuit_inputs.new_root.to_biguint().unwrap(), )?; let inputs_json = BatchAppendInputsJson::from_inputs(&circuit_inputs).to_string(); - let proof = self.generate_proof(inputs_json, "append").await?; + let proof = self.generate_proof(inputs_json).await?; Ok((proof, new_root)) } pub async fn generate_batch_update_proof( &self, - inputs: BatchUpdateCircuitInputs, + circuit_inputs: BatchUpdateCircuitInputs, ) -> Result<(ProofCompressed, [u8; 32]), ProverClientError> { let new_root = light_hasher::bigint::bigint_to_be_bytes_array::<32>( - &inputs.new_root.to_biguint().unwrap(), + &circuit_inputs.new_root.to_biguint().unwrap(), )?; - let json_str = update_inputs_string(&inputs); - let proof = self.generate_proof(json_str, "update").await?; + let json_str = update_inputs_string(&circuit_inputs); + let proof = self.generate_proof(json_str).await?; Ok((proof, new_root)) } } diff --git a/prover/client/src/proof_types/batch_address_append/proof_inputs.rs b/prover/client/src/proof_types/batch_address_append/proof_inputs.rs index baeba1bb03..64153445a0 100644 --- a/prover/client/src/proof_types/batch_address_append/proof_inputs.rs +++ b/prover/client/src/proof_types/batch_address_append/proof_inputs.rs @@ -29,6 +29,84 @@ pub struct BatchAddressAppendInputs { pub tree_height: usize, } +impl BatchAddressAppendInputs { + #[allow(clippy::too_many_arguments)] + pub fn new<const HEIGHT: usize>( + batch_size: usize, + leaves_hashchain: [u8; 32], + low_element_values: Vec<[u8; 32]>, + low_element_indices: Vec<usize>, + low_element_next_indices: Vec<usize>, + low_element_next_values: Vec<[u8; 32]>, + low_element_proofs: Vec<Vec<[u8; 32]>>, + new_element_values: Vec<[u8; 32]>, + new_element_proofs: Vec<Vec<[u8; 32]>>, + new_root: [u8; 32], + old_root: [u8; 32], + start_index: usize, + ) -> Result<Self, ProverClientError> { + let hash_chain_inputs = [ + old_root, + new_root, + leaves_hashchain, 
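+ // start_index enters the public input hash as a 32-byte big-endian
+ // field element, alongside both roots and the leaves hashchain.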
bigint_to_be_bytes_array::<32>(&start_index.into()).unwrap(), + ]; + let public_input_hash = create_hash_chain_from_array(hash_chain_inputs)?; + + let low_element_proofs_bigint: Vec<Vec<BigUint>> = low_element_proofs + .into_iter() + .map(|proof| { + proof + .into_iter() + .map(|p| BigUint::from_bytes_be(&p)) + .collect() + }) + .collect(); + + let new_element_proofs_bigint: Vec<Vec<BigUint>> = new_element_proofs + .into_iter() + .map(|proof| { + proof + .into_iter() + .map(|p| BigUint::from_bytes_be(&p)) + .collect() + }) + .collect(); + + Ok(Self { + batch_size, + hashchain_hash: BigUint::from_bytes_be(&leaves_hashchain), + low_element_values: low_element_values + .iter() + .map(|v| BigUint::from_bytes_be(v)) + .collect(), + low_element_indices: low_element_indices + .iter() + .map(|&i| BigUint::from(i)) + .collect(), + low_element_next_indices: low_element_next_indices + .iter() + .map(|&i| BigUint::from(i)) + .collect(), + low_element_next_values: low_element_next_values + .iter() + .map(|v| BigUint::from_bytes_be(v)) + .collect(), + low_element_proofs: low_element_proofs_bigint, + new_element_values: new_element_values + .iter() + .map(|v| BigUint::from_bytes_be(v)) + .collect(), + new_element_proofs: new_element_proofs_bigint, + new_root: BigUint::from_bytes_be(&new_root), + old_root: BigUint::from_bytes_be(&old_root), + public_input_hash: BigUint::from_bytes_be(&public_input_hash), + start_index, + tree_height: HEIGHT, + }) + } +} + #[allow(clippy::too_many_arguments)] pub fn get_batch_address_append_circuit_inputs( next_index: usize, diff --git a/prover/client/src/proof_types/batch_append/proof_inputs.rs b/prover/client/src/proof_types/batch_append/proof_inputs.rs index c85681d719..bec593ec1b 100644 --- a/prover/client/src/proof_types/batch_append/proof_inputs.rs +++ b/prover/client/src/proof_types/batch_append/proof_inputs.rs @@ -7,6 +7,7 @@ use tracing::{error, info}; use crate::{ errors::ProverClientError, helpers::{bigint_to_u8_32, compute_root_from_merkle_proof}, + proof_types::batch_update::BatchTreeUpdateResult, }; #[derive(Debug, Clone, Serialize)] @@ -27,6 +28,87 @@ impl BatchAppendsCircuitInputs { pub fn public_inputs_arr(&self) -> [u8; 32] { bigint_to_u8_32(&self.public_input_hash).unwrap() } + + pub fn new<const HEIGHT: usize>( + tree_result: BatchTreeUpdateResult, + start_index: u32, + leaves: Vec<[u8; 32]>, + leaves_hashchain: [u8; 32], + batch_size: u32, + ) -> Result<Self, ProverClientError> { + let expected_len = batch_size as usize; + if leaves.len() != expected_len { + return Err(ProverClientError::GenericError(format!( + "leaves length mismatch: expected {}, got {}", + expected_len, + leaves.len() + ))); + } + if tree_result.old_leaves.len() != expected_len { + return Err(ProverClientError::GenericError(format!( + "old_leaves length mismatch: expected {}, got {}", + expected_len, + tree_result.old_leaves.len() + ))); + } + if tree_result.merkle_proofs.len() != expected_len { + return Err(ProverClientError::GenericError(format!( + "merkle_proofs length mismatch: expected {}, got {}", + expected_len, + tree_result.merkle_proofs.len() + ))); + } + + let mut circuit_merkle_proofs = Vec::with_capacity(batch_size as usize); + + for merkle_proof in tree_result.merkle_proofs.into_iter() { + let proof_slice = merkle_proof.as_slice(); + let proof_len = proof_slice.len(); + let merkle_proof_array: [[u8; 32]; HEIGHT] = proof_slice.try_into().map_err(|_| { + ProverClientError::GenericError(format!( + "Invalid merkle proof length: got {}, expected {}", + proof_len, HEIGHT + )) + })?; + + circuit_merkle_proofs.push( + merkle_proof_array + .iter() 
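+ // Proof nodes are passed to the circuit as non-negative (Sign::Plus)
+ // big-endian integers, matching the root and leaf encodings below.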
.map(|proof_elem| BigInt::from_bytes_be(Sign::Plus, proof_elem)) + .collect(), + ); + } + + let mut start_index_bytes = [0u8; 32]; + start_index_bytes[28..].copy_from_slice(start_index.to_be_bytes().as_slice()); + + let public_input_hash = create_hash_chain_from_array([ + tree_result.old_root, + tree_result.new_root, + leaves_hashchain, + start_index_bytes, + ])?; + + Ok(Self { + public_input_hash: BigInt::from_bytes_be(Sign::Plus, &public_input_hash), + old_root: BigInt::from_bytes_be(Sign::Plus, &tree_result.old_root), + new_root: BigInt::from_bytes_be(Sign::Plus, &tree_result.new_root), + leaves_hashchain_hash: BigInt::from_bytes_be(Sign::Plus, &leaves_hashchain), + start_index, + old_leaves: tree_result + .old_leaves + .iter() + .map(|leaf| BigInt::from_bytes_be(Sign::Plus, leaf)) + .collect(), + leaves: leaves + .iter() + .map(|leaf| BigInt::from_bytes_be(Sign::Plus, leaf)) + .collect(), + merkle_proofs: circuit_merkle_proofs, + height: HEIGHT as u32, + batch_size, + }) + } } #[allow(clippy::too_many_arguments)] diff --git a/prover/client/src/proof_types/batch_update/proof_inputs.rs b/prover/client/src/proof_types/batch_update/proof_inputs.rs index d376fd0da2..7c3f1be65e 100644 --- a/prover/client/src/proof_types/batch_update/proof_inputs.rs +++ b/prover/client/src/proof_types/batch_update/proof_inputs.rs @@ -7,6 +7,17 @@ use crate::{ helpers::{bigint_to_u8_32, compute_root_from_merkle_proof}, }; +/// Result of batch tree updates, containing proofs and root transitions. +/// This mirrors `forester_utils::staging_tree::BatchUpdateResult` but is defined +/// here to avoid a dependency cycle. +#[derive(Clone, Debug)] +pub struct BatchTreeUpdateResult { + pub old_leaves: Vec<[u8; 32]>, + pub merkle_proofs: Vec<Vec<[u8; 32]>>, + pub old_root: [u8; 32], + pub new_root: [u8; 32], +} + #[derive(Clone, Debug)] pub struct BatchUpdateCircuitInputs { pub public_input_hash: BigInt, @@ -26,6 +37,78 @@ impl BatchUpdateCircuitInputs { pub fn public_inputs_arr(&self) -> [u8; 32] { bigint_to_u8_32(&self.public_input_hash).unwrap() } + + pub fn new<const HEIGHT: usize>( + tree_result: BatchTreeUpdateResult, + tx_hashes: Vec<[u8; 32]>, + leaves: Vec<[u8; 32]>, + leaves_hashchain: [u8; 32], + path_indices: Vec, + batch_size: u32, + ) -> Result<Self, ProverClientError> { + let batch_size_usize = batch_size as usize; + if leaves.len() != batch_size_usize + || tree_result.old_leaves.len() != batch_size_usize + || tree_result.merkle_proofs.len() != batch_size_usize + || tx_hashes.len() != batch_size_usize + || path_indices.len() != batch_size_usize + { + return Err(ProverClientError::GenericError(format!( + "Input vector length mismatch: leaves={}, old_leaves={}, merkle_proofs={}, tx_hashes={}, path_indices={}, expected batch_size={}", + leaves.len(), tree_result.old_leaves.len(), tree_result.merkle_proofs.len(), tx_hashes.len(), path_indices.len(), batch_size + ))); + } + + let mut circuit_merkle_proofs = Vec::with_capacity(batch_size_usize); + + for merkle_proof in tree_result.merkle_proofs.into_iter() { + let proof_len = merkle_proof.len(); + let merkle_proof_array: [[u8; 32]; HEIGHT] = + merkle_proof.as_slice().try_into().map_err(|_| { + ProverClientError::GenericError(format!( + "Invalid merkle proof length: got {}, expected {}", + proof_len, HEIGHT + )) + })?; + + circuit_merkle_proofs.push( + merkle_proof_array + .iter() + .map(|proof_elem| BigInt::from_bytes_be(Sign::Plus, proof_elem)) + .collect(), + ); + } + + let public_input_hash = create_hash_chain_from_array([ + tree_result.old_root, + tree_result.new_root, + leaves_hashchain, + ])?; + + Ok(Self { 
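+ // Note: unlike the append circuit, the update public input hash commits
+ // only to old_root, new_root and the leaves hashchain; start_index is
+ // not part of the chain here.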
public_input_hash: BigInt::from_bytes_be(Sign::Plus, &public_input_hash), + old_root: BigInt::from_bytes_be(Sign::Plus, &tree_result.old_root), + new_root: BigInt::from_bytes_be(Sign::Plus, &tree_result.new_root), + tx_hashes: tx_hashes + .iter() + .map(|tx| BigInt::from_bytes_be(Sign::Plus, tx)) + .collect(), + leaves_hashchain_hash: BigInt::from_bytes_be(Sign::Plus, &leaves_hashchain), + leaves: leaves + .iter() + .map(|leaf| BigInt::from_bytes_be(Sign::Plus, leaf)) + .collect(), + old_leaves: tree_result + .old_leaves + .iter() + .map(|leaf| BigInt::from_bytes_be(Sign::Plus, leaf)) + .collect(), + merkle_proofs: circuit_merkle_proofs, + path_indices, + height: HEIGHT as u32, + batch_size, + }) + } } #[derive(Clone, Debug)] diff --git a/prover/server/go.mod b/prover/server/go.mod index 5a828bb41b..f2be71318d 100644 --- a/prover/server/go.mod +++ b/prover/server/go.mod @@ -1,6 +1,6 @@ module light/light-prover -go 1.25.1 +go 1.25.4 require ( github.com/consensys/gnark v0.14.0 diff --git a/scripts/devenv/versions.sh b/scripts/devenv/versions.sh index 7c0d6f9b7f..db2510de24 100755 --- a/scripts/devenv/versions.sh +++ b/scripts/devenv/versions.sh @@ -13,7 +13,7 @@ export SOLANA_VERSION="2.2.15" export ANCHOR_VERSION="0.31.1" export JQ_VERSION="1.8.0" export PHOTON_VERSION="0.51.0" -export PHOTON_COMMIT="2cffb6132a21ce148268129ccbbb24c1f3cabc61" +export PHOTON_COMMIT="4de87f7d99a8ad95c65c3bd40a7d7c1721aa0293" export REDIS_VERSION="8.0.1" export ANCHOR_TAG="anchor-v${ANCHOR_VERSION}" diff --git a/sdk-libs/client/src/indexer/indexer_trait.rs b/sdk-libs/client/src/indexer/indexer_trait.rs index 81d1fbf99d..fcb2931749 100644 --- a/sdk-libs/client/src/indexer/indexer_trait.rs +++ b/sdk-libs/client/src/indexer/indexer_trait.rs @@ -4,13 +4,13 @@ use solana_pubkey::Pubkey; use super::{ response::{Items, ItemsWithCursor, Response}, types::{ - CompressedAccount, CompressedTokenAccount, OwnerBalance, QueueElementsResult, + CompressedAccount, CompressedTokenAccount, OwnerBalance, QueueElementsV2Result, QueueInfoResult, SignatureWithMetadata, TokenBalance, ValidityProofWithContext, }, Address, AddressWithTree, BatchAddressUpdateIndexerResponse, GetCompressedAccountsByOwnerConfig, GetCompressedTokenAccountsByOwnerOrDelegateOptions, Hash, IndexerError, IndexerRpcConfig, MerkleProof, NewAddressProofWithContext, PaginatedOptions, - RetryConfig, + QueueElementsV2Options, RetryConfig, }; // TODO: remove all references in input types. #[async_trait] pub trait Indexer: std::marker::Send + std::marker::Sync { @@ -183,24 +183,6 @@ config: Option, ) -> Result, IndexerError>; - // TODO: in different pr: - // replace num_elements & start_queue_index with PaginatedOptions - // - return type should be ItemsWithCursor - /// Returns queue elements from the queue with the given merkle tree pubkey. - /// Can fetch from output queue (append), input queue (nullify), or both atomically. - /// For input queues account compression program does not store queue elements in the - /// account data but only emits these in the public transaction event. The - /// indexer needs the queue elements to create batch update proofs. - async fn get_queue_elements( - &mut self, - merkle_tree_pubkey: [u8; 32], - output_queue_start_index: Option<u64>, - output_queue_limit: Option<u16>, - input_queue_start_index: Option<u64>, - input_queue_limit: Option<u16>, - config: Option<IndexerRpcConfig>, - ) -> Result<Response<QueueElementsResult>, IndexerError>; - /// Returns information about all queues in the system. /// Includes tree pubkey, queue pubkey, queue type, and queue size for each queue. 
async fn get_queue_info( &self, config: Option<IndexerRpcConfig>, ) -> Result<Response<QueueInfoResult>, IndexerError>; + /// Returns queue elements with deduplicated nodes for efficient staging tree construction. + /// Supports output queue, input queue, and address queue. + async fn get_queue_elements( + &mut self, + merkle_tree_pubkey: [u8; 32], + options: QueueElementsV2Options, + config: Option<IndexerRpcConfig>, + ) -> Result<Response<QueueElementsV2Result>, IndexerError>; async fn get_subtrees( &self, merkle_tree_pubkey: [u8; 32], diff --git a/sdk-libs/client/src/indexer/mod.rs b/sdk-libs/client/src/indexer/mod.rs index b03dcedac7..6b86a41559 100644 --- a/sdk-libs/client/src/indexer/mod.rs +++ b/sdk-libs/client/src/indexer/mod.rs @@ -14,11 +14,12 @@ pub use error::IndexerError; pub use indexer_trait::Indexer; pub use response::{Context, Items, ItemsWithCursor, Response}; pub use types::{ - AccountProofInputs, Address, AddressMerkleTreeAccounts, AddressProofInputs, AddressQueueIndex, - AddressWithTree, BatchAddressUpdateIndexerResponse, CompressedAccount, CompressedTokenAccount, - Hash, MerkleProof, MerkleProofWithContext, NewAddressProofWithContext, NextTreeInfo, - OwnerBalance, ProofOfLeaf, QueueElementsResult, QueueInfo, QueueInfoResult, RootIndex, - SignatureWithMetadata, StateMerkleTreeAccounts, TokenBalance, TreeInfo, + AccountProofInputs, Address, AddressMerkleTreeAccounts, AddressProofInputs, AddressQueueDataV2, + AddressQueueIndex, AddressWithTree, BatchAddressUpdateIndexerResponse, CompressedAccount, + CompressedTokenAccount, Hash, InputQueueDataV2, MerkleProof, MerkleProofWithContext, + NewAddressProofWithContext, NextTreeInfo, OutputQueueDataV2, OwnerBalance, ProofOfLeaf, + QueueElementsResult, QueueElementsV2Result, QueueInfo, QueueInfoResult, RootIndex, + SignatureWithMetadata, StateMerkleTreeAccounts, StateQueueDataV2, TokenBalance, TreeInfo, ValidityProofWithContext, }; mod options; diff --git a/sdk-libs/client/src/indexer/options.rs b/sdk-libs/client/src/indexer/options.rs index fdf3d3ef0d..dbbf699fb5 100644 --- a/sdk-libs/client/src/indexer/options.rs +++ b/sdk-libs/client/src/indexer/options.rs @@ -59,3 +59,56 @@ impl GetCompressedAccountsByOwnerConfig { .map(|filters| filters.iter().map(|f| f.clone().into()).collect()) } } + +/// Options for fetching queue elements (V2 with deduplicated nodes and address queue support). 
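+///
+/// All fields default to `None`; request only the queues you need via the
+/// builder methods. A hypothetical call fetching one page of fifty output
+/// queue elements with a ZKP batch size of ten:
+///
+/// ```ignore
+/// let opts = QueueElementsV2Options::new()
+///     .with_output_queue(None, Some(50))
+///     .with_output_queue_batch_size(Some(10));
+/// ```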
+#[derive(Debug, Clone, Default)] +pub struct QueueElementsV2Options { + pub output_queue_start_index: Option<u64>, + pub output_queue_limit: Option<u16>, + pub output_queue_zkp_batch_size: Option<u16>, + pub input_queue_start_index: Option<u64>, + pub input_queue_limit: Option<u16>, + pub input_queue_zkp_batch_size: Option<u16>, + pub address_queue_start_index: Option<u64>, + pub address_queue_limit: Option<u16>, + pub address_queue_zkp_batch_size: Option<u16>, +} + +impl QueueElementsV2Options { + pub fn new() -> Self { + Self::default() + } + + pub fn with_output_queue(mut self, start_index: Option<u64>, limit: Option<u16>) -> Self { + self.output_queue_start_index = start_index; + self.output_queue_limit = limit; + self + } + + pub fn with_output_queue_batch_size(mut self, batch_size: Option<u16>) -> Self { + self.output_queue_zkp_batch_size = batch_size; + self + } + + pub fn with_input_queue(mut self, start_index: Option<u64>, limit: Option<u16>) -> Self { + self.input_queue_start_index = start_index; + self.input_queue_limit = limit; + self + } + + pub fn with_input_queue_batch_size(mut self, batch_size: Option<u16>) -> Self { + self.input_queue_zkp_batch_size = batch_size; + self + } + + pub fn with_address_queue(mut self, start_index: Option<u64>, limit: Option<u16>) -> Self { + self.address_queue_start_index = start_index; + self.address_queue_limit = limit; + self + } + + pub fn with_address_queue_batch_size(mut self, batch_size: Option<u16>) -> Self { + self.address_queue_zkp_batch_size = batch_size; + self + } +} diff --git a/sdk-libs/client/src/indexer/photon_indexer.rs b/sdk-libs/client/src/indexer/photon_indexer.rs index 70970dd2a6..60c5938557 100644 --- a/sdk-libs/client/src/indexer/photon_indexer.rs +++ b/sdk-libs/client/src/indexer/photon_indexer.rs @@ -11,8 +11,8 @@ use tracing::{error, trace, warn}; use super::{ types::{ - CompressedAccount, CompressedTokenAccount, OwnerBalance, QueueElementsResult, - SignatureWithMetadata, TokenBalance, + CompressedAccount, CompressedTokenAccount, OwnerBalance, SignatureWithMetadata, + TokenBalance, }, BatchAddressUpdateIndexerResponse, }; @@ -1577,166 +1577,6 @@ impl Indexer for PhotonIndexer { } } - async fn get_queue_elements( - &mut self, - _pubkey: [u8; 32], - _output_queue_start_index: Option<u64>, - _output_queue_limit: Option<u16>, - _input_queue_start_index: Option<u64>, - _input_queue_limit: Option<u16>, - _config: Option<IndexerRpcConfig>, - ) -> Result<Response<QueueElementsResult>, IndexerError> { - #[cfg(not(feature = "v2"))] - unimplemented!("get_queue_elements"); - #[cfg(feature = "v2")] - { - use super::MerkleProofWithContext; - let pubkey = _pubkey; - let output_queue_start_index = _output_queue_start_index; - let output_queue_limit = _output_queue_limit; - let input_queue_start_index = _input_queue_start_index; - let input_queue_limit = _input_queue_limit; - let config = _config.unwrap_or_default(); - self.retry(config.retry_config, || async { - let request: photon_api::models::GetQueueElementsPostRequest = - photon_api::models::GetQueueElementsPostRequest { - params: Box::from(photon_api::models::GetQueueElementsPostRequestParams { - tree: bs58::encode(pubkey).into_string(), - output_queue_start_index, - output_queue_limit, - input_queue_start_index, - input_queue_limit, - }), - ..Default::default() - }; - - let result = photon_api::apis::default_api::get_queue_elements_post( - &self.configuration, - request, - ) - .await; - let result: Result<Response<QueueElementsResult>, IndexerError> = match result { - Ok(api_response) => match api_response.result { - Some(api_result) => { - if api_result.context.slot < config.slot { - return Err(IndexerError::IndexerNotSyncedToSlot); - } - - // Parse output 
queue elements - let output_queue_elements = api_result - .output_queue_elements - .map(|elements| { - elements - .iter() - .map(|x| -> Result<_, IndexerError> { - let proof: Vec<[u8; 32]> = x - .proof - .iter() - .map(|p| Hash::from_base58(p)) - .collect::<Result<Vec<_>, _>>()?; - let root = Hash::from_base58(&x.root)?; - let leaf = Hash::from_base58(&x.leaf)?; - let merkle_tree = Hash::from_base58(&x.tree)?; - let tx_hash = x - .tx_hash - .as_ref() - .map(|h| Hash::from_base58(h)) - .transpose()?; - let account_hash = Hash::from_base58(&x.account_hash)?; - - Ok(MerkleProofWithContext { - proof, - root, - leaf_index: x.leaf_index, - leaf, - merkle_tree, - root_seq: x.root_seq, - tx_hash, - account_hash, - }) - }) - .collect::<Result<Vec<_>, _>>() - }) - .transpose()?; - - // Parse input queue elements - let input_queue_elements = api_result - .input_queue_elements - .map(|elements| { - elements - .iter() - .map(|x| -> Result<_, IndexerError> { - let proof: Vec<[u8; 32]> = x - .proof - .iter() - .map(|p| Hash::from_base58(p)) - .collect::<Result<Vec<_>, _>>()?; - let root = Hash::from_base58(&x.root)?; - let leaf = Hash::from_base58(&x.leaf)?; - let merkle_tree = Hash::from_base58(&x.tree)?; - let tx_hash = x - .tx_hash - .as_ref() - .map(|h| Hash::from_base58(h)) - .transpose()?; - let account_hash = Hash::from_base58(&x.account_hash)?; - - Ok(MerkleProofWithContext { - proof, - root, - leaf_index: x.leaf_index, - leaf, - merkle_tree, - root_seq: x.root_seq, - tx_hash, - account_hash, - }) - }) - .collect::<Result<Vec<_>, _>>() - }) - .transpose()?; - - Ok(Response { - context: Context { - slot: api_result.context.slot, - }, - value: QueueElementsResult { - output_queue_elements, - output_queue_index: api_result.output_queue_index, - input_queue_elements, - input_queue_index: api_result.input_queue_index, - }, - }) - } - None => { - let error = - api_response - .error - .ok_or_else(|| IndexerError::PhotonError { - context: "get_queue_elements".to_string(), - message: "No error details provided".to_string(), - })?; - - Err(IndexerError::PhotonError { - context: "get_queue_elements".to_string(), - message: error - .message - .unwrap_or_else(|| "Unknown error".to_string()), - }) - } - }, - Err(e) => Err(IndexerError::PhotonError { - context: "get_queue_elements".to_string(), - message: e.to_string(), - }), - }; - - result - }) - .await - } - } - async fn get_queue_info( &self, config: Option<IndexerRpcConfig>, @@ -1797,6 +1637,221 @@ impl Indexer for PhotonIndexer { .await } + async fn get_queue_elements( + &mut self, + merkle_tree_pubkey: [u8; 32], + options: super::QueueElementsV2Options, + config: Option<IndexerRpcConfig>, + ) -> Result<Response<super::QueueElementsV2Result>, IndexerError> { + #[cfg(not(feature = "v2"))] + unimplemented!(); + + #[cfg(feature = "v2")] + { + let config = config.unwrap_or_default(); + self.retry(config.retry_config, || async { + let params = photon_api::models::GetQueueElementsV2PostRequestParams { + tree: bs58::encode(merkle_tree_pubkey).into_string(), + output_queue_start_index: options.output_queue_start_index, + output_queue_limit: options.output_queue_limit, + output_queue_zkp_batch_size: options.output_queue_zkp_batch_size, + input_queue_start_index: options.input_queue_start_index, + input_queue_limit: options.input_queue_limit, + input_queue_zkp_batch_size: options.input_queue_zkp_batch_size, + address_queue_start_index: options.address_queue_start_index, + address_queue_limit: options.address_queue_limit, + address_queue_zkp_batch_size: options.address_queue_zkp_batch_size, + }; + + let request = photon_api::models::GetQueueElementsV2PostRequest { + params: Box::new(params), + ..Default::default() 
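+ // id, jsonrpc and method fall back to Default here
+ // (JSON-RPC 2.0, method "getQueueElements").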
+ }; + + let result = photon_api::apis::default_api::get_queue_elements_v2_post( + &self.configuration, + request, + ) + .await?; + + let api_response = Self::extract_result_with_error_check( + "get_queue_elements", + result.error, + result.result.map(|r| *r), + )?; + + if api_response.context.slot < config.slot { + return Err(IndexerError::IndexerNotSyncedToSlot); + } + + let state_queue = if let Some(state) = api_response.state_queue { + let node_hashes: Result<Vec<[u8; 32]>, IndexerError> = state + .node_hashes + .iter() + .map(|h| Hash::from_base58(h)) + .collect(); + let initial_root = Hash::from_base58(&state.initial_root)?; + + let output_queue = if let Some(output) = state.output_queue { + let account_hashes: Result<Vec<[u8; 32]>, IndexerError> = output + .account_hashes + .iter() + .map(|h| Hash::from_base58(h)) + .collect(); + let old_leaves: Result<Vec<[u8; 32]>, IndexerError> = + output.leaves.iter().map(|h| Hash::from_base58(h)).collect(); + let leaves_hash_chains: Result<Vec<[u8; 32]>, IndexerError> = output + .leaves_hash_chains + .iter() + .map(|h| Hash::from_base58(h)) + .collect(); + + Some(super::OutputQueueDataV2 { + leaf_indices: output.leaf_indices, + account_hashes: account_hashes?, + old_leaves: old_leaves?, + first_queue_index: output.first_queue_index, + next_index: output.next_index, + leaves_hash_chains: leaves_hash_chains?, + }) + } else { + None + }; + + let input_queue = if let Some(input) = state.input_queue { + let account_hashes: Result<Vec<[u8; 32]>, IndexerError> = input + .account_hashes + .iter() + .map(|h| Hash::from_base58(h)) + .collect(); + let current_leaves: Result<Vec<[u8; 32]>, IndexerError> = + input.leaves.iter().map(|h| Hash::from_base58(h)).collect(); + let tx_hashes: Result<Vec<[u8; 32]>, IndexerError> = input + .tx_hashes + .iter() + .map(|h| Hash::from_base58(h)) + .collect(); + let nullifiers: Result<Vec<[u8; 32]>, IndexerError> = input + .nullifiers + .iter() + .map(|h| Hash::from_base58(h)) + .collect(); + let leaves_hash_chains: Result<Vec<[u8; 32]>, IndexerError> = input + .leaves_hash_chains + .iter() + .map(|h| Hash::from_base58(h)) + .collect(); + + Some(super::InputQueueDataV2 { + leaf_indices: input.leaf_indices, + account_hashes: account_hashes?, + current_leaves: current_leaves?, + tx_hashes: tx_hashes?, + nullifiers: nullifiers?, + first_queue_index: input.first_queue_index, + leaves_hash_chains: leaves_hash_chains?, + }) + } else { + None + }; + + Some(super::StateQueueDataV2 { + nodes: state.nodes, + node_hashes: node_hashes?, + initial_root, + root_seq: state.root_seq, + output_queue, + input_queue, + }) + } else { + None + }; + + // Transform AddressQueueDataV2 + let address_queue = if let Some(address) = api_response.address_queue { + let addresses: Result<Vec<[u8; 32]>, IndexerError> = address + .addresses + .iter() + .map(|h| Hash::from_base58(h)) + .collect(); + + let low_element_values: Result<Vec<[u8; 32]>, IndexerError> = address + .low_element_values + .iter() + .map(|h| Hash::from_base58(h)) + .collect(); + + let low_element_next_values: Result<Vec<[u8; 32]>, IndexerError> = address + .low_element_next_values + .iter() + .map(|h| Hash::from_base58(h)) + .collect(); + + let low_element_proofs: Result<Vec<Vec<[u8; 32]>>, IndexerError> = address + .low_element_proofs + .iter() + .map(|proof_vec| { + proof_vec + .iter() + .map(|h| Hash::from_base58(h)) + .collect::<Result<Vec<_>, IndexerError>>() + }) + .collect(); + + let node_hashes: Result<Vec<[u8; 32]>, IndexerError> = address + .node_hashes + .iter() + .map(|h| Hash::from_base58(h)) + .collect(); + + let initial_root = Hash::from_base58(&address.initial_root)?; + + let leaves_hash_chains: Result<Vec<[u8; 32]>, IndexerError> = address + .leaves_hash_chains + .iter() + .map(|h| 
Hash::from_base58(h)) + .collect(); + + let subtrees: Result<Vec<[u8; 32]>, IndexerError> = address + .subtrees + .iter() + .map(|h| Hash::from_base58(h)) + .collect(); + + Some(super::AddressQueueDataV2 { + addresses: addresses?, + low_element_values: low_element_values?, + low_element_next_values: low_element_next_values?, + low_element_indices: address.low_element_indices, + low_element_next_indices: address.low_element_next_indices, + low_element_proofs: low_element_proofs?, + nodes: address.nodes, + node_hashes: node_hashes?, + initial_root, + first_queue_index: address.start_index, + leaves_hash_chains: leaves_hash_chains?, + subtrees: subtrees?, + start_index: address.start_index, + root_seq: address.root_seq, + }) + } else { + None + }; + + Ok(Response { + context: Context { + slot: api_response.context.slot, + }, + value: super::QueueElementsV2Result { + state_queue, + address_queue, + }, + }) + }) + .await + } + } + async fn get_subtrees( &self, _merkle_tree_pubkey: [u8; 32], diff --git a/sdk-libs/client/src/indexer/types.rs b/sdk-libs/client/src/indexer/types.rs index 7638274eef..106ac5dcbd 100644 --- a/sdk-libs/client/src/indexer/types.rs +++ b/sdk-libs/client/src/indexer/types.rs @@ -51,6 +51,76 @@ pub struct QueueElementsResult { pub input_queue_index: Option, } +/// V2 Output Queue Data +#[derive(Debug, Clone, PartialEq, Default)] +pub struct OutputQueueDataV2 { + pub leaf_indices: Vec<u64>, + pub account_hashes: Vec<[u8; 32]>, + pub old_leaves: Vec<[u8; 32]>, + pub first_queue_index: u64, + /// The tree's next_index - where new leaves will be appended + pub next_index: u64, + /// Pre-computed hash chains per ZKP batch (from on-chain) + pub leaves_hash_chains: Vec<[u8; 32]>, +} + +/// V2 Input Queue Data +#[derive(Debug, Clone, PartialEq, Default)] +pub struct InputQueueDataV2 { + pub leaf_indices: Vec<u64>, + pub account_hashes: Vec<[u8; 32]>, + pub current_leaves: Vec<[u8; 32]>, + pub tx_hashes: Vec<[u8; 32]>, + /// Pre-computed nullifiers from indexer + pub nullifiers: Vec<[u8; 32]>, + pub first_queue_index: u64, + /// Pre-computed hash chains per ZKP batch (from on-chain) + pub leaves_hash_chains: Vec<[u8; 32]>, +} + +/// State queue data with shared tree nodes for output and input queues +#[derive(Debug, Clone, PartialEq, Default)] +pub struct StateQueueDataV2 { + /// Shared deduplicated tree nodes for state queues (output + input) + /// node_index encoding: (level << 56) | position + pub nodes: Vec<u64>, + pub node_hashes: Vec<[u8; 32]>, + /// Initial root for the state tree (shared by output and input queues) + pub initial_root: [u8; 32], + /// Sequence number of the root + pub root_seq: u64, + /// Output queue data (if requested) + pub output_queue: Option<OutputQueueDataV2>, + /// Input queue data (if requested) + pub input_queue: Option<InputQueueDataV2>, +} + +/// V2 Address Queue Data with deduplicated nodes +#[derive(Debug, Clone, PartialEq, Default)] +pub struct AddressQueueDataV2 { + pub addresses: Vec<[u8; 32]>, + pub low_element_values: Vec<[u8; 32]>, + pub low_element_next_values: Vec<[u8; 32]>, + pub low_element_indices: Vec<u64>, + pub low_element_next_indices: Vec<u64>, + pub low_element_proofs: Vec<Vec<[u8; 32]>>, + pub nodes: Vec<u64>, + pub node_hashes: Vec<[u8; 32]>, + pub initial_root: [u8; 32], + pub first_queue_index: u64, + pub leaves_hash_chains: Vec<[u8; 32]>, + pub subtrees: Vec<[u8; 32]>, + pub start_index: u64, + pub root_seq: u64, +} + +/// V2 Queue Elements Result with deduplicated node data +#[derive(Debug, Clone, PartialEq, Default)] +pub struct QueueElementsV2Result { + pub state_queue: Option<StateQueueDataV2>, + pub address_queue: Option<AddressQueueDataV2>, +} + 
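+// Illustrative decoding of the packed node_index used by
+// `StateQueueDataV2::nodes` and `AddressQueueDataV2::nodes`:
+//
+//     let node_index = (3u64 << 56) | 5; // level 3, position 5
+//     let level = (node_index >> 56) as usize; // 3
+//     let position = (node_index & 0x00FF_FFFF_FFFF_FFFF) as usize; // 5
+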
#[derive(Debug, Clone, PartialEq, Default)] pub struct MerkleProofWithContext { pub proof: Vec<[u8; 32]>, diff --git a/sdk-libs/client/src/rpc/indexer.rs b/sdk-libs/client/src/rpc/indexer.rs index 5ddd6c0372..7266300dc2 100644 --- a/sdk-libs/client/src/rpc/indexer.rs +++ b/sdk-libs/client/src/rpc/indexer.rs @@ -7,8 +7,8 @@ use crate::indexer::{ CompressedTokenAccount, GetCompressedAccountsByOwnerConfig, GetCompressedTokenAccountsByOwnerOrDelegateOptions, Hash, Indexer, IndexerError, IndexerRpcConfig, Items, ItemsWithCursor, MerkleProof, NewAddressProofWithContext, - OwnerBalance, PaginatedOptions, QueueElementsResult, QueueInfoResult, Response, RetryConfig, - SignatureWithMetadata, TokenBalance, ValidityProofWithContext, + OwnerBalance, PaginatedOptions, QueueInfoResult, Response, RetryConfig, SignatureWithMetadata, + TokenBalance, ValidityProofWithContext, }; #[async_trait] @@ -201,42 +201,31 @@ impl Indexer for LightClient { .await?) } - async fn get_queue_elements( - &mut self, - merkle_tree_pubkey: [u8; 32], - output_queue_start_index: Option<u64>, - output_queue_limit: Option<u16>, - input_queue_start_index: Option<u64>, - input_queue_limit: Option<u16>, + async fn get_queue_info( + &self, config: Option<IndexerRpcConfig>, - ) -> Result<Response<QueueElementsResult>, IndexerError> { + ) -> Result<Response<QueueInfoResult>, IndexerError> { Ok(self .indexer - .as_mut() + .as_ref() .ok_or(IndexerError::NotInitialized)? - .get_queue_elements( - merkle_tree_pubkey, - output_queue_start_index, - output_queue_limit, - input_queue_start_index, - input_queue_limit, - config, - ) + .get_queue_info(config) .await?) } - async fn get_queue_info( - &self, + async fn get_queue_elements( + &mut self, + merkle_tree_pubkey: [u8; 32], + options: crate::indexer::QueueElementsV2Options, config: Option<IndexerRpcConfig>, - ) -> Result<Response<QueueInfoResult>, IndexerError> { + ) -> Result<Response<crate::indexer::QueueElementsV2Result>, IndexerError> { Ok(self .indexer - .as_ref() + .as_mut() .ok_or(IndexerError::NotInitialized)? - .get_queue_info(config) + .get_queue_elements(merkle_tree_pubkey, options, config) .await?) 
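+ // LightClient only forwards to the configured indexer implementation;
+ // without one, callers get IndexerError::NotInitialized above.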
} - async fn get_subtrees( &self, merkle_tree_pubkey: [u8; 32], diff --git a/sdk-libs/photon-api/src/apis/default_api.rs b/sdk-libs/photon-api/src/apis/default_api.rs index d0dd52fa51..b623ba3353 100644 --- a/sdk-libs/photon-api/src/apis/default_api.rs +++ b/sdk-libs/photon-api/src/apis/default_api.rs @@ -1997,6 +1997,55 @@ pub async fn get_validity_proof_v2_post( } } +/// struct for typed errors of method [`get_queue_elements_v2_post`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum GetQueueElementsV2PostError { + Status429(models::GetBatchAddressUpdateInfoPost429Response), + Status500(models::GetBatchAddressUpdateInfoPost429Response), + UnknownValue(serde_json::Value), +} + +/// V2: Get queue elements with deduplicated nodes +pub async fn get_queue_elements_v2_post( + configuration: &configuration::Configuration, + get_queue_elements_v2_post_request: models::GetQueueElementsV2PostRequest, +) -> Result<models::GetQueueElementsV2Post200Response, Error<GetQueueElementsV2PostError>> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!("{}/getQueueElements", local_var_configuration.base_path); + let local_var_uri_str = append_api_key(local_var_configuration, &local_var_uri_str); + let mut local_var_req_builder = + local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); + + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = + local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + local_var_req_builder = local_var_req_builder.json(&get_queue_elements_v2_post_request); + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option<GetQueueElementsV2PostError> = + serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { + status: local_var_status, + content: local_var_content, + entity: local_var_entity, + }; + Err(Error::ResponseError(local_var_error)) + } +} + fn append_api_key(configuration: &Configuration, uri_str: &str) -> String { let mut uri_str = uri_str.to_string(); if let Some(ref api_key) = configuration.api_key { diff --git a/sdk-libs/photon-api/src/models/_get_queue_elements_v2_post_200_response.rs b/sdk-libs/photon-api/src/models/_get_queue_elements_v2_post_200_response.rs new file mode 100644 index 0000000000..13441587c3 --- /dev/null +++ b/sdk-libs/photon-api/src/models/_get_queue_elements_v2_post_200_response.rs @@ -0,0 +1,60 @@ +/* + * photon-indexer + * + * Solana indexer for general compression + * + * The version of the OpenAPI document: 0.50.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetQueueElementsV2Post200Response { + #[serde(rename = "error", skip_serializing_if = "Option::is_none")] + pub error: Option<Box<models::GetBatchAddressUpdateInfoPost429ResponseError>>, + /// An ID to identify the response. + #[serde(rename = "id")] + pub id: Id, + /// The version of the JSON-RPC protocol. 
diff --git a/sdk-libs/photon-api/src/models/_get_queue_elements_v2_post_200_response.rs b/sdk-libs/photon-api/src/models/_get_queue_elements_v2_post_200_response.rs
new file mode 100644
index 0000000000..13441587c3
--- /dev/null
+++ b/sdk-libs/photon-api/src/models/_get_queue_elements_v2_post_200_response.rs
@@ -0,0 +1,60 @@
+/*
+ * photon-indexer
+ *
+ * Solana indexer for general compression
+ *
+ * The version of the OpenAPI document: 0.50.0
+ *
+ * Generated by: https://openapi-generator.tech
+ */
+
+use crate::models;
+
+#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
+pub struct GetQueueElementsV2Post200Response {
+    #[serde(rename = "error", skip_serializing_if = "Option::is_none")]
+    pub error: Option>,
+    /// An ID to identify the response.
+    #[serde(rename = "id")]
+    pub id: Id,
+    /// The version of the JSON-RPC protocol.
+    #[serde(rename = "jsonrpc")]
+    pub jsonrpc: Jsonrpc,
+    #[serde(rename = "result", skip_serializing_if = "Option::is_none")]
+    pub result: Option<Box<models::GetQueueElementsV2Post200ResponseResult>>,
+}
+
+impl GetQueueElementsV2Post200Response {
+    pub fn new(id: Id, jsonrpc: Jsonrpc) -> GetQueueElementsV2Post200Response {
+        GetQueueElementsV2Post200Response {
+            error: None,
+            id,
+            jsonrpc,
+            result: None,
+        }
+    }
+}
+/// An ID to identify the response.
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
+pub enum Id {
+    #[serde(rename = "test-account")]
+    TestAccount,
+}
+
+impl Default for Id {
+    fn default() -> Id {
+        Self::TestAccount
+    }
+}
+/// The version of the JSON-RPC protocol.
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
+pub enum Jsonrpc {
+    #[serde(rename = "2.0")]
+    Variant2Period0,
+}
+
+impl Default for Jsonrpc {
+    fn default() -> Jsonrpc {
+        Self::Variant2Period0
+    }
+}
diff --git a/sdk-libs/photon-api/src/models/_get_queue_elements_v2_post_200_response_result.rs b/sdk-libs/photon-api/src/models/_get_queue_elements_v2_post_200_response_result.rs
new file mode 100644
index 0000000000..34b395e7e8
--- /dev/null
+++ b/sdk-libs/photon-api/src/models/_get_queue_elements_v2_post_200_response_result.rs
@@ -0,0 +1,31 @@
+/*
+ * photon-indexer
+ *
+ * Solana indexer for general compression
+ *
+ * The version of the OpenAPI document: 0.50.0
+ *
+ * Generated by: https://openapi-generator.tech
+ */
+
+use crate::models;
+
+#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
+pub struct GetQueueElementsV2Post200ResponseResult {
+    #[serde(rename = "context")]
+    pub context: Box<models::Context>,
+    #[serde(rename = "stateQueue", skip_serializing_if = "Option::is_none")]
+    pub state_queue: Option<Box<models::StateQueueDataV2>>,
+    #[serde(rename = "addressQueue", skip_serializing_if = "Option::is_none")]
+    pub address_queue: Option<Box<models::AddressQueueDataV2>>,
+}
+
+impl GetQueueElementsV2Post200ResponseResult {
+    pub fn new(context: models::Context) -> GetQueueElementsV2Post200ResponseResult {
+        GetQueueElementsV2Post200ResponseResult {
+            context: Box::new(context),
+            state_queue: None,
+            address_queue: None,
+        }
+    }
+}
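The `result` carries either `stateQueue` or `addressQueue`, matching the type of the tree that was queried. A small unpacking sketch (same `photon_api` crate-name assumption as above) using only fields defined in these models:

```rust
use photon_api::models::GetQueueElementsV2Post200ResponseResult;

fn describe(result: &GetQueueElementsV2Post200ResponseResult) {
    // Which payload is set depends on the queried tree's type.
    match (&result.state_queue, &result.address_queue) {
        (Some(state), None) => println!(
            "state tree at root {} (seq {})",
            state.initial_root, state.root_seq
        ),
        (None, Some(address)) => println!(
            "address tree, batch starts at index {}",
            address.start_index
        ),
        _ => println!("no queue data returned"),
    }
}
```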
diff --git a/sdk-libs/photon-api/src/models/_get_queue_elements_v2_post_request.rs b/sdk-libs/photon-api/src/models/_get_queue_elements_v2_post_request.rs
new file mode 100644
index 0000000000..9adc582be7
--- /dev/null
+++ b/sdk-libs/photon-api/src/models/_get_queue_elements_v2_post_request.rs
@@ -0,0 +1,78 @@
+/*
+ * photon-indexer
+ *
+ * Solana indexer for general compression
+ *
+ * The version of the OpenAPI document: 0.50.0
+ *
+ * Generated by: https://openapi-generator.tech
+ */
+
+use crate::models;
+
+#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
+pub struct GetQueueElementsV2PostRequest {
+    /// An ID to identify the request.
+    #[serde(rename = "id")]
+    pub id: Id,
+    /// The version of the JSON-RPC protocol.
+    #[serde(rename = "jsonrpc")]
+    pub jsonrpc: Jsonrpc,
+    /// The name of the method to invoke.
+    #[serde(rename = "method")]
+    pub method: Method,
+    #[serde(rename = "params")]
+    pub params: Box<models::GetQueueElementsV2PostRequestParams>,
+}
+
+impl GetQueueElementsV2PostRequest {
+    pub fn new(
+        id: Id,
+        jsonrpc: Jsonrpc,
+        method: Method,
+        params: models::GetQueueElementsV2PostRequestParams,
+    ) -> GetQueueElementsV2PostRequest {
+        GetQueueElementsV2PostRequest {
+            id,
+            jsonrpc,
+            method,
+            params: Box::new(params),
+        }
+    }
+}
+/// An ID to identify the request.
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
+pub enum Id {
+    #[serde(rename = "test-account")]
+    TestAccount,
+}
+
+impl Default for Id {
+    fn default() -> Id {
+        Self::TestAccount
+    }
+}
+/// The version of the JSON-RPC protocol.
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
+pub enum Jsonrpc {
+    #[serde(rename = "2.0")]
+    Variant2Period0,
+}
+
+impl Default for Jsonrpc {
+    fn default() -> Jsonrpc {
+        Self::Variant2Period0
+    }
+}
+/// The name of the method to invoke.
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
+pub enum Method {
+    #[serde(rename = "getQueueElements")]
+    GetQueueElementsV2,
+}
+
+impl Default for Method {
+    fn default() -> Method {
+        Self::GetQueueElementsV2
+    }
+}
diff --git a/sdk-libs/photon-api/src/models/_get_queue_elements_v2_post_request_params.rs b/sdk-libs/photon-api/src/models/_get_queue_elements_v2_post_request_params.rs
new file mode 100644
index 0000000000..3eff127d3d
--- /dev/null
+++ b/sdk-libs/photon-api/src/models/_get_queue_elements_v2_post_request_params.rs
@@ -0,0 +1,78 @@
+/*
+ * photon-indexer
+ *
+ * Solana indexer for general compression
+ *
+ * The version of the OpenAPI document: 0.50.0
+ *
+ * Generated by: https://openapi-generator.tech
+ */
+
+#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
+pub struct GetQueueElementsV2PostRequestParams {
+    /// The merkle tree public key
+    #[serde(rename = "tree")]
+    pub tree: String,
+    /// Starting index for the output queue
+    #[serde(
+        rename = "outputQueueStartIndex",
+        skip_serializing_if = "Option::is_none"
+    )]
+    pub output_queue_start_index: Option<u64>,
+    /// Limit for the output queue elements
+    #[serde(rename = "outputQueueLimit", skip_serializing_if = "Option::is_none")]
+    pub output_queue_limit: Option<u16>,
+    /// Optional override for the output queue ZKP batch size
+    #[serde(
+        rename = "outputQueueZkpBatchSize",
+        skip_serializing_if = "Option::is_none"
+    )]
+    pub output_queue_zkp_batch_size: Option<u16>,
+    /// Starting index for the input queue
+    #[serde(
+        rename = "inputQueueStartIndex",
+        skip_serializing_if = "Option::is_none"
+    )]
+    pub input_queue_start_index: Option<u64>,
+    /// Limit for the input queue elements
+    #[serde(rename = "inputQueueLimit", skip_serializing_if = "Option::is_none")]
+    pub input_queue_limit: Option<u16>,
+    /// Optional override for the input queue ZKP batch size
+    #[serde(
+        rename = "inputQueueZkpBatchSize",
+        skip_serializing_if = "Option::is_none"
+    )]
+    pub input_queue_zkp_batch_size: Option<u16>,
+    /// Starting index for the address queue
+    #[serde(
+        rename = "addressQueueStartIndex",
+        skip_serializing_if = "Option::is_none"
+    )]
+    pub address_queue_start_index: Option<u64>,
+    /// Limit for the address queue elements
+    #[serde(rename = "addressQueueLimit", skip_serializing_if = "Option::is_none")]
+    pub address_queue_limit: Option<u16>,
+    /// Optional override for the address queue ZKP batch size
+    #[serde(
+        rename = "addressQueueZkpBatchSize",
+        skip_serializing_if = "Option::is_none"
+    )]
+    pub address_queue_zkp_batch_size: Option<u16>,
+}
+
+impl GetQueueElementsV2PostRequestParams {
+    pub fn new(tree: String) -> GetQueueElementsV2PostRequestParams {
+        GetQueueElementsV2PostRequestParams {
+            tree,
+            output_queue_start_index: None,
+            output_queue_limit: None,
+            output_queue_zkp_batch_size: None,
+            input_queue_start_index: None,
+            input_queue_limit: None,
+            input_queue_zkp_batch_size: None,
+            address_queue_start_index: None,
+            address_queue_limit: None,
+            address_queue_zkp_batch_size: None,
+        }
+    }
+}
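Serialized, the request yields a standard JSON-RPC 2.0 payload whose keys follow the serde renames above. A sketch that builds one and notes the expected wire shape (the tree pubkey is a placeholder):

```rust
use photon_api::models::{GetQueueElementsV2PostRequest, GetQueueElementsV2PostRequestParams};

fn wire_shape() -> serde_json::Result<String> {
    let mut params =
        GetQueueElementsV2PostRequestParams::new("11111111111111111111111111111111".into());
    params.address_queue_start_index = Some(0);
    params.address_queue_limit = Some(10);
    let request = GetQueueElementsV2PostRequest::new(
        Default::default(), // Id::TestAccount -> "test-account"
        Default::default(), // Jsonrpc::Variant2Period0 -> "2.0"
        Default::default(), // Method::GetQueueElementsV2 -> "getQueueElements"
        params,
    );
    // Expected shape (unset Options are skipped by skip_serializing_if):
    // {"id":"test-account","jsonrpc":"2.0","method":"getQueueElements",
    //  "params":{"tree":"1111...","addressQueueStartIndex":0,"addressQueueLimit":10}}
    serde_json::to_string(&request)
}
```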
diff --git a/sdk-libs/photon-api/src/models/address_queue_data_v2.rs b/sdk-libs/photon-api/src/models/address_queue_data_v2.rs
new file mode 100644
index 0000000000..6c6161bafa
--- /dev/null
+++ b/sdk-libs/photon-api/src/models/address_queue_data_v2.rs
@@ -0,0 +1,81 @@
+/*
+ * photon-indexer
+ *
+ * Solana indexer for general compression
+ *
+ * The version of the OpenAPI document: 0.50.0
+ *
+ * Generated by: https://openapi-generator.tech
+ */
+
+#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
+#[allow(clippy::too_many_arguments)]
+pub struct AddressQueueDataV2 {
+    #[serde(rename = "addresses")]
+    pub addresses: Vec<String>,
+    #[serde(rename = "queueIndices")]
+    pub queue_indices: Vec<u64>,
+    /// Deduplicated tree nodes for address tree non-inclusion proofs
+    /// node_index encoding: (level << 56) | position
+    #[serde(rename = "nodes")]
+    pub nodes: Vec<u64>,
+    #[serde(rename = "nodeHashes")]
+    pub node_hashes: Vec<String>,
+    #[serde(rename = "lowElementIndices")]
+    pub low_element_indices: Vec<u64>,
+    #[serde(rename = "lowElementValues")]
+    pub low_element_values: Vec<String>,
+    #[serde(rename = "lowElementNextIndices")]
+    pub low_element_next_indices: Vec<u64>,
+    #[serde(rename = "lowElementNextValues")]
+    pub low_element_next_values: Vec<String>,
+    #[serde(rename = "lowElementProofs", default)]
+    pub low_element_proofs: Vec<Vec<String>>,
+    #[serde(rename = "leavesHashChains", default)]
+    pub leaves_hash_chains: Vec<String>,
+    #[serde(rename = "initialRoot")]
+    pub initial_root: String,
+    #[serde(rename = "startIndex")]
+    pub start_index: u64,
+    #[serde(rename = "subtrees", default)]
+    pub subtrees: Vec<String>,
+    #[serde(rename = "rootSeq", default)]
+    pub root_seq: u64,
+}
+
+impl AddressQueueDataV2 {
+    #[allow(clippy::too_many_arguments)]
+    pub fn new(
+        addresses: Vec<String>,
+        queue_indices: Vec<u64>,
+        nodes: Vec<u64>,
+        node_hashes: Vec<String>,
+        low_element_indices: Vec<u64>,
+        low_element_values: Vec<String>,
+        low_element_next_indices: Vec<u64>,
+        low_element_next_values: Vec<String>,
+        low_element_proofs: Vec<Vec<String>>,
+        leaves_hash_chains: Vec<String>,
+        initial_root: String,
+        start_index: u64,
+        subtrees: Vec<String>,
+        root_seq: u64,
+    ) -> AddressQueueDataV2 {
+        AddressQueueDataV2 {
+            addresses,
+            queue_indices,
+            nodes,
+            node_hashes,
+            low_element_indices,
+            low_element_values,
+            low_element_next_indices,
+            low_element_next_values,
+            low_element_proofs,
+            leaves_hash_chains,
+            initial_root,
+            start_index,
+            subtrees,
+            root_seq,
+        }
+    }
+}
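The `node_index` packing documented on `nodes` keeps the tree level in the top 8 bits of a `u64` and the within-level position in the low 56 bits. Hypothetical helpers (the names are mine) that implement exactly that encoding:

```rust
/// Packs a tree level and a within-level position into one u64,
/// matching the documented `(level << 56) | position` layout.
fn encode_node_index(level: u8, position: u64) -> u64 {
    debug_assert!(position < (1u64 << 56)); // position must fit in 56 bits
    ((level as u64) << 56) | position
}

/// Inverse of `encode_node_index`: recovers (level, position).
fn decode_node_index(node_index: u64) -> (u8, u64) {
    ((node_index >> 56) as u8, node_index & ((1u64 << 56) - 1))
}
```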
diff --git a/sdk-libs/photon-api/src/models/input_queue_data_v2.rs b/sdk-libs/photon-api/src/models/input_queue_data_v2.rs
new file mode 100644
index 0000000000..3dc2489c71
--- /dev/null
+++ b/sdk-libs/photon-api/src/models/input_queue_data_v2.rs
@@ -0,0 +1,49 @@
+/*
+ * photon-indexer
+ *
+ * Solana indexer for general compression
+ *
+ * The version of the OpenAPI document: 0.50.0
+ *
+ * Generated by: https://openapi-generator.tech
+ */
+
+#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
+pub struct InputQueueDataV2 {
+    #[serde(rename = "leafIndices")]
+    pub leaf_indices: Vec<u64>,
+    #[serde(rename = "accountHashes")]
+    pub account_hashes: Vec<String>,
+    #[serde(rename = "leaves")]
+    pub leaves: Vec<String>,
+    #[serde(rename = "txHashes")]
+    pub tx_hashes: Vec<String>,
+    #[serde(rename = "nullifiers")]
+    pub nullifiers: Vec<String>,
+    #[serde(rename = "firstQueueIndex")]
+    pub first_queue_index: u64,
+    #[serde(rename = "leavesHashChains")]
+    pub leaves_hash_chains: Vec<String>,
+}
+
+impl InputQueueDataV2 {
+    pub fn new(
+        leaf_indices: Vec<u64>,
+        account_hashes: Vec<String>,
+        leaves: Vec<String>,
+        tx_hashes: Vec<String>,
+        nullifiers: Vec<String>,
+        first_queue_index: u64,
+        leaves_hash_chains: Vec<String>,
+    ) -> InputQueueDataV2 {
+        InputQueueDataV2 {
+            leaf_indices,
+            account_hashes,
+            leaves,
+            tx_hashes,
+            nullifiers,
+            first_queue_index,
+            leaves_hash_chains,
+        }
+    }
+}
diff --git a/sdk-libs/photon-api/src/models/mod.rs b/sdk-libs/photon-api/src/models/mod.rs
index b8aa4810dd..70cd7de750 100644
--- a/sdk-libs/photon-api/src/models/mod.rs
+++ b/sdk-libs/photon-api/src/models/mod.rs
@@ -330,3 +330,19 @@ pub mod _get_queue_info_post_200_response_result;
 pub use self::_get_queue_info_post_200_response_result::{
     GetQueueInfoPost200ResponseResult, QueueInfo,
 };
+pub mod address_queue_data_v2;
+pub use self::address_queue_data_v2::AddressQueueDataV2;
+pub mod _get_queue_elements_v2_post_200_response;
+pub use self::_get_queue_elements_v2_post_200_response::GetQueueElementsV2Post200Response;
+pub mod _get_queue_elements_v2_post_200_response_result;
+pub use self::_get_queue_elements_v2_post_200_response_result::GetQueueElementsV2Post200ResponseResult;
+pub mod _get_queue_elements_v2_post_request;
+pub use self::_get_queue_elements_v2_post_request::GetQueueElementsV2PostRequest;
+pub mod _get_queue_elements_v2_post_request_params;
+pub use self::_get_queue_elements_v2_post_request_params::GetQueueElementsV2PostRequestParams;
+pub mod input_queue_data_v2;
+pub use self::input_queue_data_v2::InputQueueDataV2;
+pub mod output_queue_data_v2;
+pub use self::output_queue_data_v2::OutputQueueDataV2;
+pub mod state_queue_data_v2;
+pub use self::state_queue_data_v2::StateQueueDataV2;
diff --git a/sdk-libs/photon-api/src/models/output_queue_data_v2.rs b/sdk-libs/photon-api/src/models/output_queue_data_v2.rs
new file mode 100644
index 0000000000..39ffd5448e
--- /dev/null
+++ b/sdk-libs/photon-api/src/models/output_queue_data_v2.rs
@@ -0,0 +1,45 @@
+/*
+ * photon-indexer
+ *
+ * Solana indexer for general compression
+ *
+ * The version of the OpenAPI document: 0.50.0
+ *
+ * Generated by: https://openapi-generator.tech
+ */
+
+#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
+pub struct OutputQueueDataV2 {
+    #[serde(rename = "leafIndices")]
+    pub leaf_indices: Vec<u64>,
+    #[serde(rename = "accountHashes")]
+    pub account_hashes: Vec<String>,
+    #[serde(rename = "leaves")]
+    pub leaves: Vec<String>,
+    #[serde(rename = "firstQueueIndex")]
+    pub first_queue_index: u64,
+    #[serde(rename = "nextIndex")]
+    pub next_index: u64,
+    #[serde(rename = "leavesHashChains")]
+    pub leaves_hash_chains: Vec<String>,
+}
+
+impl OutputQueueDataV2 {
+    pub fn new(
+        leaf_indices: Vec<u64>,
+        account_hashes: Vec<String>,
+        leaves: Vec<String>,
+        first_queue_index: u64,
+        next_index: u64,
+        leaves_hash_chains: Vec<String>,
+    ) -> OutputQueueDataV2 {
+        OutputQueueDataV2 {
+            leaf_indices,
+            account_hashes,
+            leaves,
+            first_queue_index,
+            next_index,
+            leaves_hash_chains,
+        }
+    }
+}
diff --git a/sdk-libs/photon-api/src/models/state_queue_data_v2.rs b/sdk-libs/photon-api/src/models/state_queue_data_v2.rs
new file mode 100644
index 0000000000..70b52f9c7b
--- /dev/null
+++ b/sdk-libs/photon-api/src/models/state_queue_data_v2.rs
@@ -0,0 +1,46 @@
+/*
+ * photon-indexer
+ *
+ * Solana indexer for general compression
+ *
+ * The version of the OpenAPI document: 0.0.0
+ *
+ * Generated by: https://openapi-generator.tech
+ */
+
+use crate::models;
+
+/// State queue data with shared tree nodes for output and input queues
+#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
+#[allow(clippy::too_many_arguments)]
+pub struct StateQueueDataV2 {
+    /// Shared deduplicated tree nodes for state queues (output + input)
+    /// node_index encoding: (level << 56) | position
+    #[serde(rename = "nodes", skip_serializing_if = "Vec::is_empty", default)]
+    pub nodes: Vec<u64>,
+    #[serde(rename = "nodeHashes", skip_serializing_if = "Vec::is_empty", default)]
+    pub node_hashes: Vec<String>,
+    /// Initial root for the state tree (shared by output and input queues)
+    #[serde(rename = "initialRoot")]
+    pub initial_root: String,
+    /// Sequence number of the root
+    #[serde(rename = "rootSeq")]
+    pub root_seq: u64,
+    #[serde(rename = "outputQueue", skip_serializing_if = "Option::is_none")]
+    pub output_queue: Option<Box<models::OutputQueueDataV2>>,
+    #[serde(rename = "inputQueue", skip_serializing_if = "Option::is_none")]
+    pub input_queue: Option<Box<models::InputQueueDataV2>>,
+}
+
+impl StateQueueDataV2 {
+    pub fn new(initial_root: String, root_seq: u64) -> StateQueueDataV2 {
+        StateQueueDataV2 {
+            nodes: Vec::new(),
+            node_hashes: Vec::new(),
+            initial_root,
+            root_seq,
+            output_queue: None,
+            input_queue: None,
+        }
+    }
+}
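Since `nodes` and `nodeHashes` are parallel arrays, a consumer can rebuild a lookup table for proof assembly with a single zip. A sketch that keeps the hash strings opaque, since this diff does not pin down their encoding:

```rust
use std::collections::HashMap;

use photon_api::models::StateQueueDataV2;

/// Builds a node_index -> hash lookup from the parallel arrays.
/// Hash strings are treated as opaque here; decoding them depends on
/// the indexer's encoding, which is not specified in this diff.
fn node_map(state_queue: &StateQueueDataV2) -> HashMap<u64, &str> {
    state_queue
        .nodes
        .iter()
        .copied()
        .zip(state_queue.node_hashes.iter().map(String::as_str))
        .collect()
}
```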
diff --git a/sdk-libs/program-test/src/indexer/test_indexer.rs b/sdk-libs/program-test/src/indexer/test_indexer.rs
index cac5d82f84..b41afc1a32 100644
--- a/sdk-libs/program-test/src/indexer/test_indexer.rs
+++ b/sdk-libs/program-test/src/indexer/test_indexer.rs
@@ -17,8 +17,6 @@ use async_trait::async_trait;
 use borsh::BorshDeserialize;
 #[cfg(feature = "devenv")]
 use light_batched_merkle_tree::merkle_tree::BatchedMerkleTreeAccount;
-#[cfg(feature = "v2")]
-use light_client::indexer::MerkleProofWithContext;
 #[cfg(feature = "devenv")]
 use light_client::rpc::{Rpc, RpcError};
 use light_client::{
@@ -29,8 +27,8 @@ use light_client::{
         CompressedTokenAccount, Context, GetCompressedAccountsByOwnerConfig,
         GetCompressedTokenAccountsByOwnerOrDelegateOptions, Indexer, IndexerError,
         IndexerRpcConfig, Items, ItemsWithCursor, MerkleProof, NewAddressProofWithContext,
-        OwnerBalance, PaginatedOptions, QueueElementsResult, Response, RetryConfig, RootIndex,
-        SignatureWithMetadata, StateMerkleTreeAccounts, TokenBalance, ValidityProofWithContext,
+        OwnerBalance, PaginatedOptions, Response, RetryConfig, RootIndex, SignatureWithMetadata,
+        StateMerkleTreeAccounts, TokenBalance, ValidityProofWithContext,
     },
 };
 use light_compressed_account::{
@@ -613,258 +611,6 @@ impl Indexer for TestIndexer {
         }
     }

-    async fn get_queue_elements(
-        &mut self,
-        _merkle_tree_pubkey: [u8; 32],
-        _output_queue_start_index: Option<u64>,
-        _output_queue_limit: Option<u16>,
-        _input_queue_start_index: Option<u64>,
-        _input_queue_limit: Option<u16>,
-        _config: Option<IndexerRpcConfig>,
-    ) -> Result<Response<QueueElementsResult>, IndexerError> {
-        #[cfg(not(feature = "v2"))]
-        unimplemented!("get_queue_elements");
-        #[cfg(feature = "v2")]
-        {
-            let merkle_tree_pubkey = _merkle_tree_pubkey;
-            let output_queue_start_index = _output_queue_start_index.unwrap_or(0);
-            let output_queue_limit = _output_queue_limit;
-            let input_queue_start_index = _input_queue_start_index.unwrap_or(0);
-            let input_queue_limit = _input_queue_limit;
-            let pubkey = Pubkey::new_from_array(merkle_tree_pubkey);
-
-            // Check if this is an address tree
-            let address_tree_bundle = self
-                .address_merkle_trees
-                .iter()
-                .find(|x| x.accounts.merkle_tree == pubkey);
-            if let Some(address_tree_bundle) = address_tree_bundle {
-                // For address trees, return output queue only
-                let output_queue_elements = if let Some(limit) = output_queue_limit {
-                    let start = output_queue_start_index as usize;
-                    let end = std::cmp::min(
-                        start + limit as usize,
-                        address_tree_bundle.queue_elements.len(),
-                    );
-                    let queue_elements = address_tree_bundle.queue_elements[start..end].to_vec();
-
-                    let merkle_proofs_with_context = queue_elements
-                        .iter()
-                        .map(|element| MerkleProofWithContext {
-                            proof: Vec::new(),
-                            leaf: [0u8; 32],
-                            leaf_index: 0,
-                            merkle_tree: address_tree_bundle.accounts.merkle_tree.to_bytes(),
-                            root: address_tree_bundle.root(),
-                            tx_hash: None,
-                            root_seq: output_queue_start_index,
-                            account_hash: *element,
-                        })
-                        .collect();
-                    Some(merkle_proofs_with_context)
-                } else {
-                    None
-                };
-
-                let output_queue_index = if output_queue_elements.is_some() {
-                    Some(output_queue_start_index)
-                } else {
-                    None
-                };
-
-                return Ok(Response {
-                    context: Context {
-                        slot: self.get_current_slot(),
-                    },
-                    value: QueueElementsResult {
-                        output_queue_elements,
-                        output_queue_index,
-                        input_queue_elements: None,
-                        input_queue_index: None,
-                    },
-                });
-            }
-
-            // Check if this is a state tree
-            let state_tree_bundle = self
-                .state_merkle_trees
-                .iter_mut()
-                .find(|x| x.accounts.merkle_tree == pubkey);
-
-            if let Some(state_tree_bundle) = state_tree_bundle {
-                // For state trees, return both input and output queues
-
-                // Build input queue elements if requested
-                let input_queue_elements = if let Some(limit) = input_queue_limit {
-                    let start = input_queue_start_index as usize;
-                    let end = std::cmp::min(
-                        start + limit as usize,
-                        state_tree_bundle.input_leaf_indices.len(),
-                    );
-                    let queue_elements = state_tree_bundle.input_leaf_indices[start..end].to_vec();
-
-                    let merkle_proofs = queue_elements
-                        .iter()
-                        .map(|leaf_info| {
-                            match state_tree_bundle
-                                .merkle_tree
-                                .get_proof_of_leaf(leaf_info.leaf_index as usize, true)
-                            {
-                                Ok(proof) => proof.to_vec(),
-                                Err(_) => {
-                                    let mut next_index =
-                                        state_tree_bundle.merkle_tree.get_next_index() as u64;
-                                    while next_index < leaf_info.leaf_index as u64 {
-                                        state_tree_bundle.merkle_tree.append(&[0u8; 32]).unwrap();
-                                        next_index =
-                                            state_tree_bundle.merkle_tree.get_next_index() as u64;
-                                    }
-                                    state_tree_bundle
-                                        .merkle_tree
-                                        .get_proof_of_leaf(leaf_info.leaf_index as usize, true)
-                                        .unwrap()
-                                        .to_vec();
-                                    Vec::new()
-                                }
-                            }
-                        })
-                        .collect::<Vec<_>>();
-
-                    let leaves = queue_elements
-                        .iter()
-                        .map(|leaf_info| {
-                            state_tree_bundle
-                                .merkle_tree
-                                .get_leaf(leaf_info.leaf_index as usize)
-                                .unwrap_or_default()
-                        })
-                        .collect::<Vec<_>>();
-
-                    let merkle_proofs_with_context = merkle_proofs
-                        .iter()
-                        .zip(queue_elements.iter())
-                        .zip(leaves.iter())
-                        .map(|((proof, element), leaf)| MerkleProofWithContext {
-                            proof: proof.clone(),
-                            leaf: *leaf,
-                            leaf_index: element.leaf_index as u64,
-                            merkle_tree: state_tree_bundle.accounts.merkle_tree.to_bytes(),
-                            root: state_tree_bundle.merkle_tree.root(),
-                            tx_hash: Some(element.tx_hash),
-                            root_seq: 0,
-                            account_hash: element.leaf,
-                        })
-                        .collect();
-
-                    Some(merkle_proofs_with_context)
-                } else {
-                    None
-                };
-
-                // Build output queue elements if requested
-                let output_queue_elements = if let Some(limit) = output_queue_limit {
-                    let start = output_queue_start_index as usize;
-                    let end = std::cmp::min(
-                        start + limit as usize,
-                        state_tree_bundle.output_queue_elements.len(),
-                    );
-                    let queue_elements =
-                        state_tree_bundle.output_queue_elements[start..end].to_vec();
-
-                    let indices = queue_elements
-                        .iter()
-                        .map(|(_, index)| index)
-                        .collect::<Vec<_>>();
-
-                    let merkle_proofs = indices
-                        .iter()
-                        .map(|index| {
-                            match state_tree_bundle
-                                .merkle_tree
-                                .get_proof_of_leaf(**index as usize, true)
-                            {
-                                Ok(proof) => proof.to_vec(),
-                                Err(_) => {
-                                    let mut next_index =
-                                        state_tree_bundle.merkle_tree.get_next_index() as u64;
-                                    while next_index < **index {
-                                        state_tree_bundle.merkle_tree.append(&[0u8; 32]).unwrap();
-                                        next_index =
-                                            state_tree_bundle.merkle_tree.get_next_index() as u64;
-                                    }
-                                    state_tree_bundle
-                                        .merkle_tree
-                                        .get_proof_of_leaf(**index as usize, true)
-                                        .unwrap()
-                                        .to_vec();
-                                    Vec::new()
-                                }
-                            }
-                        })
-                        .collect::<Vec<_>>();
-
-                    let leaves = indices
-                        .iter()
-                        .map(|index| {
-                            state_tree_bundle
-                                .merkle_tree
-                                .get_leaf(**index as usize)
-                                .unwrap_or_default()
-                        })
-                        .collect::<Vec<_>>();
-
-                    let merkle_proofs_with_context = merkle_proofs
-                        .iter()
-                        .zip(queue_elements.iter())
-                        .zip(leaves.iter())
-                        .map(|((proof, (element, index)), leaf)| MerkleProofWithContext {
-                            proof: proof.clone(),
-                            leaf: *leaf,
-                            leaf_index: *index,
-                            merkle_tree: state_tree_bundle.accounts.merkle_tree.to_bytes(),
-                            root: state_tree_bundle.merkle_tree.root(),
-                            tx_hash: None,
-                            root_seq: 0,
-                            account_hash: *element,
-                        })
-                        .collect();
-
-                    Some(merkle_proofs_with_context)
-                } else {
-                    None
-                };
-
-                let output_queue_index = if output_queue_elements.is_some() {
-                    Some(output_queue_start_index)
-                } else {
-                    None
-                };
-
-                let input_queue_index = if input_queue_elements.is_some() {
-                    Some(input_queue_start_index)
-                } else {
-                    None
-                };
-
-                let slot = self.get_current_slot();
-
-                return Ok(Response {
-                    context: Context { slot },
-                    value: QueueElementsResult {
-                        output_queue_elements,
-                        output_queue_index,
-                        input_queue_elements,
-                        input_queue_index,
-                    },
-                });
-            }
-
-            Err(IndexerError::InvalidParameters(
-                "Merkle tree not found".to_string(),
-            ))
-        }
-    }
-
     async fn get_queue_info(
         &self,
         _config: Option<IndexerRpcConfig>,
@@ -925,83 +671,7 @@
         _start_offset: Option<u64>,
         _config: Option<IndexerRpcConfig>,
     ) -> Result<Response<BatchAddressUpdateIndexerResponse>, IndexerError> {
-        #[cfg(not(feature = "v2"))]
-        unimplemented!("get_address_queue_with_proofs");
-        #[cfg(feature = "v2")]
-        {
-            use light_client::indexer::AddressQueueIndex;
-            let merkle_tree_pubkey = _merkle_tree_pubkey;
-            let zkp_batch_size = _zkp_batch_size;
-
-            let batch_start_index = self
-                .get_address_merkle_trees()
-                .iter()
-                .find(|x| x.accounts.merkle_tree == *merkle_tree_pubkey)
-                .unwrap()
-                .get_v2_indexed_merkle_tree()
-                .ok_or(IndexerError::Unknown(
-                    "Failed to get v2 indexed merkle tree".into(),
-                ))?
-                .merkle_tree
-                .rightmost_index;
-
-            let address_proof_items = self
-                .get_queue_elements(
-                    merkle_tree_pubkey.to_bytes(),
-                    Some(0),
-                    Some(zkp_batch_size),
-                    None,
-                    None,
-                    None,
-                )
-                .await
-                .map_err(|_| IndexerError::Unknown("Failed to get queue elements".into()))?
-                .value;
-
-            let output_elements = address_proof_items
-                .output_queue_elements
-                .ok_or(IndexerError::Unknown("No output queue elements".into()))?;
-
-            let addresses: Vec<AddressQueueIndex> = output_elements
-                .iter()
-                .enumerate()
-                .map(|(i, proof)| AddressQueueIndex {
-                    address: proof.account_hash,
-                    queue_index: proof.root_seq + i as u64,
-                })
-                .collect();
-            let non_inclusion_proofs = self
-                .get_multiple_new_address_proofs(
-                    merkle_tree_pubkey.to_bytes(),
-                    output_elements.iter().map(|x| x.account_hash).collect(),
-                    None,
-                )
-                .await
-                .map_err(|_| {
-                    IndexerError::Unknown(
-                        "Failed to get get_multiple_new_address_proofs_full".into(),
-                    )
-                })?
-                .value;
-
-            let subtrees = self
-                .get_subtrees(merkle_tree_pubkey.to_bytes(), None)
-                .await
-                .map_err(|_| IndexerError::Unknown("Failed to get subtrees".into()))?
-                .value;
-
-            Ok(Response {
-                context: Context {
-                    slot: self.get_current_slot(),
-                },
-                value: BatchAddressUpdateIndexerResponse {
-                    batch_start_index: batch_start_index as u64,
-                    addresses,
-                    non_inclusion_proofs: non_inclusion_proofs.items,
-                    subtrees: subtrees.items,
-                },
-            })
-        }
+        unimplemented!("get_address_queue_with_proofs not implemented for TestIndexer - needs to be updated to use V2 API")
     }

     // New required trait methods
@@ -1061,6 +731,15 @@
     async fn get_indexer_health(&self, _config: Option) -> Result {
         todo!("get_indexer_health not implemented")
     }
+
+    async fn get_queue_elements(
+        &mut self,
+        _merkle_tree_pubkey: [u8; 32],
+        _options: light_client::indexer::QueueElementsV2Options,
+        _config: Option<IndexerRpcConfig>,
+    ) -> Result, IndexerError> {
+        unimplemented!("get_queue_elements not implemented for TestIndexer")
+    }
 }

 #[async_trait]
diff --git a/sdk-libs/program-test/src/program_test/indexer.rs b/sdk-libs/program-test/src/program_test/indexer.rs
index 26beca72eb..125c8a6f0b 100644
--- a/sdk-libs/program-test/src/program_test/indexer.rs
+++ b/sdk-libs/program-test/src/program_test/indexer.rs
@@ -4,8 +4,8 @@ use light_client::indexer::{
     CompressedTokenAccount, GetCompressedAccountsByOwnerConfig,
     GetCompressedTokenAccountsByOwnerOrDelegateOptions, Hash, Indexer, IndexerError,
     IndexerRpcConfig, Items, ItemsWithCursor, MerkleProof, NewAddressProofWithContext,
-    OwnerBalance, PaginatedOptions, QueueElementsResult, Response, RetryConfig,
-    SignatureWithMetadata, TokenBalance, ValidityProofWithContext,
+    OwnerBalance, PaginatedOptions, Response, RetryConfig, SignatureWithMetadata, TokenBalance,
+    ValidityProofWithContext,
 };
 use solana_sdk::pubkey::Pubkey;

@@ -197,30 +197,6 @@ impl Indexer for LightProgramTest {
         .await?)
     }

-    async fn get_queue_elements(
-        &mut self,
-        merkle_tree_pubkey: [u8; 32],
-        output_queue_start_index: Option<u64>,
-        output_queue_limit: Option<u16>,
-        input_queue_start_index: Option<u64>,
-        input_queue_limit: Option<u16>,
-        config: Option<IndexerRpcConfig>,
-    ) -> Result<Response<QueueElementsResult>, IndexerError> {
-        Ok(self
-            .indexer
-            .as_mut()
-            .ok_or(IndexerError::NotInitialized)?
-            .get_queue_elements(
-                merkle_tree_pubkey,
-                output_queue_start_index,
-                output_queue_limit,
-                input_queue_start_index,
-                input_queue_limit,
-                config,
-            )
-            .await?)
-    }
-
     async fn get_queue_info(
         &self,
         config: Option<IndexerRpcConfig>,
@@ -338,4 +314,18 @@ impl Indexer for LightProgramTest {
             .get_indexer_health(config)
             .await?)
     }
+
+    async fn get_queue_elements(
+        &mut self,
+        merkle_tree_pubkey: [u8; 32],
+        options: light_client::indexer::QueueElementsV2Options,
+        config: Option<IndexerRpcConfig>,
+    ) -> Result, IndexerError> {
+        Ok(self
+            .indexer
+            .as_mut()
+            .ok_or(IndexerError::NotInitialized)?
+            .get_queue_elements(merkle_tree_pubkey, options, config)
+            .await?)
+    }
 }