diff --git a/.gitignore b/.gitignore
index 2e546578..9e2125ea 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,4 +15,5 @@
 test.db
 docker-compose.yml
 .cursor
-**/photon.log
\ No newline at end of file
+**/photon.log
+proto/**/*.bin
diff --git a/Cargo.lock b/Cargo.lock
index d72356ad..2c8641f3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -56,23 +56,23 @@ dependencies = [

 [[package]]
 name = "agave-feature-set"
-version = "3.0.8"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "29098b42572aa09c9fdb620b50774aa0b907e880aa41ff99fb1892417c9672cc"
+checksum = "b6b71300ed93a9dff1c3231c3f1417e242e3da38529ebc32f828bc8560bf4a2a"
 dependencies = [
  "ahash 0.8.12",
  "solana-epoch-schedule",
- "solana-hash 3.0.0",
+ "solana-hash 3.1.0",
  "solana-pubkey 3.0.0",
- "solana-sha256-hasher 3.0.0",
+ "solana-sha256-hasher 3.1.0",
  "solana-svm-feature-set",
 ]

 [[package]]
 name = "agave-reserved-account-keys"
-version = "3.0.8"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c9db52270156139b115e25087a4850e28097533f48e713cd73bfef570112514d"
+checksum = "ac34d0410a2a015df7d45d092449c7ec59264081d05f18c7f305ccf7c81bd3b7"
 dependencies = [
  "agave-feature-set",
  "solana-pubkey 3.0.0",
@@ -126,7 +126,7 @@ checksum = "48a526ec4434d531d488af59fe866f36b310fe8906691c75dffa664450a3800a"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -191,22 +191,22 @@ dependencies = [

 [[package]]
 name = "anstyle-query"
-version = "1.1.4"
+version = "1.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e231f6134f61b71076a3eab506c379d4f36122f2af15a9ff04415ea4c3339e2"
+checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc"
 dependencies = [
- "windows-sys 0.60.2",
+ "windows-sys 0.61.2",
 ]

 [[package]]
 name = "anstyle-wincon"
-version = "3.0.10"
+version = "3.0.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3e0633414522a32ffaac8ac6cc8f748e090c5717661fddeea04219e2344f5f2a"
+checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d"
 dependencies = [
  "anstyle",
  "once_cell_polyfill",
- "windows-sys 0.60.2",
+ "windows-sys 0.61.2",
 ]

 [[package]]
@@ -338,7 +338,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60"
 dependencies = [
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -364,7 +364,7 @@ dependencies = [
  "num-traits",
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -439,7 +439,7 @@ checksum = "213888f660fddcca0d257e88e54ac05bca01885f258ccdf695bafd77031bb69d"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -568,9 +568,9 @@ dependencies = [

 [[package]]
 name = "async-compression"
-version = "0.4.32"
+version = "0.4.33"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a89bce6054c720275ac2432fbba080a66a2106a44a1b804553930ca6909f4e0"
+checksum = "93c1f86859c1af3d514fa19e8323147ff10ea98684e6c7b307912509f50e67b2"
 dependencies = [
  "compression-codecs",
  "compression-core",
@@ -684,7 +684,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -701,7 +701,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -787,9 +787,9 @@ dependencies = [

 [[package]]
 name = "axum"
-version = "0.8.6"
+version = "0.8.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a18ed336352031311f4e0b4dd2ff392d4fbb370777c9d18d7fc9d7359f73871"
+checksum = "5b098575ebe77cb6d14fc7f32749631a6e44edbef6b796f89b020e99ba20d425"
 dependencies = [
  "axum-core",
  "bytes",
@@ -1004,7 +1004,7 @@ dependencies = [
  "proc-macro-crate 3.4.0",
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -1151,7 +1151,7 @@ checksum = "f9abbd1bc6865053c427f7198e6af43bfdedc55ab791faed4fbd361d789575ff"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -1162,9 +1162,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"

 [[package]]
 name = "bytes"
-version = "1.10.1"
+version = "1.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a"
+checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3"
 dependencies = [
  "serde",
 ]
@@ -1198,9 +1198,9 @@ dependencies = [

 [[package]]
 name = "cc"
-version = "1.2.44"
+version = "1.2.47"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "37521ac7aabe3d13122dc382493e20c9416f299d2ccd5b3a5340a2570cdeb0f3"
+checksum = "cd405d82c84ff7f35739f175f67d8b9fb7687a0e84ccdc78bd3568839827cf07"
 dependencies = [
  "find-msvc-tools",
  "jobserver",
@@ -1234,7 +1234,7 @@ checksum = "45565fc9416b9896014f5732ac776f810ee53a66730c17e4020c3ec064a8f88f"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -1248,7 +1248,7 @@ dependencies = [
  "num-traits",
  "serde",
  "wasm-bindgen",
- "windows-link 0.2.1",
+ "windows-link",
 ]

 [[package]]
@@ -1280,9 +1280,9 @@ dependencies = [

 [[package]]
 name = "clap"
-version = "4.5.51"
+version = "4.5.53"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4c26d721170e0295f191a69bd9a1f93efcdb0aff38684b61ab5750468972e5f5"
+checksum = "c9e340e012a1bf4935f5282ed1436d1489548e8f72308207ea5df0e23d2d03f8"
 dependencies = [
  "clap_builder",
  "clap_derive 4.5.49",
 ]

 [[package]]
 name = "clap_builder"
-version = "4.5.51"
+version = "4.5.53"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "75835f0c7bf681bfd05abe44e965760fea999a5286c6eb2d59883634fd02011a"
+checksum = "d76b5d13eaa18c901fd2f7fca939fefe3a0727a953561fefdf3b2922b8569d00"
 dependencies = [
  "anstream",
  "anstyle",
@@ -1322,7 +1322,7 @@ dependencies = [
  "heck 0.5.0",
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -1353,7 +1353,7 @@ dependencies = [
  "dotenv",
  "futures-util",
  "hex",
- "jsonwebtoken",
+ "jsonwebtoken 7.2.0",
  "lazy_static",
  "openssl",
  "percent-encoding",
@@ -1394,9 +1394,9 @@ dependencies = [

 [[package]]
 name = "compression-codecs"
-version = "0.4.31"
+version = "0.4.32"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ef8a506ec4b81c460798f572caead636d57d3d7e940f998160f52bd254bf2d23"
+checksum = "680dc087785c5230f8e8843e2e57ac7c1c90488b6a91b88caa265410568f441b"
 dependencies = [
  "brotli 8.0.2",
  "compression-core",
@@ -1406,9 +1406,9 @@ dependencies = [

 [[package]]
 name = "compression-core"
-version = "0.4.29"
+version = "0.4.30"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e47641d3deaf41fb1538ac1f54735925e275eaf3bf4d55c81b137fba797e5cbb"
+checksum = "3a9b614a5787ef0c8802a55766480563cb3a93b435898c422ed2a359cf811582"

 [[package]]
 name = "concurrent-queue"
@@ -1586,9 +1586,9 @@ checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5"

 [[package]]
 name = "crypto-common"
-version = "0.1.6"
+version = "0.1.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
+checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a"
 dependencies = [
  "generic-array",
  "rand_core 0.6.4",
@@ -1630,7 +1630,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -1654,7 +1654,7 @@ dependencies = [
  "proc-macro2",
  "quote",
  "strsim 0.11.1",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -1665,7 +1665,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81"
 dependencies = [
  "darling_core",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -1807,7 +1807,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -1830,7 +1830,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -1888,7 +1888,7 @@ dependencies = [
  "enum-ordinalize",
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -1932,7 +1932,7 @@ checksum = "8ca9601fb2d62598ee17836250842873a413586e5d7ed88b356e38ddbb0ec631"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -2010,9 +2010,9 @@ checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d"

 [[package]]
 name = "find-msvc-tools"
-version = "0.1.4"
+version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "52051878f80a721bb68ebfbc930e07b65ba72f2da88968ea5c06fd6ca3d3a127"
+checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844"

 [[package]]
 name = "five8"
@@ -2020,7 +2020,16 @@ version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a75b8549488b4715defcb0d8a8a1c1c76a80661b5fa106b4ca0e7fce59d7d875"
 dependencies = [
- "five8_core",
+ "five8_core 0.1.2",
+]
+
+[[package]]
+name = "five8"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "23f76610e969fa1784327ded240f1e28a3fd9520c9cec93b636fcf62dd37f772"
+dependencies = [
+ "five8_core 1.0.0",
 ]

 [[package]]
@@ -2029,7 +2038,16 @@ version = "0.1.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "26dec3da8bc3ef08f2c04f61eab298c3ab334523e55f076354d6d6f613799a7b"
 dependencies = [
- "five8_core",
+ "five8_core 0.1.2",
+]
+
+[[package]]
+name = "five8_const"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a0f1728185f277989ca573a402716ae0beaaea3f76a8ff87ef9dd8fb19436c5"
+dependencies = [
+ "five8_core 1.0.0",
 ]

 [[package]]
@@ -2038,6 +2056,12 @@ version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2551bf44bc5f776c15044b9b94153a00198be06743e262afaaa61f11ac7523a5"

+[[package]]
+name = "five8_core"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "059c31d7d36c43fe39d89e55711858b4da8be7eb6dabac23c7289b1a19489406"
+
 [[package]]
 name = "fixedbitset"
 version = "0.5.7"
@@ -2197,7 +2221,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -2238,9 +2262,9 @@ dependencies = [

 [[package]]
 name = "generic-array"
-version = "0.14.9"
+version = "0.14.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4bb6743198531e02858aeaea5398fcc883e71851fcbcb5a2f773e2fb6cb1edf2"
+checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
 dependencies = [
  "typenum",
  "version_check",
@@ -2361,7 +2385,7 @@ dependencies = [
  "futures-sink",
  "futures-util",
  "http 0.2.12",
- "indexmap 2.12.0",
+ "indexmap 2.12.1",
  "slab",
  "tokio",
  "tokio-util",
@@ -2380,7 +2404,7 @@ dependencies = [
  "futures-core",
  "futures-sink",
  "http 1.3.1",
- "indexmap 2.12.0",
+ "indexmap 2.12.1",
  "slab",
  "tokio",
  "tokio-util",
@@ -2435,9 +2459,9 @@ dependencies = [

 [[package]]
 name = "hashbrown"
-version = "0.16.0"
+version = "0.16.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d"
+checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100"

 [[package]]
 name = "hashlink"
@@ -2636,9 +2660,9 @@ dependencies = [

 [[package]]
 name = "hyper"
-version = "1.7.0"
+version = "1.8.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eb3aa54a13a0dfe7fbe3a59e0c76093041720fdc77b110cc0fc260fafb4dc51e"
+checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11"
 dependencies = [
  "atomic-waker",
  "bytes",
@@ -2664,14 +2688,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58"
 dependencies = [
  "http 1.3.1",
- "hyper 1.7.0",
+ "hyper 1.8.1",
  "hyper-util",
- "rustls 0.23.34",
+ "rustls 0.23.35",
  "rustls-pki-types",
  "tokio",
  "tokio-rustls 0.26.4",
  "tower-service",
- "webpki-roots 1.0.3",
+ "webpki-roots 1.0.4",
 ]

 [[package]]
@@ -2680,7 +2704,7 @@ version = "0.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0"
 dependencies = [
- "hyper 1.7.0",
+ "hyper 1.8.1",
  "hyper-util",
  "pin-project-lite",
  "tokio",
@@ -2708,7 +2732,7 @@ checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0"
 dependencies = [
  "bytes",
  "http-body-util",
- "hyper 1.7.0",
+ "hyper 1.8.1",
  "hyper-util",
  "native-tls",
  "tokio",
@@ -2718,9 +2742,9 @@ dependencies = [

 [[package]]
 name = "hyper-util"
-version = "0.1.17"
+version = "0.1.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8"
+checksum = "52e9a2a24dc5c6821e71a7030e1e14b7b632acac55c40e9d2e082c621261bb56"
 dependencies = [
  "base64 0.22.1",
  "bytes",
@@ -2729,7 +2753,7 @@ dependencies = [
  "futures-util",
  "http 1.3.1",
  "http-body 1.0.1",
- "hyper 1.7.0",
+ "hyper 1.8.1",
  "ipnet",
  "libc",
  "percent-encoding",
@@ -2886,21 +2910,21 @@ dependencies = [

 [[package]]
 name = "indexmap"
-version = "2.12.0"
+version = "2.12.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f"
+checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2"
 dependencies = [
  "equivalent",
- "hashbrown 0.16.0",
+ "hashbrown 0.16.1",
  "serde",
  "serde_core",
 ]

 [[package]]
 name = "indicatif"
-version = "0.18.2"
+version = "0.18.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ade6dfcba0dfb62ad59e59e7241ec8912af34fd29e0e743e3db992bd278e8b65"
+checksum = "9375e112e4b463ec1b1c6c011953545c65a30164fbab5b581df32b3abf0dcb88"
 dependencies = [
  "console 0.16.1",
  "portable-atomic",
@@ -2920,9 +2944,9 @@ dependencies = [

 [[package]]
 name = "insta"
-version = "1.43.2"
+version = "1.44.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "46fdb647ebde000f43b5b53f773c30cf9b0cb4300453208713fa38b2c70935a0"
+checksum = "e8732d3774162a0851e3f2b150eb98f31a9885dd75985099421d393385a01dfd"
 dependencies = [
  "console 0.15.11",
  "once_cell",
@@ -2956,9 +2980,9 @@ dependencies = [

 [[package]]
 name = "iri-string"
-version = "0.7.8"
+version = "0.7.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2"
+checksum = "4f867b9d1d896b67beb18518eda36fdb77a32ea590de864f1325b294a6d14397"
 dependencies = [
  "memchr",
  "serde",
@@ -3168,7 +3192,22 @@ dependencies = [
  "ring 0.16.20",
  "serde",
  "serde_json",
- "simple_asn1",
+ "simple_asn1 0.4.1",
+]
+
+[[package]]
+name = "jsonwebtoken"
+version = "9.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5a87cc7a48537badeae96744432de36f4be2b4a34a05a5ef32e9dd8a1c169dde"
+dependencies = [
+ "base64 0.22.1",
+ "js-sys",
+ "pem 3.0.6",
+ "ring 0.17.14",
+ "serde",
+ "serde_json",
+ "simple_asn1 0.6.3",
 ]

 [[package]]
@@ -3378,7 +3417,7 @@ dependencies = [
  "proc-macro2",
  "quote",
  "solana-pubkey 2.4.0",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -3427,7 +3466,7 @@ checksum = "0a8be18fe4de58a6f754caa74a3fbc6d8a758a26f1f3c24d5b0f5b55df5f5408"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -3469,7 +3508,7 @@ dependencies = [
  "lazy_static",
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -3531,7 +3570,7 @@ checksum = "5cf92c10c7e361d6b99666ec1c6f9805b0bea2c3bd8c78dc6fe98ac5bd78db11"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -3755,7 +3794,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -3828,7 +3867,7 @@ dependencies = [
  "proc-macro-crate 3.4.0",
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -3860,9 +3899,9 @@ checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381"

 [[package]]
 name = "openssl"
-version = "0.10.74"
+version = "0.10.75"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "24ad14dd45412269e1a30f52ad8f0664f0f4f4a89ee8fe28c3b3527021ebb654"
+checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328"
 dependencies = [
  "bitflags 2.10.0",
  "cfg-if",
@@ -3881,7 +3920,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -3892,9 +3931,9 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e"

 [[package]]
 name = "openssl-sys"
-version = "0.9.110"
+version = "0.9.111"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0a9f0075ba3c21b09f8e8b2026584b1d18d49388648f2fbbf3c97ea8deced8e2"
+checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321"
 dependencies = [
  "cc",
  "libc",
@@ -3998,7 +4037,7 @@ dependencies = [
  "libc",
  "redox_syscall 0.5.18",
  "smallvec",
- "windows-link 0.2.1",
+ "windows-link",
 ]

 [[package]]
@@ -4036,6 +4075,16 @@ dependencies = [
  "base64 0.13.1",
 ]

+[[package]]
+name = "pem"
+version = "3.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1d30c53c26bc5b31a98cd02d20f25a7c8567146caf63ed593a9d87b2775291be"
+dependencies = [
+ "base64 0.22.1",
+ "serde_core",
+]
+
 [[package]]
 name = "percent-encoding"
 version = "2.3.2"
@@ -4058,12 +4107,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772"
 dependencies = [
  "fixedbitset",
- "indexmap 2.12.0",
+ "indexmap 2.12.1",
 ]

 [[package]]
 name = "photon-indexer"
-version = "0.51.1"
+version = "0.51.2"
 dependencies = [
  "anyhow",
  "ark-bn254 0.5.0",
@@ -4080,7 +4129,7 @@ dependencies = [
  "bytes",
  "cadence",
  "cadence-macros",
- "clap 4.5.51",
+ "clap 4.5.53",
  "cloud-storage",
  "dirs 5.0.1",
  "function_name",
@@ -4091,6 +4140,7 @@ dependencies = [
  "itertools 0.12.1",
  "jsonrpsee",
  "jsonrpsee-core",
+ "jsonwebtoken 9.3.1",
  "lazy_static",
  "light-batched-merkle-tree",
  "light-compressed-account",
@@ -4154,7 +4204,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -4259,7 +4309,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b"
 dependencies = [
  "proc-macro2",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -4351,7 +4401,7 @@ dependencies = [
  "pulldown-cmark",
  "pulldown-cmark-to-cmark",
  "regex",
- "syn 2.0.108",
+ "syn 2.0.110",
  "tempfile",
 ]
@@ -4365,7 +4415,7 @@ dependencies = [
  "itertools 0.14.0",
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -4419,9 +4469,9 @@ dependencies = [

 [[package]]
 name = "pulldown-cmark-to-cmark"
-version = "21.0.0"
+version = "21.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5b6a0769a491a08b31ea5c62494a8f144ee0987d86d670a8af4df1e1b7cde75"
+checksum = "8246feae3db61428fd0bb94285c690b460e4517d83152377543ca802357785f1"
 dependencies = [
  "pulldown-cmark",
 ]
@@ -4472,7 +4522,7 @@ dependencies = [
  "quinn-proto",
  "quinn-udp",
  "rustc-hash 2.1.1",
- "rustls 0.23.34",
+ "rustls 0.23.35",
  "socket2 0.6.1",
  "thiserror 2.0.17",
  "tokio",
@@ -4493,7 +4543,7 @@ dependencies = [
  "rand 0.9.2",
  "ring 0.17.14",
  "rustc-hash 2.1.1",
- "rustls 0.23.34",
+ "rustls 0.23.35",
  "rustls-pki-types",
  "rustls-platform-verifier",
  "slab",
@@ -4519,9 +4569,9 @@ dependencies = [

 [[package]]
 name = "quote"
-version = "1.0.41"
+version = "1.0.42"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1"
+checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f"
 dependencies = [
  "proc-macro2",
 ]
@@ -4747,7 +4797,7 @@ version = "0.12.24"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f"
 dependencies = [
- "async-compression 0.4.32",
+ "async-compression 0.4.33",
  "base64 0.22.1",
  "bytes",
  "encoding_rs",
@@ -4758,7 +4808,7 @@ dependencies = [
  "http 1.3.1",
  "http-body 1.0.1",
  "http-body-util",
- "hyper 1.7.0",
+ "hyper 1.8.1",
  "hyper-rustls",
  "hyper-tls 0.6.0",
  "hyper-util",
@@ -4769,7 +4819,7 @@ dependencies = [
  "percent-encoding",
  "pin-project-lite",
  "quinn",
- "rustls 0.23.34",
+ "rustls 0.23.35",
  "rustls-pki-types",
  "serde",
  "serde_json",
@@ -4787,7 +4837,7 @@ dependencies = [
  "wasm-bindgen-futures",
  "wasm-streams",
  "web-sys",
- "webpki-roots 1.0.3",
+ "webpki-roots 1.0.4",
 ]

 [[package]]
@@ -4888,7 +4938,7 @@ dependencies = [
  "regex",
  "relative-path",
  "rustc_version",
- "syn 2.0.108",
+ "syn 2.0.110",
  "unicode-ident",
 ]
@@ -5018,27 +5068,15 @@ dependencies = [

 [[package]]
 name = "rustls"
-version = "0.21.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e"
-dependencies = [
- "log",
- "ring 0.17.14",
- "rustls-webpki 0.101.7",
- "sct",
-]
-
-[[package]]
-name = "rustls"
-version = "0.23.34"
+version = "0.23.35"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6a9586e9ee2b4f8fab52a0048ca7334d7024eef48e2cb9407e3497bb7cab7fa7"
+checksum = "533f54bc6a7d4f647e46ad909549eda97bf5afc1585190ef692b4286b198bd8f"
 dependencies = [
  "log",
  "once_cell",
  "ring 0.17.14",
  "rustls-pki-types",
- "rustls-webpki 0.103.8",
+ "rustls-webpki",
  "subtle",
  "zeroize",
 ]
@@ -5085,10 +5123,10 @@ dependencies = [
  "jni",
  "log",
  "once_cell",
- "rustls 0.23.34",
+ "rustls 0.23.35",
  "rustls-native-certs",
  "rustls-platform-verifier-android",
- "rustls-webpki 0.103.8",
+ "rustls-webpki",
  "security-framework 3.5.1",
  "security-framework-sys",
  "webpki-root-certs",
@@ -5101,16 +5139,6 @@ version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f"

-[[package]]
-name = "rustls-webpki"
-version = "0.101.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765"
-dependencies = [
- "ring 0.17.14",
- "untrusted 0.9.0",
-]
-
 [[package]]
 name = "rustls-webpki"
 version = "0.103.8"
@@ -5439,7 +5467,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -5469,9 +5497,9 @@ dependencies = [

 [[package]]
 name = "serde_with"
-version = "3.15.1"
+version = "3.16.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aa66c845eee442168b2c8134fec70ac50dc20e760769c8ba0ad1319ca1959b04"
+checksum = "10574371d41b0d9b2cff89418eda27da52bcaff2cc8741db26382a77c29131f1"
 dependencies = [
  "serde_core",
  "serde_with_macros",
@@ -5479,14 +5507,14 @@ dependencies = [

 [[package]]
 name = "serde_with_macros"
-version = "3.15.1"
+version = "3.16.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b91a903660542fced4e99881aa481bdbaec1634568ee02e0b8bd57c64cb38955"
+checksum = "08a72d8216842fdd57820dc78d840bef99248e35fb2554ff923319e60f2d686b"
 dependencies = [
  "darling",
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -5495,7 +5523,7 @@ version = "0.9.34+deprecated"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47"
 dependencies = [
- "indexmap 2.12.0",
+ "indexmap 2.12.1",
  "itoa",
  "ryu",
  "serde",
@@ -5524,7 +5552,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -5634,6 +5662,18 @@ dependencies = [
  "num-traits",
 ]

+[[package]]
+name = "simple_asn1"
+version = "0.6.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb"
+dependencies = [
+ "num-bigint 0.4.6",
+ "num-traits",
+ "thiserror 2.0.17",
+ "time",
+]
+
 [[package]]
 name = "siphasher"
 version = "1.0.1"
@@ -5719,9 +5759,9 @@ dependencies = [

 [[package]]
 name = "solana-account-decoder"
-version = "3.0.8"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64285c3c7bbdaf775e72d8d42b0fa199e120a4633248e0c53caf05849d5e4fc7"
+checksum = "5aae985e56861992eb615aa0bcc84275ad3a83f3b56c33033c5bce8edb7740c6"
 dependencies = [
  "Inflector",
  "base64 0.22.1",
@@ -5729,7 +5769,6 @@ dependencies = [
  "bs58 0.5.1",
  "bv",
  "serde",
- "serde_derive",
  "serde_json",
  "solana-account",
  "solana-account-decoder-client-types",
@@ -5762,14 +5801,13 @@ dependencies = [

 [[package]]
 name = "solana-account-decoder-client-types"
-version = "3.0.8"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bff10a635163974214065835c82462768f3fb2eaeef558d27edcbd54d1230ddc"
+checksum = "81aff309863e7083b95a6552e76f0b3c7ef73b640dc061b69a69f3b2c946cd98"
 dependencies = [
  "base64 0.22.1",
  "bs58 0.5.1",
  "serde",
- "serde_derive",
  "serde_json",
  "solana-account",
  "solana-pubkey 3.0.0",
@@ -5778,34 +5816,43 @@ dependencies = [

 [[package]]
 name = "solana-account-info"
-version = "3.0.0"
+version = "3.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "82f4691b69b172c687d218dd2f1f23fc7ea5e9aa79df9ac26dab3d8dd829ce48"
+checksum = "fc3397241392f5756925029acaa8515dc70fcbe3d8059d4885d7d6533baf64fd"
 dependencies = [
+ "solana-address 2.0.0",
  "solana-program-error",
  "solana-program-memory",
- "solana-pubkey 3.0.0",
 ]

 [[package]]
 name = "solana-address"
-version = "1.0.0"
+version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0a7a457086457ea9db9a5199d719dc8734dc2d0342fad0d8f77633c31eb62f19"
+checksum = "a2ecac8e1b7f74c2baa9e774c42817e3e75b20787134b76cc4d45e8a604488f5"
+dependencies = [
+ "solana-address 2.0.0",
+]
+
+[[package]]
+name = "solana-address"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e37320fd2945c5d654b2c6210624a52d66c3f1f73b653ed211ab91a703b35bdd"
 dependencies = [
  "borsh 1.5.7",
  "bytemuck",
  "bytemuck_derive",
  "curve25519-dalek",
- "five8",
- "five8_const",
+ "five8 1.0.0",
+ "five8_const 1.0.0",
  "serde",
  "serde_derive",
  "solana-atomic-u64 3.0.0",
- "solana-define-syscall 3.0.0",
+ "solana-define-syscall 4.0.1",
  "solana-program-error",
  "solana-sanitize 3.0.1",
- "solana-sha256-hasher 3.0.0",
+ "solana-sha256-hasher 3.1.0",
 ]

 [[package]]
@@ -5885,16 +5932,16 @@ dependencies = [

 [[package]]
 name = "solana-client"
-version = "3.0.8"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b78c92bb6a89fadf6a4aa70e44e8c59b7bc023d86b9443d740e026397a3cb0f7"
+checksum = "1c3cdae0844cd6c656def9f3b353045035acd83f46f939a5304bb128a1befe1a"
 dependencies = [
  "async-trait",
  "bincode",
  "dashmap",
  "futures",
  "futures-util",
- "indexmap 2.12.0",
+ "indexmap 2.12.1",
  "indicatif",
  "log",
  "quinn",
@@ -5904,11 +5951,12 @@ dependencies = [
  "solana-commitment-config",
  "solana-connection-cache",
  "solana-epoch-info",
- "solana-hash 3.0.0",
+ "solana-hash 3.1.0",
  "solana-instruction",
  "solana-keypair",
  "solana-measure",
  "solana-message",
+ "solana-net-utils",
  "solana-pubkey 3.0.0",
  "solana-pubsub-client",
  "solana-quic-client",
@@ -5927,6 +5975,7 @@ dependencies = [
  "solana-udp-client",
  "thiserror 2.0.17",
  "tokio",
+ "tokio-util",
 ]

 [[package]]
@@ -5938,7 +5987,7 @@ dependencies = [
  "solana-account",
  "solana-commitment-config",
  "solana-epoch-info",
- "solana-hash 3.0.0",
+ "solana-hash 3.1.0",
  "solana-instruction",
  "solana-keypair",
  "solana-message",
@@ -5969,14 +6018,14 @@ version = "3.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "eb7692fa6bf10a1a86b450c4775526f56d7e0e2116a53313f2533b5694abea64"
 dependencies = [
- "solana-hash 3.0.0",
+ "solana-hash 3.1.0",
 ]

 [[package]]
 name = "solana-commitment-config"
-version = "3.0.0"
+version = "3.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5fa5933a62dadb7d3ed35e6329de5cebb0678acc8f9cfdf413269084eeccc63f"
+checksum = "2e41a3917076a8b5375809078ae3a6fb76a53e364b596ef8c4265e7f410876f3"
 dependencies = [
  "serde",
  "serde_derive",
@@ -6001,15 +6050,15 @@ dependencies = [

 [[package]]
 name = "solana-connection-cache"
-version = "3.0.8"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b7ce2d2f1c270cfc06066799f3220c694ba4fdadbcae16f1138ba15f64924a4c"
+checksum = "c105365f6d26b218788d21b9bfcdcec6e149cc9c53c36c76fb1afd39aada614f"
 dependencies = [
  "async-trait",
  "bincode",
  "crossbeam-channel",
  "futures-util",
- "indexmap 2.12.0",
+ "indexmap 2.12.1",
  "log",
  "rand 0.8.5",
  "rayon",
@@ -6024,28 +6073,28 @@ dependencies = [

 [[package]]
 name = "solana-cpi"
-version = "3.0.0"
+version = "3.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "16238feb63d1cbdf915fb287f29ef7a7ebf81469bd6214f8b72a53866b593f8f"
+checksum = "4dea26709d867aada85d0d3617db0944215c8bb28d3745b912de7db13a23280c"
 dependencies = [
  "solana-account-info",
- "solana-define-syscall 3.0.0",
+ "solana-define-syscall 4.0.1",
  "solana-instruction",
  "solana-program-error",
- "solana-pubkey 3.0.0",
+ "solana-pubkey 4.0.0",
  "solana-stable-layout",
 ]

 [[package]]
 name = "solana-curve25519"
-version = "2.3.13"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eae4261b9a8613d10e77ac831a8fa60b6fa52b9b103df46d641deff9f9812a23"
+checksum = "134f67bd3031223df4aba035c503e4d14acacfc4cf19af10d10ec9c2605bb84f"
 dependencies = [
  "bytemuck",
  "bytemuck_derive",
  "curve25519-dalek",
- "solana-define-syscall 2.3.0",
+ "solana-define-syscall 3.0.0",
  "subtle",
  "thiserror 2.0.17",
 ]
@@ -6071,6 +6120,12 @@ version = "3.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f9697086a4e102d28a156b8d6b521730335d6951bd39a5e766512bbe09007cee"

+[[package]]
+name = "solana-define-syscall"
+version = "4.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57e5b1c0bc1d4a4d10c88a4100499d954c09d3fecfae4912c1a074dff68b1738"
+
 [[package]]
 name = "solana-derivation-path"
 version = "3.0.0"
@@ -6084,9 +6139,9 @@ dependencies = [

 [[package]]
 name = "solana-epoch-info"
-version = "3.0.0"
+version = "3.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f8a6b69bd71386f61344f2bcf0f527f5fd6dd3b22add5880e2e1bf1dd1fa8059"
+checksum = "e093c84f6ece620a6b10cd036574b0cd51944231ab32d81f80f76d54aba833e6"
 dependencies = [
  "serde",
  "serde_derive",
@@ -6100,7 +6155,7 @@ checksum = "b319a4ed70390af911090c020571f0ff1f4ec432522d05ab89f5c08080381995"
 dependencies = [
  "serde",
  "serde_derive",
- "solana-hash 3.0.0",
+ "solana-hash 3.1.0",
  "solana-sdk-ids",
  "solana-sdk-macro",
  "solana-sysvar-id",
@@ -6149,7 +6204,7 @@ version = "2.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b5b96e9f0300fa287b545613f007dfe20043d7812bee255f418c1eb649c93b63"
 dependencies = [
- "five8",
+ "five8 0.2.1",
  "js-sys",
  "solana-atomic-u64 2.2.1",
  "solana-sanitize 2.2.1",
@@ -6158,13 +6213,23 @@ dependencies = [

 [[package]]
 name = "solana-hash"
-version = "3.0.0"
+version = "3.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a063723b9e84c14d8c0d2cdf0268207dc7adecf546e31251f9e07c7b00b566c"
+checksum = "337c246447142f660f778cf6cb582beba8e28deb05b3b24bfb9ffd7c562e5f41"
 dependencies = [
+ "solana-hash 4.0.1",
+]
+
+[[package]]
+name = "solana-hash"
+version = "4.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a5d48a6ee7b91fc7b998944ab026ed7b3e2fc8ee3bc58452644a86c2648152f"
+dependencies = [
+ "borsh 1.5.7",
  "bytemuck",
  "bytemuck_derive",
- "five8",
+ "five8 1.0.0",
  "serde",
  "serde_derive",
  "solana-atomic-u64 3.0.0",
@@ -6179,23 +6244,24 @@ checksum = "e92f37a14e7c660628752833250dd3dcd8e95309876aee751d7f8769a27947c6"

 [[package]]
 name = "solana-instruction"
-version = "3.0.0"
+version = "3.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8df4e8fcba01d7efa647ed20a081c234475df5e11a93acb4393cc2c9a7b99bab"
+checksum = "ee1b699a2c1518028a9982e255e0eca10c44d90006542d9d7f9f40dbce3f7c78"
 dependencies = [
  "bincode",
+ "borsh 1.5.7",
  "serde",
  "serde_derive",
- "solana-define-syscall 3.0.0",
+ "solana-define-syscall 4.0.1",
  "solana-instruction-error",
- "solana-pubkey 3.0.0",
+ "solana-pubkey 4.0.0",
 ]

 [[package]]
 name = "solana-instruction-error"
-version = "2.0.0"
+version = "2.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b1f0d483b8ae387178d9210e0575b666b05cdd4bd0f2f188128249f6e454d39d"
+checksum = "b04259e03c05faf38a8c24217b5cfe4c90572ae6184ab49cddb1584fdd756d3f"
 dependencies = [
  "num-traits",
  "serde",
@@ -6223,14 +6289,14 @@ dependencies = [

 [[package]]
 name = "solana-keypair"
-version = "3.0.1"
+version = "3.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "952ed9074c12edd2060cb09c2a8c664303f4ab7f7056a407ac37dd1da7bdaa3e"
+checksum = "5ac8be597c9e231b0cab2928ce3bc3e4ee77d9c0ad92977b9d901f3879f25a7a"
 dependencies = [
  "ed25519-dalek",
- "five8",
+ "five8 1.0.0",
  "rand 0.8.5",
- "solana-pubkey 3.0.0",
+ "solana-address 2.0.0",
  "solana-seed-phrase",
  "solana-signature",
  "solana-signer",
@@ -6280,9 +6346,9 @@ dependencies = [

 [[package]]
 name = "solana-measure"
-version = "3.0.8"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8dce9330421ef476f95c67f8210d734f9b6a38fc9fcd8abbd306ffbf23361067"
+checksum = "f96102f7c7c9f21cba06453b2274a55f279f5c4a0201ddef63df940db9c7bf61"

 [[package]]
 name = "solana-message"
@@ -6295,8 +6361,8 @@ dependencies = [
  "lazy_static",
  "serde",
  "serde_derive",
- "solana-address",
- "solana-hash 3.0.0",
+ "solana-address 1.1.0",
+ "solana-hash 3.1.0",
  "solana-instruction",
  "solana-sanitize 3.0.1",
  "solana-sdk-ids",
@@ -6306,16 +6372,16 @@ dependencies = [

 [[package]]
 name = "solana-metrics"
-version = "3.0.8"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "214a6a27f28156e0a0bfc1e218a4ac30c5fb42e0d1c481cd8f90de0b98fa0984"
+checksum = "a6aabf25b3b0eb42d2bd6aee7b9c7a345975212a135781de42b3164978b28df0"
 dependencies = [
  "crossbeam-channel",
  "gethostname",
  "log",
  "reqwest 0.12.24",
  "solana-cluster-type",
- "solana-sha256-hasher 3.0.0",
+ "solana-sha256-hasher 3.1.0",
  "solana-time-utils",
  "thiserror 2.0.17",
 ]
@@ -6331,21 +6397,23 @@ dependencies = [

 [[package]]
 name = "solana-net-utils"
-version = "3.0.8"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2c465c3bca426bfca3548c41352b5b358a0401bdd22b1fcef45474ce94cc23a1"
+checksum = "24116f6bd91038a99b79701d452eadd6f0dfb445311380658ab082491ea716c4"
 dependencies = [
  "anyhow",
  "bincode",
  "bytes",
+ "cfg-if",
+ "dashmap",
  "itertools 0.12.1",
  "log",
  "nix",
  "rand 0.8.5",
  "serde",
- "serde_derive",
  "socket2 0.6.1",
  "solana-serde",
+ "solana-svm-type-overrides",
  "tokio",
  "url",
 ]
@@ -6359,9 +6427,9 @@ dependencies = [
  "serde",
  "serde_derive",
  "solana-fee-calculator",
- "solana-hash 3.0.0",
+ "solana-hash 3.1.0",
  "solana-pubkey 3.0.0",
- "solana-sha256-hasher 3.0.0",
+ "solana-sha256-hasher 3.1.0",
 ]

 [[package]]
@@ -6389,9 +6457,9 @@ dependencies = [

 [[package]]
 name = "solana-perf"
-version = "3.0.8"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a5096d12294fb0da9819fe198d0f003a111d29cfa3c0e49b9ed6380577396e5"
+checksum = "853368b085bfbf1775dec3d4cce628a7c045218a84f2e235469906cd8e1478f4"
 dependencies = [
  "ahash 0.8.12",
  "bincode",
@@ -6407,7 +6475,7 @@ dependencies = [
  "rand 0.8.5",
  "rayon",
  "serde",
- "solana-hash 3.0.0",
+ "solana-hash 3.1.0",
  "solana-message",
  "solana-metrics",
  "solana-packet",
@@ -6417,19 +6485,19 @@ dependencies = [
  "solana-short-vec",
  "solana-signature",
  "solana-time-utils",
+ "solana-transaction-context",
 ]

 [[package]]
 name = "solana-program-entrypoint"
-version = "3.1.0"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6557cf5b5e91745d1667447438a1baa7823c6086e4ece67f8e6ebfa7a8f72660"
+checksum = "84c9b0a1ff494e05f503a08b3d51150b73aa639544631e510279d6375f290997"
 dependencies = [
  "solana-account-info",
- "solana-define-syscall 3.0.0",
- "solana-msg",
+ "solana-define-syscall 4.0.1",
  "solana-program-error",
- "solana-pubkey 3.0.0",
+ "solana-pubkey 4.0.0",
 ]

 [[package]]
@@ -6443,11 +6511,11 @@ dependencies = [

 [[package]]
 name = "solana-program-memory"
-version = "3.0.0"
+version = "3.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "10e5660c60749c7bfb30b447542529758e4dbcecd31b1e8af1fdc92e2bdde90a"
+checksum = "4068648649653c2c50546e9a7fb761791b5ab0cda054c771bb5808d3a4b9eb52"
 dependencies = [
- "solana-define-syscall 3.0.0",
+ "solana-define-syscall 4.0.1",
 ]

 [[package]]
@@ -6472,8 +6540,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9b62adb9c3261a052ca1f999398c388f1daf558a1b492f60a6d9e64857db4ff1"
 dependencies = [
  "curve25519-dalek",
- "five8",
- "five8_const",
+ "five8 0.2.1",
+ "five8_const 0.1.4",
  "getrandom 0.2.16",
  "js-sys",
  "num-traits",
@@ -6491,14 +6559,23 @@ version = "3.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8909d399deb0851aa524420beeb5646b115fd253ef446e35fe4504c904da3941"
 dependencies = [
- "solana-address",
+ "solana-address 1.1.0",
+]
+
+[[package]]
+name = "solana-pubkey"
+version = "4.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a6f7104d456b58e1418c21a8581e89810278d1190f70f27ece7fc0b2c9282a57"
+dependencies = [
+ "solana-address 2.0.0",
 ]

 [[package]]
 name = "solana-pubsub-client"
-version = "3.0.8"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "38812207b0b1b66a7df0558df9a6d53eb7aa495d00ce0d8bef1628b3774a5f29"
+checksum = "06c39e4d0918c573095cb5c004aa6e915e0af76ff9cc7e33f97dc1df52bdfeb7"
 dependencies = [
  "crossbeam-channel",
  "futures-util",
@@ -6506,7 +6583,6 @@ dependencies = [
  "log",
  "semver",
  "serde",
- "serde_derive",
  "serde_json",
  "solana-account-decoder-client-types",
  "solana-clock",
@@ -6523,9 +6599,9 @@ dependencies = [

 [[package]]
 name = "solana-quic-client"
-version = "3.0.8"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bff930459fa06e95cb2d020f5be1b3d47b9f3a0e22e68c67b50537dca908b3aa"
+checksum = "02ed60f78f9a56f67b059edf34b2a4a1afd1eaf165402be9dc511581a7ec569f"
 dependencies = [
  "async-lock",
  "async-trait",
@@ -6534,7 +6610,7 @@ dependencies = [
  "log",
  "quinn",
  "quinn-proto",
- "rustls 0.23.34",
+ "rustls 0.23.35",
  "solana-connection-cache",
  "solana-keypair",
  "solana-measure",
@@ -6562,9 +6638,9 @@ dependencies = [

 [[package]]
 name = "solana-rayon-threadlimit"
-version = "3.0.8"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5034d175b90f0b5a5ff155eff5be091dfbc300ba162e1d35b8cd72be1a0d670b"
+checksum = "19142fd63c774e0b4b3a25098f68683128db44bc7d86986400b98d8544417385"
 dependencies = [
  "log",
  "num_cpus",
@@ -6595,9 +6671,9 @@ dependencies = [

 [[package]]
 name = "solana-rpc-client"
-version = "3.0.8"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f7e038dea8817f8a713e0077226cfe638b93c44cf861e3f9545ef40b8e71bc78"
+checksum = "9ee9e21cb2a6b56ebe30ed4730c367018ead598a3a7a99fcc4bbb29681ff9670"
 dependencies = [
  "async-trait",
  "base64 0.22.1",
@@ -6610,16 +6686,16 @@ dependencies = [
  "reqwest-middleware",
  "semver",
  "serde",
- "serde_derive",
  "serde_json",
  "solana-account",
+ "solana-account-decoder",
  "solana-account-decoder-client-types",
  "solana-clock",
  "solana-commitment-config",
  "solana-epoch-info",
  "solana-epoch-schedule",
  "solana-feature-gate-interface",
- "solana-hash 3.0.0",
+ "solana-hash 3.1.0",
  "solana-instruction",
  "solana-message",
  "solana-pubkey 3.0.0",
@@ -6635,16 +6711,15 @@ dependencies = [

 [[package]]
 name = "solana-rpc-client-api"
-version = "3.0.8"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4908dbe81349db6ae851d808bef3078e49937400ad687e1c4e78b79f796ff88c"
+checksum = "6ce31a0c56989efe7bf05f03303c44f8cd67788c7c5ace5352270a414d715dd3"
 dependencies = [
  "anyhow",
  "jsonrpc-core",
  "reqwest 0.12.24",
  "reqwest-middleware",
  "serde",
- "serde_derive",
  "serde_json",
  "solana-account-decoder-client-types",
  "solana-clock",
@@ -6657,13 +6732,13 @@ dependencies = [

 [[package]]
 name = "solana-rpc-client-nonce-utils"
-version = "3.0.8"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f981ef4da0734f459f5b71d1e8dcd6807c17721681714099c90ff6848c7dbb4a"
+checksum = "ede5005bd6f29d131fdaeae736348b5c6dc238a4be42550ade03d27d32ebf676"
 dependencies = [
  "solana-account",
  "solana-commitment-config",
- "solana-hash 3.0.0",
+ "solana-hash 3.1.0",
  "solana-message",
  "solana-nonce",
  "solana-pubkey 3.0.0",
@@ -6674,23 +6749,24 @@ dependencies = [

 [[package]]
 name = "solana-rpc-client-types"
-version = "3.0.8"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0305c8cf8fca27a3f0385ad1d400b2cdde99d6cad2187370acdce117f93bd58f"
+checksum = "a1d9dda2ecfac8ab5835a8b42b08605d0696a9deb28ad3269db4d8f9dbaf31ef"
 dependencies = [
  "base64 0.22.1",
  "bs58 0.5.1",
  "semver",
  "serde",
- "serde_derive",
  "serde_json",
  "solana-account",
  "solana-account-decoder-client-types",
+ "solana-address 1.1.0",
  "solana-clock",
  "solana-commitment-config",
  "solana-fee-calculator",
  "solana-inflation",
- "solana-pubkey 3.0.0",
+ "solana-reward-info",
+ "solana-transaction",
  "solana-transaction-error",
  "solana-transaction-status-client-types",
  "solana-version",
@@ -6712,9 +6788,9 @@ checksum = "dcf09694a0fc14e5ffb18f9b7b7c0f15ecb6eac5b5610bf76a1853459d19daf9"

 [[package]]
 name = "solana-sbpf"
-version = "0.12.2"
+version = "0.13.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0f224d906c14efc7ed7f42bc5fe9588f3f09db8cabe7f6023adda62a69678e1a"
+checksum = "b15b079e08471a9dbfe1e48b2c7439c85aa2a055cbd54eddd8bd257b0a7dbb29"
 dependencies = [
  "byteorder",
  "combine 3.8.1",
@@ -6726,11 +6802,11 @@ dependencies = [

 [[package]]
 name = "solana-sdk-ids"
-version = "3.0.0"
+version = "3.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b1b6d6aaf60669c592838d382266b173881c65fb1cdec83b37cb8ce7cb89f9ad"
+checksum = "def234c1956ff616d46c9dd953f251fa7096ddbaa6d52b165218de97882b7280"
 dependencies = [
- "solana-pubkey 3.0.0",
+ "solana-address 2.0.0",
 ]

 [[package]]
@@ -6742,7 +6818,7 @@ dependencies = [
  "bs58 0.5.1",
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -6807,22 +6883,22 @@ dependencies = [

 [[package]]
 name = "solana-sha256-hasher"
-version = "3.0.0"
+version = "3.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a9b912ba6f71cb202c0c3773ec77bf898fa9fe0c78691a2d6859b3b5b8954719"
+checksum = "db7dc3011ea4c0334aaaa7e7128cb390ecf546b28d412e9bf2064680f57f588f"
 dependencies = [
  "sha2",
- "solana-define-syscall 3.0.0",
- "solana-hash 3.0.0",
+ "solana-define-syscall 4.0.1",
+ "solana-hash 4.0.1",
 ]

 [[package]]
 name = "solana-short-vec"
-version = "3.0.0"
+version = "3.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b69d029da5428fc1c57f7d49101b2077c61f049d4112cd5fb8456567cc7d2638"
+checksum = "79fb1809a32cfcf7d9c47b7070a92fa17cdb620ab5829e9a8a9ff9d138a7a175"
 dependencies = [
- "serde",
+ "serde_core",
 ]

 [[package]]
@@ -6832,7 +6908,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4bb8057cc0e9f7b5e89883d49de6f407df655bb6f3a71d0b7baf9986a2218fd9"
 dependencies = [
  "ed25519-dalek",
- "five8",
+ "five8 0.2.1",
  "serde",
  "serde-big-array",
  "serde_derive",
@@ -6858,7 +6934,7 @@ checksum = "80a293f952293281443c04f4d96afd9d547721923d596e92b4377ed2360f1746"
 dependencies = [
  "serde",
  "serde_derive",
- "solana-hash 3.0.0",
+ "solana-hash 3.1.0",
  "solana-sdk-ids",
  "solana-sysvar-id",
 ]

 [[package]]
@@ -6907,12 +6983,11 @@ dependencies = [

 [[package]]
 name = "solana-streamer"
-version = "3.0.8"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "79c50b3b9e5f230f18ba729a266ec0e872926e317c1a8da0cfbc030c6f5a204c"
+checksum = "c3b8157f227922e5a5ba31569dea9759432fe276f20182ce0ce3ee6f5275e0e8"
 dependencies = [
  "arc-swap",
- "async-channel 1.9.0",
  "bytes",
  "crossbeam-channel",
  "dashmap",
@@ -6920,7 +6995,7 @@ dependencies = [
  "futures-util",
  "governor",
  "histogram",
- "indexmap 2.12.0",
+ "indexmap 2.12.1",
  "itertools 0.12.1",
  "libc",
  "log",
@@ -6931,7 +7006,7 @@ dependencies = [
  "quinn",
  "quinn-proto",
  "rand 0.8.5",
- "rustls 0.23.34",
+ "rustls 0.23.35",
  "smallvec",
  "socket2 0.6.1",
  "solana-keypair",
@@ -6956,9 +7031,18 @@ dependencies = [

 [[package]]
 name = "solana-svm-feature-set"
-version = "3.0.8"
+version = "3.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d848a90245dbaffeb8c43492eb902c2b988200a1b59b3959435d17abcea3eb3d"
+
+[[package]]
+name = "solana-svm-type-overrides"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c67a4a533a53811f1e31829374d5ab0761e6b4180c7145d69b5c62ab4a9a24af"
+checksum = "cddcdb9981c7838ceb16bb97929c5cab015b0bdcb12243720000f8e44c9a5af2"
+dependencies = [
+ "rand 0.8.5",
+]

 [[package]]
 name = "solana-system-interface"
@@ -6977,9 +7061,9 @@ dependencies = [

 [[package]]
 name = "solana-sysvar"
-version = "3.0.0"
+version = "3.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "63205e68d680bcc315337dec311b616ab32fea0a612db3b883ce4de02e0953f9"
+checksum = "3205cc7db64a0f1a20b7eb2405773fa64e45f7fe0fc7a73e50e90eca6b2b0be7"
 dependencies = [
  "base64 0.22.1",
  "bincode",
@@ -6988,17 +7072,17 @@ dependencies = [
  "serde_derive",
  "solana-account-info",
  "solana-clock",
- "solana-define-syscall 3.0.0",
+ "solana-define-syscall 4.0.1",
  "solana-epoch-rewards",
  "solana-epoch-schedule",
  "solana-fee-calculator",
- "solana-hash 3.0.0",
+ "solana-hash 4.0.1",
  "solana-instruction",
  "solana-last-restart-slot",
  "solana-program-entrypoint",
  "solana-program-error",
  "solana-program-memory",
- "solana-pubkey 3.0.0",
+ "solana-pubkey 4.0.0",
  "solana-rent",
  "solana-sdk-ids",
  "solana-sdk-macro",
@@ -7009,11 +7093,11 @@ dependencies = [

 [[package]]
 name = "solana-sysvar-id"
-version = "3.0.0"
+version = "3.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5051bc1a16d5d96a96bc33b5b2ec707495c48fe978097bdaba68d3c47987eb32"
+checksum = "17358d1e9a13e5b9c2264d301102126cf11a47fd394cdf3dec174fe7bc96e1de"
 dependencies = [
- "solana-pubkey 3.0.0",
+ "solana-address 2.0.0",
  "solana-sdk-ids",
 ]

 [[package]]
@@ -7025,11 +7109,11 @@ checksum = "0ced92c60aa76ec4780a9d93f3bd64dfa916e1b998eacc6f1c110f3f444f02c9"

 [[package]]
 name = "solana-tls-utils"
-version = "3.0.8"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4b3cf5ccc8e890e2f22ca194402b8e2039c884605abe1c3a71ec85ccb8fecdec"
+checksum = "7f31ba4cf689b1adfd392370de1998bb5d8cdcd07c11efa8e08aa2dabeac7be7"
 dependencies = [
- "rustls 0.23.34",
+ "rustls 0.23.35",
  "solana-keypair",
  "solana-pubkey 3.0.0",
  "solana-signer",
@@ -7038,14 +7122,14 @@ dependencies = [

 [[package]]
 name = "solana-tpu-client"
-version = "3.0.8"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ca9ea8a8ad7be6c899cfcf4890379c8041e734e632f31175b9331f0964defb17"
+checksum = "b618992d02477c0300e89ad2fe0d109bcd2a20392f2c11c106d9f208a2124682"
 dependencies = [
  "async-trait",
  "bincode",
  "futures-util",
- "indexmap 2.12.0",
+ "indexmap 2.12.1",
  "indicatif",
  "log",
  "rayon",
@@ -7072,15 +7156,15 @@ dependencies = [

 [[package]]
 name = "solana-transaction"
-version = "3.0.1"
+version = "3.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64928e6af3058dcddd6da6680cbe08324b4e071ad73115738235bbaa9e9f72a5"
+checksum = "2ceb2efbf427a91b884709ffac4dac29117752ce1e37e9ae04977e450aa0bb76"
 dependencies = [
  "bincode",
  "serde",
  "serde_derive",
- "solana-address",
- "solana-hash 3.0.0",
+ "solana-address 2.0.0",
+ "solana-hash 4.0.1",
  "solana-instruction",
  "solana-instruction-error",
  "solana-message",
@@ -7094,13 +7178,12 @@ dependencies = [

 [[package]]
 name = "solana-transaction-context"
-version = "3.0.8"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a81e203a134fb6de363aa5c8b5faf7e7b27719b9fb5711c7e91a28bdffbe58ed"
+checksum = "1bd55fe81fbc36ee00fde8233764b1f60c141e93a069932f126b707a515b8199"
 dependencies = [
  "bincode",
  "serde",
- "serde_derive",
  "solana-account",
  "solana-instruction",
  "solana-instructions-sysvar",
@@ -7124,9 +7207,9 @@ dependencies = [

 [[package]]
 name = "solana-transaction-metrics-tracker"
-version = "3.0.8"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "729db9e09657aec3922fb09fa7549912f7cb4de5845317ebb738caa4560369cd"
+checksum = "b2255338b7be49a8d49009771daca0391c76a0cf18f8053cb411bf304302c063"
 dependencies = [
  "base64 0.22.1",
  "bincode",
@@ -7140,9 +7223,9 @@ dependencies = [

 [[package]]
 name = "solana-transaction-status"
-version = "3.0.8"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "22425e57cda6b78da1644230d4625bfb2a32c4fb12f011436fa3be441752d502"
+checksum = "e6cd9bfed22fcef7bd3c5a2ddde81eeeb1f300df73a6f11366d27621333da80c"
 dependencies = [
  "Inflector",
  "agave-reserved-account-keys",
@@ -7152,12 +7235,11 @@ dependencies = [
  "bs58 0.5.1",
  "log",
  "serde",
- "serde_derive",
  "serde_json",
  "solana-account-decoder",
  "solana-address-lookup-table-interface",
  "solana-clock",
- "solana-hash 3.0.0",
+ "solana-hash 3.1.0",
  "solana-instruction",
  "solana-loader-v2-interface",
  "solana-loader-v3-interface",
@@ -7184,15 +7266,14 @@ dependencies = [

 [[package]]
 name = "solana-transaction-status-client-types"
-version = "3.0.8"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ba6ccc4c0bad50ebd910936e113b4fb9872f33cb17c896c5b02c005f91caa131"
+checksum = "c3a1d4fd9ba4f6a301bb4dae9411bfab458e1b3ba66c3a6bc7333a14d35ca5d9"
 dependencies = [
  "base64 0.22.1",
  "bincode",
  "bs58 0.5.1",
  "serde",
- "serde_derive",
  "serde_json",
  "solana-account-decoder-client-types",
  "solana-commitment-config",
@@ -7209,9 +7290,9 @@ dependencies = [

 [[package]]
 name = "solana-udp-client"
-version = "3.0.8"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3acc1f343c1ebe61ca501ba6f3f413056f5a8ceddd5a6b6d729e5d421ba0976a"
+checksum = "f8de6ebded5eca0efa5ff2246a3599c3b1f9da20a01bb171668d502c733e8208"
 dependencies = [
  "async-trait",
  "solana-connection-cache",
@@ -7225,24 +7306,23 @@ dependencies = [

 [[package]]
 name = "solana-version"
-version = "3.0.8"
+version = "3.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3918648ecc0e8446c20a02aab2253b2e91ce8baf0af16f141292e6732778d4f1"
+checksum = "d40d18d0807743a5fbd8f6c328d39cfb9bdcd63d196b9d168efdbfe27447315e"
 dependencies = [
  "agave-feature-set",
  "rand 0.8.5",
  "semver",
  "serde",
- "serde_derive",
  "solana-sanitize 3.0.1",
  "solana-serde-varint",
 ]

 [[package]]
 name = "solana-vote-interface"
-version = "3.0.0"
+version = "4.0.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "66631ddbe889dab5ec663294648cd1df395ec9df7a4476e7b3e095604cfdb539"
+checksum = "db6e123e16bfdd7a81d71b4c4699e0b29580b619f4cd2ef5b6aae1eb85e8979f"
 dependencies = [
  "bincode",
  "cfg_eval",
@@ -7252,7 +7332,7 @@ dependencies = [
  "serde_derive",
  "serde_with",
  "solana-clock",
- "solana-hash 3.0.0",
+ "solana-hash 3.1.0",
  "solana-instruction",
  "solana-instruction-error",
  "solana-pubkey 3.0.0",
@@ -7354,7 +7434,7 @@ checksum = "d48cc11459e265d5b501534144266620289720b4c44522a47bc6b63cd295d2f3"
 dependencies = [
  "bytemuck",
  "solana-program-error",
- "solana-sha256-hasher 3.0.0",
+ "solana-sha256-hasher 3.1.0",
  "spl-discriminator-derive",
 ]
@@ -7366,7 +7446,7 @@ checksum = "d9e8418ea6269dcfb01c712f0444d2c75542c04448b480e87de59d2865edc750"
 dependencies = [
  "quote",
  "spl-discriminator-syn",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -7378,7 +7458,7 @@ dependencies = [
  "proc-macro2",
  "quote",
  "sha2",
- "syn 2.0.108",
+ "syn 2.0.110",
  "thiserror 1.0.69",
 ]

 [[package]]
@@ -7423,9 +7503,9 @@ dependencies = [

 [[package]]
 name = "spl-token-2022-interface"
-version = "2.0.0"
+version = "2.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0888304af6b3d839e435712e6c84025e09513017425ff62045b6b8c41feb77d9"
+checksum = "2fcd81188211f4b3c8a5eba7fd534c7142f9dd026123b3472492782cc72f4dc6"
 dependencies = [
  "arrayref",
  "bytemuck",
@@ -7451,9 +7531,9 @@ dependencies = [

 [[package]]
 name = "spl-token-confidential-transfer-proof-extraction"
-version = "0.5.0"
+version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a22217af69b7a61ca813f47c018afb0b00b02a74a4c70ff099cd4287740bc3d"
+checksum = "879a9ebad0d77383d3ea71e7de50503554961ff0f4ef6cbca39ad126e6f6da3a"
 dependencies = [
  "bytemuck",
  "solana-account-info",
@@ -7471,9 +7551,9 @@ dependencies = [

 [[package]]
 name = "spl-token-confidential-transfer-proof-generation"
-version = "0.5.0"
+version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f63a2b41095945dc15274b924b21ccae9b3ec9dc2fdd43dbc08de8c33bbcd915"
+checksum = "a0cd59fce3dc00f563c6fa364d67c3f200d278eae681f4dc250240afcfe044b1"
 dependencies = [
  "curve25519-dalek",
  "solana-zk-sdk",
@@ -7723,9 +7803,9 @@ dependencies = [

 [[package]]
 name = "syn"
-version = "2.0.108"
+version = "2.0.110"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "da58917d35242480a05c2897064da0a80589a2a0476c9a3f2fdc83b53502e917"
+checksum = "a99801b5bd34ede4cf3fc688c5919368fea4e4814a4664359503e6015b280aea"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -7767,7 +7847,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -7872,7 +7952,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -7883,7 +7963,7 @@ checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -7985,7 +8065,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -8009,23 +8089,13 @@ dependencies = [
  "webpki",
 ]

-[[package]]
-name = "tokio-rustls"
-version = "0.24.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081"
-dependencies = [
- "rustls 0.21.12",
- "tokio",
-]
-
 [[package]]
 name = "tokio-rustls"
 version = "0.26.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61"
 dependencies = [
- "rustls 0.23.34",
+ "rustls 0.23.35",
  "tokio",
 ]
@@ -8043,29 +8113,31 @@ dependencies = [

 [[package]]
 name = "tokio-tungstenite"
-version = "0.20.1"
+version = "0.28.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c"
+checksum = "d25a406cddcc431a75d3d9afc6a7c0f7428d4891dd973e4d54c56b46127bf857"
 dependencies = [
  "futures-util",
  "log",
- "rustls 0.21.12",
+ "rustls 0.23.35",
+ "rustls-pki-types",
  "tokio",
- "tokio-rustls 0.24.1",
+ "tokio-rustls 0.26.4",
  "tungstenite",
- "webpki-roots 0.25.4",
+ "webpki-roots 0.26.11",
 ]

 [[package]]
 name = "tokio-util"
-version = "0.7.16"
+version = "0.7.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5"
+checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594"
 dependencies = [
  "bytes",
  "futures-core",
  "futures-io",
  "futures-sink",
+ "futures-util",
  "pin-project-lite",
  "tokio",
 ]
@@ -8100,7 +8172,7 @@ version = "0.19.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421"
 dependencies = [
- "indexmap 2.12.0",
+ "indexmap 2.12.1",
  "toml_datetime 0.6.11",
  "winnow 0.5.40",
 ]
@@ -8111,7 +8183,7 @@ version = "0.23.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d"
 dependencies = [
- "indexmap 2.12.0",
+ "indexmap 2.12.1",
  "toml_datetime 0.7.3",
  "toml_parser",
  "winnow 0.7.13",
@@ -8141,7 +8213,7 @@ dependencies = [
  "http 1.3.1",
  "http-body 1.0.1",
  "http-body-util",
- "hyper 1.7.0",
+ "hyper 1.8.1",
  "hyper-timeout",
  "hyper-util",
  "percent-encoding",
@@ -8168,7 +8240,7 @@ dependencies = [
  "prettyplease",
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -8206,7 +8278,7 @@ dependencies = [
  "prost-build",
  "prost-types",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
  "tempfile",
  "tonic-build",
 ]
@@ -8240,7 +8312,7 @@ checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9"
 dependencies = [
  "futures-core",
  "futures-util",
- "indexmap 2.12.0",
+ "indexmap 2.12.1",
  "pin-project-lite",
  "slab",
  "sync_wrapper 1.0.2",
@@ -8292,7 +8364,7 @@ dependencies = [
  "futures-util",
  "http 1.3.1",
  "http-body 1.0.1",
- "iri-string 0.7.8",
+ "iri-string 0.7.9",
  "pin-project-lite",
  "tower 0.5.2",
  "tower-layer",
@@ -8331,7 +8403,7 @@ checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -8394,23 +8466,22 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b"

 [[package]]
 name = "tungstenite"
-version = "0.20.1"
+version = "0.28.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e3dac10fd62eaf6617d3a904ae222845979aec67c615d1c842b4002c7666fb9"
+checksum = "8628dcc84e5a09eb3d8423d6cb682965dea9133204e8fb3efee74c2a0c259442"
 dependencies = [
- "byteorder",
  "bytes",
  "data-encoding",
- "http 0.2.12",
+ "http 1.3.1",
  "httparse",
  "log",
- "rand 0.8.5",
- "rustls 0.21.12",
+ "rand 0.9.2",
+ "rustls 0.23.35",
+ "rustls-pki-types",
  "sha1",
- "thiserror 1.0.69",
- "url",
+ "thiserror 2.0.17",
  "utf-8",
- "webpki-roots 0.24.0",
+ "webpki-roots 0.26.11",
 ]

 [[package]]
@@ -8478,9 +8549,9 @@ checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e"

 [[package]]
 name = "unit-prefix"
-version = "0.5.1"
+version = "0.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "323402cff2dd658f39ca17c789b502021b3f18707c91cdf22e3838e1b4023817"
+checksum = "81e544489bf3d8ef66c953931f56617f423cd4b5494be343d9b9d3dda037b9a3"

 [[package]]
 name = "universal-hash"
@@ -8565,7 +8636,7 @@ version = "4.2.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c5afb1a60e207dca502682537fefcfd9921e71d0b83e9576060f09abc6efab23"
 dependencies = [
- "indexmap 2.12.0",
+ "indexmap 2.12.1",
  "serde",
  "serde_json",
  "serde_yaml",
@@ -8581,7 +8652,7 @@ dependencies = [
  "proc-macro-error",
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -8604,9 +8675,9 @@ checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65"

 [[package]]
 name = "value-bag"
-version = "1.11.1"
+version = "1.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "943ce29a8a743eb10d6082545d861b24f9d1b160b7d741e0f2cdf726bec909c5"
+checksum = "7ba6f5989077681266825251a52748b8c1d8a4ad098cc37e440103d0ea717fc0"

 [[package]]
 name = "vcpkg"
@@ -8711,7 +8782,7 @@ dependencies = [
  "bumpalo",
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
  "wasm-bindgen-shared",
 ]
@@ -8769,9 +8840,9 @@ dependencies = [

 [[package]]
 name = "webpki-root-certs"
-version = "1.0.3"
+version = "1.0.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "05d651ec480de84b762e7be71e6efa7461699c19d9e2c272c8d93455f567786e"
+checksum = "ee3e3b5f5e80bc89f30ce8d0343bf4e5f12341c51f3e26cbeecbc7c85443e85b"
 dependencies = [
  "rustls-pki-types",
 ]
@@ -8787,24 +8858,18 @@ dependencies = [

 [[package]]
 name = "webpki-roots"
-version = "0.24.0"
+version = "0.26.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b291546d5d9d1eab74f069c77749f2cb8504a12caa20f0f2de93ddbf6f411888"
+checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9"
 dependencies = [
- "rustls-webpki 0.101.7",
+ "webpki-roots 1.0.4",
 ]

 [[package]]
 name = "webpki-roots"
-version = "0.25.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1"
-
-[[package]]
-name = "webpki-roots"
-version = "1.0.3"
+version = "1.0.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32b130c0d2d49f8b6889abc456e795e82525204f27c42cf767cf0d7734e089b8"
+checksum = "b2878ef029c47c6e8cf779119f20fcf52bde7ad42a731b2a304bc221df17571e"
 dependencies = [
  "rustls-pki-types",
 ]
@@ -8859,9 +8924,9 @@ checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb"
 dependencies = [
  "windows-implement",
  "windows-interface",
- "windows-link 0.2.1",
- "windows-result 0.4.1",
- "windows-strings 0.5.1",
+ "windows-link",
+ "windows-result",
+ "windows-strings",
 ]

 [[package]]
@@ -8872,7 +8937,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -8883,15 +8948,9 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

-[[package]]
-name = "windows-link"
-version = "0.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a"
-
 [[package]]
 name = "windows-link"
 version = "0.2.1"
@@ -8900,22 +8959,13 @@ checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5"

 [[package]]
 name = "windows-registry"
-version = "0.5.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b8a9ed28765efc97bbc954883f4e6796c33a06546ebafacbabee9696967499e"
-dependencies = [
- "windows-link 0.1.3",
- "windows-result 0.3.4",
- "windows-strings 0.4.2",
-]
-
-[[package]]
-name = "windows-result"
-version = "0.3.4"
+version = "0.6.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6"
+checksum = "02752bf7fbdcce7f2a27a742f798510f3e5ad88dbe84871e5168e2120c3d5720"
 dependencies = [
- "windows-link 0.1.3",
+ "windows-link",
+ "windows-result",
+ "windows-strings",
 ]

 [[package]]
@@ -8924,16 +8974,7 @@ version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5"
 dependencies = [
- "windows-link 0.2.1",
-]
-
-[[package]]
-name = "windows-strings"
-version = "0.4.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57"
-dependencies = [
- "windows-link 0.1.3",
+ "windows-link",
 ]

 [[package]]
@@ -8942,7 +8983,7 @@ version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091"
 dependencies = [
- "windows-link 0.2.1",
+ "windows-link",
 ]

 [[package]]
@@ -8996,7 +9037,7 @@ version = "0.61.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc"
 dependencies = [
- "windows-link 0.2.1",
+ "windows-link",
 ]

 [[package]]
@@ -9051,7 +9092,7 @@ version = "0.53.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3"
 dependencies = [
- "windows-link 0.2.1",
+ "windows-link",
  "windows_aarch64_gnullvm 0.53.1",
  "windows_aarch64_msvc 0.53.1",
  "windows_i686_gnu 0.53.1",
@@ -9337,7 +9378,7 @@ dependencies = [
  "solana-account",
  "solana-account-decoder",
  "solana-clock",
- "solana-hash 3.0.0",
+ "solana-hash 3.1.0",
  "solana-message",
  "solana-pubkey 3.0.0",
  "solana-signature",
@@ -9370,28 +9411,28 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
  "synstructure 0.13.2",
 ]

 [[package]]
 name = "zerocopy"
-version = "0.8.27"
+version = "0.8.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c"
+checksum = "43fa6694ed34d6e57407afbccdeecfa268c470a7d2a5b0cf49ce9fcc345afb90"
 dependencies = [
  "zerocopy-derive",
 ]

 [[package]]
 name = "zerocopy-derive"
-version = "0.8.27"
+version = "0.8.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831"
+checksum = "c640b22cd9817fae95be82f0d2f90b11f7605f6c319d16705c459b27ac2cbc26"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -9411,7 +9452,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
  "synstructure 0.13.2",
 ]
@@ -9432,7 +9473,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.108",
+ "syn 2.0.110",
 ]

 [[package]]
@@ -9465,7
+9506,7 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.110", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index b74f4ef7..9b082a8f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,7 @@ name = "photon-indexer" publish = true readme = "README.md" repository = "https://github.com/helius-labs/photon" -version = "0.51.1" +version = "0.51.2" [[bin]] name = "photon" @@ -133,7 +133,7 @@ rand = "0.8.5" bincode = "1.3.3" rust-s3 = "0.34.0" cloud-storage = "0.11.1" - +jsonwebtoken = "9" [dev-dependencies] function_name = "0.3.0" diff --git a/src/api/api.rs b/src/api/api.rs index faf5f835..ab6c03a9 100644 --- a/src/api/api.rs +++ b/src/api/api.rs @@ -61,10 +61,6 @@ use super::{ get_indexer_slot::get_indexer_slot, }, }; -use crate::api::method::get_batch_address_update_info::{ - get_batch_address_update_info, GetBatchAddressUpdateInfoRequest, - GetBatchAddressUpdateInfoResponse, -}; use crate::api::method::get_compressed_account_proof::{ get_compressed_account_proof_v2, GetCompressedAccountProofResponseV2, }; @@ -81,6 +77,9 @@ use crate::api::method::get_multiple_compressed_account_proofs::{ use crate::api::method::get_queue_elements::{ get_queue_elements, GetQueueElementsRequest, GetQueueElementsResponse, }; +use crate::api::method::get_queue_info::{ + get_queue_info, GetQueueInfoRequest, GetQueueInfoResponse, +}; use crate::api::method::get_validity_proof::{ get_validity_proof, get_validity_proof_v2, GetValidityProofRequest, GetValidityProofRequestDocumentation, GetValidityProofRequestV2, GetValidityProofResponse, @@ -274,6 +273,13 @@ impl PhotonApi { get_queue_elements(self.db_conn.as_ref(), request).await } + pub async fn get_queue_info( + &self, + request: GetQueueInfoRequest, + ) -> Result { + get_queue_info(self.db_conn.as_ref(), request).await + } + pub async fn get_compressed_accounts_by_owner( &self, request: GetCompressedAccountsByOwnerRequest, @@ -381,25 +387,18 @@ impl PhotonApi { get_latest_non_voting_signatures(self.db_conn.as_ref(), request).await } - pub async fn get_batch_address_update_info( - &self, - request: GetBatchAddressUpdateInfoRequest, - ) -> Result { - get_batch_address_update_info(self.db_conn.as_ref(), request).await - } - pub fn method_api_specs() -> Vec { vec![ - OpenApiSpec { - name: "getBatchAddressUpdateInfo".to_string(), - request: Some(GetBatchAddressUpdateInfoRequest::schema().1), - response: GetBatchAddressUpdateInfoResponse::schema().1, - }, OpenApiSpec { name: "getQueueElements".to_string(), request: Some(GetQueueElementsRequest::schema().1), response: GetQueueElementsResponse::schema().1, }, + OpenApiSpec { + name: "getQueueInfo".to_string(), + request: Some(GetQueueInfoRequest::schema().1), + response: GetQueueInfoResponse::schema().1, + }, OpenApiSpec { name: "getCompressedAccount".to_string(), request: Some(CompressedAccountRequest::adjusted_schema()), diff --git a/src/api/method/get_batch_address_update_info.rs b/src/api/method/get_batch_address_update_info.rs deleted file mode 100644 index 71ae526c..00000000 --- a/src/api/method/get_batch_address_update_info.rs +++ /dev/null @@ -1,159 +0,0 @@ -use sea_orm::{ConnectionTrait, DatabaseConnection, Statement, TransactionTrait}; -use serde::{Deserialize, Serialize}; -use utoipa::ToSchema; - -use crate::api::error::PhotonApiError; -use crate::api::method::get_multiple_new_address_proofs::{ - get_multiple_new_address_proofs_helper, AddressWithTree, MerkleContextWithNewAddressProof, -}; -use 
crate::common::format_bytes; -use crate::common::typedefs::context::Context; -use crate::common::typedefs::hash::Hash; -use crate::common::typedefs::serializable_pubkey::SerializablePubkey; -use crate::ingester::parser::tree_info::TreeInfo; -use crate::ingester::persist::persisted_state_tree::get_subtrees; - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema, Default)] -#[serde(deny_unknown_fields, rename_all = "camelCase")] -pub struct GetBatchAddressUpdateInfoRequest { - pub tree: SerializablePubkey, - pub start_queue_index: Option, - pub limit: u16, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema, Default)] -#[serde(deny_unknown_fields, rename_all = "camelCase")] -pub struct AddressQueueIndex { - pub address: SerializablePubkey, - pub queue_index: u64, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema)] -#[serde(deny_unknown_fields, rename_all = "camelCase")] -pub struct GetBatchAddressUpdateInfoResponse { - pub context: Context, - pub start_index: u64, - pub addresses: Vec, - pub non_inclusion_proofs: Vec, - pub subtrees: Vec<[u8; 32]>, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema)] -#[serde(deny_unknown_fields, rename_all = "camelCase")] -pub struct GetBatchAddressUpdateInfoResponseValue { - pub proof: Vec, - pub root: Hash, - pub leaf_index: u64, - pub leaf: Hash, - pub tree: Hash, - pub root_seq: u64, - pub tx_hash: Option, - pub account_hash: Hash, -} - -const MAX_ADDRESSES: usize = 4000; - -pub async fn get_batch_address_update_info( - conn: &DatabaseConnection, - request: GetBatchAddressUpdateInfoRequest, -) -> Result { - if request.limit as usize > MAX_ADDRESSES { - return Err(PhotonApiError::ValidationError(format!( - "Too many addresses requested {}. Maximum allowed: {}", - request.limit, MAX_ADDRESSES - ))); - } - - let limit = request.limit; - let merkle_tree_pubkey = request.tree; - let tree_info = TreeInfo::get(conn, &merkle_tree_pubkey.to_string()) - .await? - .ok_or_else(|| PhotonApiError::UnexpectedError("Failed to get tree info".to_string()))?; - - let merkle_tree = SerializablePubkey::from(merkle_tree_pubkey.0).to_bytes_vec(); - - let context = Context::extract(conn).await?; - let tx = conn.begin().await?; - crate::api::set_transaction_isolation_if_needed(&tx).await?; - - // 1. Get batch_start_index - let max_index_stmt = Statement::from_string( - tx.get_database_backend(), - format!( - "SELECT COALESCE(MAX(leaf_index + 1), 1) as max_index FROM indexed_trees WHERE tree = {}", - format_bytes(merkle_tree.clone(), tx.get_database_backend()) - ), - ); - let max_index_result = tx.query_one(max_index_stmt).await?; - let batch_start_index = match max_index_result { - Some(row) => row.try_get::("", "max_index")? as usize, - None => 1, - }; - - let offset_condition = match request.start_queue_index { - Some(start_queue_index) => format!("AND queue_index >= {}", start_queue_index), - None => String::new(), - }; - - // 2. 
Get queue elements from the address_queues table - let address_queue_stmt = Statement::from_string( - tx.get_database_backend(), - format!( - "SELECT tree, address, queue_index FROM address_queues - WHERE tree = {} - {} - ORDER BY queue_index ASC - LIMIT {}", - format_bytes(merkle_tree.clone(), tx.get_database_backend()), - offset_condition, - limit - ), - ); - - let queue_results = tx.query_all(address_queue_stmt).await?; - - // Early exit if no elements in the queue - if queue_results.is_empty() { - tx.commit().await?; - return Ok(GetBatchAddressUpdateInfoResponse { - context, - addresses: Vec::new(), - non_inclusion_proofs: Vec::new(), - subtrees: Vec::new(), - start_index: batch_start_index as u64, - }); - } - - // 3. Build arrays for addresses and addresses with trees. - let mut addresses = Vec::new(); - let mut addresses_with_trees = Vec::new(); - let serializable_tree = SerializablePubkey::try_from(merkle_tree.clone())?; - - for row in &queue_results { - let address: Vec = row.try_get("", "address")?; - let queue_index: i64 = row.try_get("", "queue_index")?; - let address_pubkey = SerializablePubkey::try_from(address.clone())?; - addresses_with_trees.push(AddressWithTree { - address: address_pubkey, - tree: serializable_tree, - }); - addresses.push(AddressQueueIndex { - address: address_pubkey, - queue_index: queue_index as u64, - }); - } - - // 4. Get non-inclusion proofs for each address. - let non_inclusion_proofs = - get_multiple_new_address_proofs_helper(&tx, addresses_with_trees, MAX_ADDRESSES, false) - .await?; - let subtrees = get_subtrees(&tx, merkle_tree, tree_info.height as usize).await?; - - Ok(GetBatchAddressUpdateInfoResponse { - context, - start_index: batch_start_index as u64, - addresses, - non_inclusion_proofs, - subtrees, - }) -} diff --git a/src/api/method/get_multiple_new_address_proofs.rs b/src/api/method/get_multiple_new_address_proofs.rs index a28c1935..9ad38568 100644 --- a/src/api/method/get_multiple_new_address_proofs.rs +++ b/src/api/method/get_multiple_new_address_proofs.rs @@ -78,7 +78,7 @@ pub async fn get_multiple_new_address_proofs_helper( for (idx, AddressWithTree { address, tree }) in addresses.iter().enumerate() { addresses_by_tree .entry(*tree) - .or_insert_with(Vec::new) + .or_default() .push((idx, *address)); } diff --git a/src/api/method/get_queue_elements.rs b/src/api/method/get_queue_elements.rs index cd6ef57e..4d4d80fd 100644 --- a/src/api/method/get_queue_elements.rs +++ b/src/api/method/get_queue_elements.rs @@ -1,47 +1,170 @@ +use crate::api::error::PhotonApiError; +use crate::api::method::get_multiple_new_address_proofs::{ + get_multiple_new_address_proofs_helper, AddressWithTree, +}; +use crate::common::format_bytes; +use crate::common::typedefs::context::Context; +use crate::common::typedefs::hash::Hash; +use crate::common::typedefs::serializable_pubkey::SerializablePubkey; +use crate::dao::generated::{accounts, state_trees}; +use crate::ingester::parser::tree_info::TreeInfo; +use crate::ingester::persist::get_multiple_compressed_leaf_proofs_by_indices; +use crate::{ingester::persist::persisted_state_tree::get_subtrees, monitor::queue_hash_cache}; +use light_batched_merkle_tree::constants::{ + DEFAULT_ADDRESS_ZKP_BATCH_SIZE, DEFAULT_ZKP_BATCH_SIZE, +}; +use light_compressed_account::hash_chain::create_hash_chain_from_slice; use light_compressed_account::QueueType; +use light_hasher::{Hasher, Poseidon}; use sea_orm::{ - ColumnTrait, Condition, DatabaseConnection, EntityTrait, FromQueryResult, QueryFilter, - QueryOrder, QuerySelect, 
TransactionTrait,
+    ColumnTrait, Condition, ConnectionTrait, DatabaseConnection, EntityTrait, FromQueryResult,
+    QueryFilter, QueryOrder, QuerySelect, Statement, TransactionTrait,
 };
-
 use serde::{Deserialize, Serialize};
+use solana_pubkey::Pubkey;
+use std::collections::HashMap;
 use utoipa::ToSchema;
 
-use crate::api::error::PhotonApiError;
-use crate::common::typedefs::context::Context;
-use crate::common::typedefs::hash::Hash;
-use crate::common::typedefs::serializable_pubkey::SerializablePubkey;
-use crate::dao::generated::accounts;
-use crate::ingester::persist::get_multiple_compressed_leaf_proofs_by_indices;
+const MAX_QUEUE_ELEMENTS: u16 = 30_000;
+const MAX_QUEUE_ELEMENTS_SQLITE: u16 = 500;
+
+/// Encode tree node position as a single u64
+/// Format: [level: u8][position: 56 bits]
+/// Level 0 = leaves, Level tree_height-1 = root
+#[inline]
+fn encode_node_index(level: u8, position: u64, tree_height: u8) -> u64 {
+    debug_assert!(
+        level <= tree_height,
+        "level {} > tree_height {}",
+        level,
+        tree_height
+    );
+    ((level as u64) << 56) | position
+}
+
+/// Convert leaf_index to node_index in binary tree format (root=1, children of N are 2N and 2N+1)
+#[inline]
+fn leaf_index_to_node_index(leaf_index: u32, tree_height: u32) -> i64 {
+    2_i64.pow(tree_height - 1) + leaf_index as i64
+}
+
+struct StateQueueProofData {
+    proofs: Vec<crate::ingester::persist::MerkleProofWithContext>,
+    tree_height: u8,
+    /// Path nodes from DB: maps (node_idx in DB format) -> hash
+    path_nodes: HashMap<i64, Hash>,
+}
+
+enum QueueData {
+    Output(OutputQueueData, Option<StateQueueProofData>),
+    Input(InputQueueData, Option<StateQueueProofData>),
+}
+
+/// Parameters for requesting queue elements
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema)]
+#[serde(deny_unknown_fields, rename_all = "camelCase")]
+pub struct QueueRequest {
+    pub limit: u16,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub start_index: Option<u64>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub zkp_batch_size: Option<u16>,
+}
 
 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema, Default)]
 #[serde(deny_unknown_fields, rename_all = "camelCase")]
 pub struct GetQueueElementsRequest {
     pub tree: Hash,
-    pub start_queue_index: Option<u64>,
-    pub limit: u16,
-    pub queue_type: u8,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub output_queue: Option<QueueRequest>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub input_queue: Option<QueueRequest>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub address_queue: Option<QueueRequest>,
 }
 
 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema)]
 #[serde(deny_unknown_fields, rename_all = "camelCase")]
 pub struct GetQueueElementsResponse {
     pub context: Context,
-    pub value: Vec<GetQueueElementsResponseValue>,
-    pub first_value_queue_index: u64,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub state_queue: Option<StateQueueData>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub address_queue: Option<AddressQueueData>,
 }
 
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema)]
+/// A tree node with its encoded index and hash
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema, Default)]
 #[serde(deny_unknown_fields, rename_all = "camelCase")]
-pub struct GetQueueElementsResponseValue {
-    pub proof: Vec<Hash>,
-    pub root: Hash,
-    pub leaf_index: u64,
-    pub leaf: Hash,
-    pub tree: Hash,
+pub struct Node {
+    /// Encoded node index: (level << 56) | position
+    pub index: u64,
+    pub hash: Hash,
+}
+
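+// Illustrative sketch (hypothetical helper, not defined in this change): the
+// `Node.index` encoding above can be reversed with a shift and a mask:
+//
+//     fn decode_node_index(index: u64) -> (u8, u64) {
+//         let level = (index >> 56) as u8;            // high byte carries the level
+//         let position = index & ((1u64 << 56) - 1);  // low 56 bits carry the position
+//         (level, position)
+//     }
+//
+// Round trip: encode_node_index(3, 5, 32) == (3 << 56) | 5, which decodes
+// back to (3, 5).
+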
+/// State queue data with shared tree nodes for output and input queues
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema, Default)]
+#[serde(deny_unknown_fields, rename_all = "camelCase")]
+pub struct StateQueueData {
+    /// Shared deduplicated tree nodes for state queues (output + input)
+    #[serde(skip_serializing_if = "Vec::is_empty", default)]
+    pub nodes: Vec<Node>,
+    /// Initial root for the state tree (shared by output and input queues)
+    pub initial_root: Hash,
+    /// Sequence number of the root
+    pub root_seq: u64,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub output_queue: Option<OutputQueueData>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub input_queue: Option<InputQueueData>,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema, Default)]
+#[serde(deny_unknown_fields, rename_all = "camelCase")]
+pub struct OutputQueueData {
+    pub leaf_indices: Vec<u64>,
+    pub account_hashes: Vec<Hash>,
+    pub leaves: Vec<Hash>,
+    pub first_queue_index: u64,
+    pub next_index: u64,
+    pub leaves_hash_chains: Vec<Hash>,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema, Default)]
+#[serde(deny_unknown_fields, rename_all = "camelCase")]
+pub struct InputQueueData {
+    pub leaf_indices: Vec<u64>,
+    pub account_hashes: Vec<Hash>,
+    pub leaves: Vec<Hash>,
+    pub tx_hashes: Vec<Hash>,
+    pub nullifiers: Vec<Hash>,
+    pub first_queue_index: u64,
+    pub leaves_hash_chains: Vec<Hash>,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema, Default)]
+#[serde(deny_unknown_fields, rename_all = "camelCase")]
+pub struct AddressQueueData {
+    pub addresses: Vec<SerializablePubkey>,
+    pub queue_indices: Vec<u64>,
+    /// Deduplicated tree nodes - clients reconstruct proofs from these using low_element_indices
+    pub nodes: Vec<Node>,
+    pub low_element_indices: Vec<u64>,
+    pub low_element_values: Vec<Hash>,
+    pub low_element_next_indices: Vec<u64>,
+    pub low_element_next_values: Vec<Hash>,
+    pub leaves_hash_chains: Vec<Hash>,
+    pub initial_root: Hash,
+    pub start_index: u64,
+    pub subtrees: Vec<Hash>,
     pub root_seq: u64,
-    pub tx_hash: Option<Hash>,
-    pub account_hash: Hash,
 }
 
 #[derive(FromQueryResult, Debug)]
@@ -50,46 +173,199 @@ struct QueueElement {
     hash: Vec<u8>,
     tx_hash: Option<Vec<u8>>,
     nullifier_queue_index: Option<i64>,
+    nullifier: Option<Vec<u8>>,
 }
 
 pub async fn get_queue_elements(
     conn: &DatabaseConnection,
     request: GetQueueElementsRequest,
 ) -> Result<GetQueueElementsResponse, PhotonApiError> {
-    let queue_type = QueueType::from(request.queue_type as u64);
+    let has_output_request = request.output_queue.is_some();
+    let has_input_request = request.input_queue.is_some();
+    let has_address_request = request.address_queue.is_some();
 
-    if request.limit > 1000 {
-        return Err(PhotonApiError::ValidationError(format!(
-            "Too many queue elements requested {}. Maximum allowed: 1000",
-            request.limit
-        )));
+    if !has_output_request && !has_input_request && !has_address_request {
+        return Err(PhotonApiError::ValidationError(
+            "At least one queue must be requested".to_string(),
+        ));
     }
 
-    let limit = request.limit;
     let context = Context::extract(conn).await?;
+
     let tx = conn.begin().await?;
     crate::api::set_transaction_isolation_if_needed(&tx).await?;
 
-    let mut query_condition =
-        Condition::all().add(accounts::Column::Tree.eq(request.tree.to_vec()));
+    // Fetch output and input queues with their proof data
+    let (output_queue, output_proof_data) = if let Some(ref req) = request.output_queue {
+        match fetch_queue(
+            &tx,
+            &request.tree,
+            QueueType::OutputStateV2,
+            req.start_index,
+            req.limit,
+            req.zkp_batch_size,
+        )
+        .await?
+        {
+            QueueData::Output(data, proof_data) => (Some(data), proof_data),
+            QueueData::Input(_, _) => unreachable!("OutputState should return Output"),
+        }
+    } else {
+        (None, None)
+    };
+
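+    // For reference, a hypothetical JSON-RPC request that exercises both state
+    // queues (field names follow the camelCase serde renames on the request
+    // structs above; all values are illustrative):
+    //
+    //     {"jsonrpc": "2.0", "id": 1, "method": "getQueueElements", "params": {
+    //       "tree": "<base58 tree pubkey>",
+    //       "outputQueue": {"limit": 500, "zkpBatchSize": 500},
+    //       "inputQueue": {"limit": 500, "startIndex": 0}
+    //     }}
+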
+    let (input_queue, input_proof_data) = if let Some(ref req) = request.input_queue {
+        match fetch_queue(
+            &tx,
+            &request.tree,
+            QueueType::InputStateV2,
+            req.start_index,
+            req.limit,
+            req.zkp_batch_size,
+        )
+        .await?
+        {
+            QueueData::Input(data, proof_data) => (Some(data), proof_data),
+            QueueData::Output(_, _) => unreachable!("InputState should return Input"),
+        }
+    } else {
+        (None, None)
+    };
 
-    match queue_type {
+    let state_queue = if has_output_request || has_input_request {
+        let (nodes, initial_root, root_seq) =
+            merge_state_queue_proofs(&output_proof_data, &input_proof_data)?;
+
+        Some(StateQueueData {
+            nodes,
+            initial_root,
+            root_seq,
+            output_queue,
+            input_queue,
+        })
+    } else {
+        None
+    };
+
+    let address_queue = if let Some(ref req) = request.address_queue {
+        let zkp_batch_size = req
+            .zkp_batch_size
+            .unwrap_or(DEFAULT_ADDRESS_ZKP_BATCH_SIZE as u16);
+        Some(
+            fetch_address_queue_v2(
+                &tx,
+                &request.tree,
+                req.start_index,
+                req.limit,
+                zkp_batch_size,
+            )
+            .await?,
+        )
+    } else {
+        None
+    };
+
+    tx.commit().await?;
+
+    Ok(GetQueueElementsResponse {
+        context,
+        state_queue,
+        address_queue,
+    })
+}
+
+fn merge_state_queue_proofs(
+    output_proof_data: &Option<StateQueueProofData>,
+    input_proof_data: &Option<StateQueueProofData>,
+) -> Result<(Vec<Node>, Hash, u64), PhotonApiError> {
+    let mut all_proofs: Vec<&crate::ingester::persist::MerkleProofWithContext> = Vec::new();
+    let mut all_path_nodes: HashMap<i64, Hash> = HashMap::new();
+    let mut tree_height: Option<u8> = None;
+    let mut initial_root: Option<Hash> = None;
+    let mut root_seq: Option<u64> = None;
+
+    // Collect proofs and path nodes from output queue
+    if let Some(ref proof_data) = output_proof_data {
+        tree_height = Some(proof_data.tree_height);
+        all_path_nodes.extend(proof_data.path_nodes.clone());
+        for proof in &proof_data.proofs {
+            if initial_root.is_none() {
+                initial_root = Some(proof.root.clone());
+                root_seq = Some(proof.root_seq);
+            }
+            all_proofs.push(proof);
+        }
+    }
+
+    // Collect proofs and path nodes from input queue
+    if let Some(ref proof_data) = input_proof_data {
+        if tree_height.is_none() {
+            tree_height = Some(proof_data.tree_height);
+        }
+        all_path_nodes.extend(proof_data.path_nodes.clone());
+        for proof in &proof_data.proofs {
+            if initial_root.is_none() {
+                initial_root = Some(proof.root.clone());
+                root_seq = Some(proof.root_seq);
+            }
+            all_proofs.push(proof);
+        }
+    }
+
+    if all_proofs.is_empty() || tree_height.is_none() {
+        return Ok((Vec::new(), Hash::default(), 0));
+    }
+
+    let height = tree_height.unwrap();
+    let nodes = deduplicate_nodes_from_refs(&all_proofs, height, &all_path_nodes);
+
+    Ok((
+        nodes,
+        initial_root.unwrap_or_default(),
+        root_seq.unwrap_or_default(),
+    ))
+}
+
+async fn fetch_queue(
+    tx: &sea_orm::DatabaseTransaction,
+    tree: &Hash,
+    queue_type: QueueType,
+    start_index: Option<u64>,
+    limit: u16,
+    zkp_batch_size_hint: Option<u16>,
+) -> Result<QueueData, PhotonApiError> {
+    if limit > MAX_QUEUE_ELEMENTS {
+        return Err(PhotonApiError::ValidationError(format!(
+            "Too many queue elements requested {}. 
Maximum allowed: {}", + limit, MAX_QUEUE_ELEMENTS + ))); + } + + let mut query_condition = Condition::all().add(accounts::Column::Tree.eq(tree.to_vec())); + + let query = match queue_type { QueueType::InputStateV2 => { query_condition = query_condition .add(accounts::Column::NullifierQueueIndex.is_not_null()) - .add(accounts::Column::NullifiedInTree.eq(false)); - if let Some(start_queue_index) = request.start_queue_index { + .add(accounts::Column::NullifiedInTree.eq(false)) + .add(accounts::Column::Spent.eq(true)); + if let Some(start_queue_index) = start_index { query_condition = query_condition - .add(accounts::Column::NullifierQueueIndex.gte(start_queue_index as i64)) - .add(accounts::Column::NullifiedInTree.eq(false)); + .add(accounts::Column::NullifierQueueIndex.gte(start_queue_index as i64)); } + accounts::Entity::find() + .filter(query_condition) + .order_by_asc(accounts::Column::NullifierQueueIndex) } QueueType::OutputStateV2 => { query_condition = query_condition.add(accounts::Column::InOutputQueue.eq(true)); - if let Some(start_queue_index) = request.start_queue_index { + if let Some(start_queue_index) = start_index { query_condition = query_condition.add(accounts::Column::LeafIndex.gte(start_queue_index as i64)); } + accounts::Entity::find() + .filter(query_condition) + .order_by_asc(accounts::Column::LeafIndex) } _ => { return Err(PhotonApiError::ValidationError(format!( @@ -97,90 +373,704 @@ pub async fn get_queue_elements( queue_type ))) } - } - - let query = match queue_type { - QueueType::InputStateV2 => accounts::Entity::find() - .filter(query_condition) - .order_by_asc(accounts::Column::NullifierQueueIndex), - QueueType::OutputStateV2 => accounts::Entity::find() - .filter(query_condition) - .order_by_asc(accounts::Column::LeafIndex), - _ => { - return Err(PhotonApiError::ValidationError(format!( - "Invalid queue type: {:?}", - queue_type - ))) - } }; - let queue_elements: Vec = query + let mut queue_elements: Vec = query .limit(limit as u64) .into_model::() - .all(&tx) + .all(tx) .await .map_err(|e| { PhotonApiError::UnexpectedError(format!("DB error fetching queue elements: {}", e)) })?; - let indices: Vec = queue_elements.iter().map(|e| e.leaf_index as u64).collect(); - let (proofs, first_value_queue_index) = if !indices.is_empty() { - let first_value_queue_index = match queue_type { - QueueType::InputStateV2 => Ok(queue_elements[0].nullifier_queue_index.ok_or( - PhotonApiError::ValidationError("Nullifier queue index is missing".to_string()), - )? as u64), - QueueType::OutputStateV2 => Ok(queue_elements[0].leaf_index as u64), - _ => Err(PhotonApiError::ValidationError(format!( - "Invalid queue type: {:?}", - queue_type - ))), - }?; - let generated_proofs = get_multiple_compressed_leaf_proofs_by_indices( - &tx, - SerializablePubkey::from(request.tree.0), - indices.clone(), - ) - .await?; - if generated_proofs.len() != indices.len() { + + if queue_elements.is_empty() { + return Ok(match queue_type { + QueueType::OutputStateV2 => QueueData::Output(OutputQueueData::default(), None), + QueueType::InputStateV2 => QueueData::Input(InputQueueData::default(), None), + _ => unreachable!("Only OutputState and InputState are supported"), + }); + } + + let mut indices: Vec = queue_elements.iter().map(|e| e.leaf_index as u64).collect(); + let first_queue_index = match queue_type { + QueueType::InputStateV2 => { + queue_elements[0] + .nullifier_queue_index + .ok_or(PhotonApiError::ValidationError( + "Nullifier queue index is missing".to_string(), + ))? 
as u64 + } + QueueType::OutputStateV2 => queue_elements[0].leaf_index as u64, + _ => unreachable!("Only OutputState and InputState are supported"), + }; + if let Some(start) = start_index { + if first_queue_index > start { return Err(PhotonApiError::ValidationError(format!( - "Expected {} proofs for {} queue elements, but got {} proofs", - indices.len(), - queue_elements.len(), - generated_proofs.len() + "Requested start_index {} but first_queue_index {} is later (possible pruning)", + start, first_queue_index ))); } + } + + let serializable_tree = SerializablePubkey::from(tree.0); + + let tree_info = TreeInfo::get(tx, &serializable_tree.to_string()) + .await? + .ok_or_else(|| PhotonApiError::UnexpectedError("Failed to get tree info".to_string()))?; - (generated_proofs, first_value_queue_index) + // For output queue, next_index is where the elements will be appended. + // This is the minimum leaf_index of the queued elements (first_queue_index). + // We cannot use tree_metadata.next_index because it's only updated by the monitor, + // not by the ingester when processing batch events. + let next_index = if queue_type == QueueType::OutputStateV2 { + first_queue_index } else { - (vec![], 0) + 0 }; - tx.commit().await?; + let zkp_batch_size = zkp_batch_size_hint + .filter(|v| *v > 0) + .unwrap_or(DEFAULT_ZKP_BATCH_SIZE as u16) as usize; + if zkp_batch_size > 0 { + let full_batches = indices.len() / zkp_batch_size; + let allowed = full_batches * zkp_batch_size; + if allowed == 0 { + return Ok(match queue_type { + QueueType::OutputStateV2 => QueueData::Output(OutputQueueData::default(), None), + QueueType::InputStateV2 => QueueData::Input(InputQueueData::default(), None), + _ => unreachable!("Only OutputState and InputState are supported"), + }); + } + if indices.len() > allowed { + indices.truncate(allowed); + queue_elements.truncate(allowed); + } + } - let result: Vec = proofs - .into_iter() - .zip(queue_elements.iter()) - .map(|(proof, queue_element)| { - let tx_hash = queue_element - .tx_hash - .as_ref() - .map(|tx_hash| Hash::new(tx_hash.as_slice()).unwrap()); - let account_hash = Hash::new(queue_element.hash.as_slice()).unwrap(); - Ok(GetQueueElementsResponseValue { - proof: proof.proof, - root: proof.root, - leaf_index: proof.leaf_index as u64, - leaf: proof.hash, - tree: Hash::from(proof.merkle_tree.0.to_bytes()), - root_seq: proof.root_seq, - tx_hash, - account_hash, + let generated_proofs = + get_multiple_compressed_leaf_proofs_by_indices(tx, serializable_tree, indices.clone()) + .await?; + + if generated_proofs.len() != indices.len() { + return Err(PhotonApiError::ValidationError(format!( + "Expected {} proofs for {} queue elements, but got {} proofs", + indices.len(), + queue_elements.len(), + generated_proofs.len() + ))); + } + + // Fetch path nodes (ancestors) from DB for all leaves + let tree_height_u32 = tree_info.height as u32 + 1; + let path_nodes = + fetch_path_nodes_from_db(tx, &serializable_tree, &indices, tree_height_u32).await?; + + // Return proofs for merging at response level + let proof_data = Some(StateQueueProofData { + proofs: generated_proofs.clone(), + tree_height: tree_info.height as u8, + path_nodes, + }); + + let leaf_indices = indices.clone(); + let account_hashes: Vec = queue_elements + .iter() + .enumerate() + .map(|(idx, e)| { + Hash::new(e.hash.as_slice()).map_err(|err| { + PhotonApiError::UnexpectedError(format!( + "Invalid hash for queue element at index {} (leaf_index={}): {}", + idx, e.leaf_index, err + )) }) }) - .collect::>()?; + .collect::, 
PhotonApiError>>()?; + let leaves: Vec = generated_proofs.iter().map(|p| p.hash.clone()).collect(); - Ok(GetQueueElementsResponse { - context, - value: result, - first_value_queue_index, + let tree_pubkey_bytes: [u8; 32] = serializable_tree + .to_bytes_vec() + .as_slice() + .try_into() + .map_err(|_| PhotonApiError::UnexpectedError("Invalid tree pubkey bytes".to_string()))?; + let tree_pubkey = Pubkey::new_from_array(tree_pubkey_bytes); + + let batch_start_index = first_queue_index; + let cached = + queue_hash_cache::get_cached_hash_chains(tx, tree_pubkey, queue_type, batch_start_index) + .await + .map_err(|e| PhotonApiError::UnexpectedError(format!("Cache error: {}", e)))?; + + let expected_batch_count = indices.len() / zkp_batch_size; + let leaves_hash_chains = if !cached.is_empty() && cached.len() >= expected_batch_count { + let mut sorted = cached; + sorted.sort_by_key(|c| c.zkp_batch_index); + sorted + .into_iter() + .take(expected_batch_count) + .map(|entry| Hash::from(entry.hash_chain)) + .collect() + } else { + // Fall back to computing locally if cache is empty (e.g., monitor hasn't run yet) + log::warn!( + "No cached hash chains for {:?} queue (batch_start_index={}, cached={}, expected={})", + queue_type, + batch_start_index, + cached.len(), + expected_batch_count + ); + compute_state_queue_hash_chains(&queue_elements, queue_type, zkp_batch_size)? + }; + + Ok(match queue_type { + QueueType::OutputStateV2 => QueueData::Output( + OutputQueueData { + leaf_indices, + account_hashes, + leaves, + first_queue_index, + next_index, + leaves_hash_chains, + }, + proof_data, + ), + QueueType::InputStateV2 => { + let tx_hashes: Result, PhotonApiError> = queue_elements + .iter() + .enumerate() + .map(|(idx, e)| { + e.tx_hash + .as_ref() + .ok_or_else(|| { + PhotonApiError::UnexpectedError(format!( + "Missing tx_hash for spent queue element at index {} (leaf_index={})", + idx, e.leaf_index + )) + }) + .and_then(|tx| { + Hash::new(tx.as_slice()).map_err(|e| { + PhotonApiError::UnexpectedError(format!("Invalid tx_hash: {}", e)) + }) + }) + }) + .collect(); + + let nullifiers: Result, PhotonApiError> = queue_elements + .iter() + .enumerate() + .map(|(idx, e)| { + e.nullifier + .as_ref() + .ok_or_else(|| { + PhotonApiError::UnexpectedError(format!( + "Missing nullifier for spent queue element at index {} (leaf_index={})", + idx, e.leaf_index + )) + }) + .and_then(|n| { + Hash::new(n.as_slice()).map_err(|e| { + PhotonApiError::UnexpectedError(format!("Invalid nullifier: {}", e)) + }) + }) + }) + .collect(); + + QueueData::Input( + InputQueueData { + leaf_indices, + account_hashes, + leaves, + tx_hashes: tx_hashes?, + nullifiers: nullifiers?, + first_queue_index, + leaves_hash_chains, + }, + proof_data, + ) + } + _ => unreachable!("Only OutputState and InputState are supported"), }) } + +fn compute_state_queue_hash_chains( + queue_elements: &[QueueElement], + queue_type: QueueType, + zkp_batch_size: usize, +) -> Result, PhotonApiError> { + use light_compressed_account::hash_chain::create_hash_chain_from_slice; + + if zkp_batch_size == 0 || queue_elements.is_empty() { + return Ok(Vec::new()); + } + + let batch_count = queue_elements.len() / zkp_batch_size; + if batch_count == 0 { + return Ok(Vec::new()); + } + + let mut hash_chains = Vec::with_capacity(batch_count); + + for batch_idx in 0..batch_count { + let start = batch_idx * zkp_batch_size; + let end = start + zkp_batch_size; + let batch_elements = &queue_elements[start..end]; + + let mut values: Vec<[u8; 32]> = Vec::with_capacity(zkp_batch_size); 
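+        // Conceptually (assuming light's hash-chain definition), the loop below
+        // collects one 32-byte value per element and the subsequent
+        // create_hash_chain_from_slice call folds them into a sequential
+        // Poseidon chain, chain = H(...H(H(v0, v1), v2)..., v{n-1}), so a
+        // single 32-byte value commits to the whole zkp batch.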
+ + for element in batch_elements { + let value: [u8; 32] = match queue_type { + QueueType::OutputStateV2 => element.hash.as_slice().try_into().map_err(|_| { + PhotonApiError::UnexpectedError(format!( + "Invalid hash length: expected 32 bytes, got {}", + element.hash.len() + )) + })?, + QueueType::InputStateV2 => element + .nullifier + .as_ref() + .ok_or_else(|| { + PhotonApiError::UnexpectedError( + "Missing nullifier for InputState queue element".to_string(), + ) + })? + .as_slice() + .try_into() + .map_err(|_| { + PhotonApiError::UnexpectedError( + "Invalid nullifier length: expected 32 bytes".to_string(), + ) + })?, + _ => { + return Err(PhotonApiError::ValidationError(format!( + "Unsupported queue type for hash chain computation: {:?}", + queue_type + ))) + } + }; + values.push(value); + } + + let hash_chain = create_hash_chain_from_slice(&values).map_err(|e| { + PhotonApiError::UnexpectedError(format!("Hash chain computation error: {}", e)) + })?; + + hash_chains.push(Hash::from(hash_chain)); + } + + log::debug!( + "Computed {} hash chains for {:?} queue with {} elements (zkp_batch_size={})", + hash_chains.len(), + queue_type, + queue_elements.len(), + zkp_batch_size + ); + + Ok(hash_chains) +} + +async fn fetch_address_queue_v2( + tx: &sea_orm::DatabaseTransaction, + tree: &Hash, + start_queue_index: Option, + limit: u16, + zkp_batch_size: u16, +) -> Result { + let max_allowed = match tx.get_database_backend() { + sea_orm::DatabaseBackend::Sqlite => MAX_QUEUE_ELEMENTS_SQLITE, + _ => MAX_QUEUE_ELEMENTS, + }; + if limit > max_allowed { + return Err(PhotonApiError::ValidationError(format!( + "Too many addresses requested {}. Maximum allowed: {}", + limit, max_allowed + ))); + } + + let merkle_tree_bytes = tree.to_vec(); + let serializable_tree = + SerializablePubkey::try_from(merkle_tree_bytes.clone()).map_err(|_| { + PhotonApiError::UnexpectedError("Failed to parse merkle tree pubkey".to_string()) + })?; + + let tree_info = TreeInfo::get(tx, &serializable_tree.to_string()) + .await? + .ok_or_else(|| PhotonApiError::UnexpectedError("Failed to get tree info".to_string()))?; + + let max_index_stmt = Statement::from_string( + tx.get_database_backend(), + format!( + "SELECT COALESCE(MAX(leaf_index + 1), 1) as max_index FROM indexed_trees WHERE tree = {}", + format_bytes(merkle_tree_bytes.clone(), tx.get_database_backend()) + ), + ); + let max_index_result = tx.query_one(max_index_stmt).await?; + let batch_start_index = match max_index_result { + Some(row) => row.try_get::("", "max_index")? as usize, + None => 1, + }; + + let offset_condition = match start_queue_index { + Some(start) => format!("AND queue_index >= {}", start), + None => String::new(), + }; + + let address_queue_stmt = Statement::from_string( + tx.get_database_backend(), + format!( + "SELECT tree, address, queue_index FROM address_queues + WHERE tree = {} + {} + ORDER BY queue_index ASC + LIMIT {}", + format_bytes(merkle_tree_bytes.clone(), tx.get_database_backend()), + offset_condition, + limit + ), + ); + + let queue_results = tx.query_all(address_queue_stmt).await.map_err(|e| { + PhotonApiError::UnexpectedError(format!("DB error fetching address queue: {}", e)) + })?; + + let subtrees = get_subtrees(tx, merkle_tree_bytes.clone(), tree_info.height as usize) + .await? 
+ .into_iter() + .map(Hash::from) + .collect(); + + if queue_results.is_empty() { + return Ok(AddressQueueData { + start_index: batch_start_index as u64, + subtrees, + ..Default::default() + }); + } + + let mut addresses = Vec::with_capacity(queue_results.len()); + let mut queue_indices = Vec::with_capacity(queue_results.len()); + let mut addresses_with_trees = Vec::with_capacity(queue_results.len()); + + for row in &queue_results { + let address: Vec = row.try_get("", "address")?; + let queue_index: i64 = row.try_get("", "queue_index")?; + let address_pubkey = SerializablePubkey::try_from(address.clone()).map_err(|e| { + PhotonApiError::UnexpectedError(format!("Failed to parse address: {}", e)) + })?; + + addresses.push(address_pubkey); + queue_indices.push(queue_index as u64); + addresses_with_trees.push(AddressWithTree { + address: address_pubkey, + tree: serializable_tree, + }); + } + + let non_inclusion_proofs = get_multiple_new_address_proofs_helper( + tx, + addresses_with_trees, + max_allowed as usize, + false, + ) + .await?; + + if non_inclusion_proofs.len() != queue_results.len() { + return Err(PhotonApiError::ValidationError(format!( + "Expected {} proofs for {} queue elements, but got {} proofs", + queue_results.len(), + queue_results.len(), + non_inclusion_proofs.len() + ))); + } + + let mut nodes_map: HashMap = HashMap::new(); + let mut low_element_indices = Vec::with_capacity(non_inclusion_proofs.len()); + let mut low_element_values = Vec::with_capacity(non_inclusion_proofs.len()); + let mut low_element_next_indices = Vec::with_capacity(non_inclusion_proofs.len()); + let mut low_element_next_values = Vec::with_capacity(non_inclusion_proofs.len()); + + // Track which low_element_leaf_indices we've already processed to avoid redundant hash computations + let mut processed_leaf_indices: std::collections::HashSet = + std::collections::HashSet::new(); + + for proof in &non_inclusion_proofs { + let low_value = Hash::new(&proof.lowerRangeAddress.to_bytes_vec()).map_err(|e| { + PhotonApiError::UnexpectedError(format!("Invalid low element value: {}", e)) + })?; + let next_value = Hash::new(&proof.higherRangeAddress.to_bytes_vec()).map_err(|e| { + PhotonApiError::UnexpectedError(format!("Invalid next element value: {}", e)) + })?; + + low_element_indices.push(proof.lowElementLeafIndex as u64); + low_element_values.push(low_value.clone()); + low_element_next_indices.push(proof.nextIndex as u64); + low_element_next_values.push(next_value.clone()); + + // Skip node computation if we've already processed this leaf index + // This is a huge optimization for empty/sparse trees where many addresses share the same low element + if processed_leaf_indices.contains(&proof.lowElementLeafIndex) { + continue; + } + processed_leaf_indices.insert(proof.lowElementLeafIndex); + + let leaf_idx = + encode_node_index(0, proof.lowElementLeafIndex as u64, tree_info.height as u8); + let hashed_leaf = compute_indexed_leaf_hash(&low_value, &next_value)?; + nodes_map.insert(leaf_idx, hashed_leaf.clone()); + + let mut pos = proof.lowElementLeafIndex as u64; + let mut current_hash = hashed_leaf; + + for (level, sibling_hash) in proof.proof.iter().enumerate() { + let sibling_pos = if pos % 2 == 0 { pos + 1 } else { pos - 1 }; + + let sibling_idx = encode_node_index(level as u8, sibling_pos, tree_info.height as u8); + nodes_map.insert(sibling_idx, sibling_hash.clone()); + + let parent_hash = if pos % 2 == 0 { + Poseidon::hashv(&[¤t_hash.0, &sibling_hash.0]) + } else { + Poseidon::hashv(&[&sibling_hash.0, ¤t_hash.0]) 
+ }; + + match parent_hash { + Ok(hash) => { + current_hash = Hash::from(hash); + let parent_pos = pos / 2; + let parent_idx = + encode_node_index((level + 1) as u8, parent_pos, tree_info.height as u8); + nodes_map.insert(parent_idx, current_hash.clone()); + } + Err(_) => { + break; + } + } + + pos /= 2; + } + } + + let mut sorted_nodes: Vec<(u64, Hash)> = nodes_map.into_iter().collect(); + sorted_nodes.sort_by_key(|(idx, _)| *idx); + let nodes: Vec = sorted_nodes + .into_iter() + .map(|(index, hash)| Node { index, hash }) + .collect(); + + let initial_root = non_inclusion_proofs + .first() + .map(|proof| proof.root.clone()) + .unwrap_or_default(); + let root_seq = non_inclusion_proofs + .first() + .map(|proof| proof.rootSeq) + .unwrap_or_default(); + + let mut leaves_hash_chains = Vec::new(); + if !addresses.is_empty() && zkp_batch_size > 0 { + let batch_size = zkp_batch_size as usize; + let batch_count = addresses.len() / batch_size; + + let first_queue_index = queue_indices.first().copied().unwrap_or(0); + let cache_key = first_queue_index + 1; + + let tree_pubkey_bytes: [u8; 32] = serializable_tree + .to_bytes_vec() + .as_slice() + .try_into() + .map_err(|_| { + PhotonApiError::UnexpectedError("Invalid tree pubkey bytes".to_string()) + })?; + let tree_pubkey = Pubkey::new_from_array(tree_pubkey_bytes); + + let cached = queue_hash_cache::get_cached_hash_chains( + tx, + tree_pubkey, + QueueType::AddressV2, + cache_key, + ) + .await + .unwrap_or_default(); + + log::debug!( + "Address queue hash chain: first_queue_index={}, cache_key={}, cached_count={}, expected_count={}, addresses={}", + first_queue_index, + cache_key, + cached.len(), + batch_count, + addresses.len() + ); + + if !cached.is_empty() && cached.len() >= batch_count && batch_count > 0 { + log::debug!( + "Using {} of {} cached hash chains for cache_key={}", + batch_count, + cached.len(), + cache_key + ); + let mut sorted = cached; + sorted.sort_by_key(|c| c.zkp_batch_index); + for entry in sorted.into_iter().take(batch_count) { + leaves_hash_chains.push(Hash::from(entry.hash_chain)); + } + } else { + // Compute fresh hash chains from the actual addresses + log::debug!( + "Computing {} fresh hash chains for {} addresses (cache miss or insufficient)", + batch_count, + addresses.len() + ); + + for batch_idx in 0..batch_count { + let start = batch_idx * batch_size; + let end = start + batch_size; + let slice = &addresses[start..end]; + + let mut decoded = Vec::with_capacity(batch_size); + for pk in slice { + let bytes = pk.to_bytes_vec(); + let arr: [u8; 32] = bytes.as_slice().try_into().map_err(|_| { + PhotonApiError::UnexpectedError( + "Invalid address pubkey length for hash chain".to_string(), + ) + })?; + decoded.push(arr); + } + + let hash_chain = create_hash_chain_from_slice(&decoded).map_err(|e| { + PhotonApiError::UnexpectedError(format!("Hash chain error: {}", e)) + })?; + + leaves_hash_chains.push(Hash::from(hash_chain)); + } + } + } + + Ok(AddressQueueData { + addresses, + queue_indices, + nodes, + low_element_indices, + low_element_values, + low_element_next_indices, + low_element_next_values, + leaves_hash_chains, + initial_root, + start_index: batch_start_index as u64, + subtrees, + root_seq, + }) +} + +/// Deduplicate nodes across all merkle proofs using pre-fetched path nodes from DB. +/// Returns a Vec sorted by index. +/// Uses path_nodes (DB node_idx -> hash) for parent hashes instead of computing them. 
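+// Client-side sketch (hypothetical, assuming `nodes` was first collected into a
+// HashMap<u64, Hash> keyed by the encoded index): a proof for a leaf at
+// position `pos` is rebuilt by looking up the sibling at each level and
+// walking toward the root:
+//
+//     for level in 0..height {
+//         let sibling = pos ^ 1; // flipping the low bit yields the sibling
+//         proof.push(nodes[&encode_node_index(level, sibling, height)].clone());
+//         pos >>= 1;
+//     }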
+fn deduplicate_nodes_from_refs( + proofs: &[&crate::ingester::persist::MerkleProofWithContext], + tree_height: u8, + path_nodes: &HashMap, +) -> Vec { + let mut nodes_map: HashMap = HashMap::new(); + let tree_height_u32 = tree_height as u32 + 1; + + for proof_ctx in proofs { + let mut pos = proof_ctx.leaf_index as u64; + let mut db_node_idx = leaf_index_to_node_index(proof_ctx.leaf_index, tree_height_u32); + + // Store the leaf itself + let leaf_idx = encode_node_index(0, pos, tree_height); + nodes_map.insert(leaf_idx, proof_ctx.hash.clone()); + + // Walk up the proof path, storing sibling hashes and path node hashes from DB + for (level, sibling_hash) in proof_ctx.proof.iter().enumerate() { + let sibling_pos = if pos % 2 == 0 { pos + 1 } else { pos - 1 }; + + // Store the sibling (from proof) + let sibling_idx = encode_node_index(level as u8, sibling_pos, tree_height); + nodes_map.insert(sibling_idx, sibling_hash.clone()); + + // Move to parent + db_node_idx >>= 1; + pos /= 2; + + // Store the parent hash from DB (if available) + if let Some(parent_hash) = path_nodes.get(&db_node_idx) { + let parent_idx = encode_node_index((level + 1) as u8, pos, tree_height); + nodes_map.insert(parent_idx, parent_hash.clone()); + } + } + } + + let mut sorted_nodes: Vec<(u64, Hash)> = nodes_map.into_iter().collect(); + sorted_nodes.sort_by_key(|(idx, _)| *idx); + + sorted_nodes + .into_iter() + .map(|(index, hash)| Node { index, hash }) + .collect() +} + +fn compute_indexed_leaf_hash(low_value: &Hash, next_value: &Hash) -> Result { + let hashed = Poseidon::hashv(&[&low_value.0, &next_value.0]).map_err(|e| { + PhotonApiError::UnexpectedError(format!("Failed to hash indexed leaf: {}", e)) + })?; + Ok(Hash::from(hashed)) +} + +/// Fetch path nodes (all ancestors from leaf to root) from the database. +/// Returns a map from node_idx (in DB binary tree format) to hash. 
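+// Worked example (follows from `leaf_index_to_node_index` above): with
+// tree_height = 33, leaf 5 maps to node_idx 2^32 + 5; its ancestors are the
+// successive right-shifts node_idx >> 1, node_idx >> 2, ... down to the root
+// at node_idx 1, and that is exactly the index set queried below.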
+async fn fetch_path_nodes_from_db(
+    tx: &sea_orm::DatabaseTransaction,
+    tree: &SerializablePubkey,
+    leaf_indices: &[u64],
+    tree_height: u32,
+) -> Result<HashMap<i64, Hash>, PhotonApiError> {
+    use itertools::Itertools;
+
+    if leaf_indices.is_empty() {
+        return Ok(HashMap::new());
+    }
+
+    let tree_bytes = tree.to_bytes_vec();
+
+    let all_path_indices: Vec<i64> = leaf_indices
+        .iter()
+        .flat_map(|&leaf_idx| {
+            let node_idx = leaf_index_to_node_index(leaf_idx as u32, tree_height);
+            let mut path = vec![node_idx];
+            let mut current = node_idx;
+            while current > 1 {
+                current >>= 1;
+                path.push(current);
+            }
+            path
+        })
+        .sorted()
+        .dedup()
+        .collect();
+
+    if all_path_indices.is_empty() {
+        return Ok(HashMap::new());
+    }
+
+    let path_nodes = state_trees::Entity::find()
+        .filter(
+            state_trees::Column::Tree
+                .eq(tree_bytes)
+                .and(state_trees::Column::NodeIdx.is_in(all_path_indices)),
+        )
+        .all(tx)
+        .await
+        .map_err(|e| {
+            PhotonApiError::UnexpectedError(format!("Failed to fetch path nodes: {}", e))
+        })?;
+
+    let mut result = HashMap::new();
+    for node in path_nodes {
+        let hash = Hash::try_from(node.hash).map_err(|e| {
+            PhotonApiError::UnexpectedError(format!("Invalid hash in path node: {}", e))
+        })?;
+        result.insert(node.node_idx, hash);
+    }
+
+    Ok(result)
+}
diff --git a/src/api/method/get_queue_info.rs b/src/api/method/get_queue_info.rs
new file mode 100644
index 00000000..07fb880e
--- /dev/null
+++ b/src/api/method/get_queue_info.rs
@@ -0,0 +1,159 @@
+use serde::{Deserialize, Serialize};
+use solana_pubkey::Pubkey;
+use utoipa::ToSchema;
+
+use crate::api::error::PhotonApiError;
+use crate::common::typedefs::context::Context;
+use crate::dao::generated::{accounts, address_queues, tree_metadata};
+use light_compressed_account::{QueueType, TreeType};
+use sea_orm::{ColumnTrait, DatabaseConnection, EntityTrait, PaginatorTrait, QueryFilter};
+use std::collections::HashMap;
+
+#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
+#[serde(deny_unknown_fields, rename_all = "camelCase")]
+pub struct GetQueueInfoRequest {
+    #[serde(default)]
+    pub trees: Option<Vec<String>>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct GetQueueInfoResponse {
+    pub queues: Vec<QueueInfo>,
+    pub slot: u64,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct QueueInfo {
+    pub tree: String,
+    pub queue: String,
+    pub queue_type: u8,
+    pub queue_size: u64,
+}
+
+async fn fetch_queue_sizes(
+    db: &DatabaseConnection,
+    tree_filter: Option<Vec<Vec<u8>>>,
+) -> Result<HashMap<(Vec<u8>, u8), u64>, PhotonApiError> {
+    let mut result = HashMap::new();
+
+    let mut query = tree_metadata::Entity::find().filter(
+        tree_metadata::Column::TreeType
+            .is_in([TreeType::StateV2 as i32, TreeType::AddressV2 as i32]),
+    );
+
+    if let Some(trees) = tree_filter {
+        query = query.filter(tree_metadata::Column::TreePubkey.is_in(trees));
+    }
+
+    let trees = query
+        .all(db)
+        .await
+        .map_err(|e| PhotonApiError::UnexpectedError(format!("DB error: {}", e)))?;
+
+    for tree in trees {
+        let tree_pubkey = tree.tree_pubkey.clone();
+
+        match tree.tree_type {
+            t if t == TreeType::StateV2 as i32 => {
+                let nullifier_count = accounts::Entity::find()
+                    .filter(accounts::Column::Tree.eq(tree_pubkey.clone()))
+                    .filter(accounts::Column::NullifierQueueIndex.is_not_null())
+                    .filter(accounts::Column::NullifiedInTree.eq(false))
+                    .count(db)
+                    .await
+                    .map_err(|e| PhotonApiError::UnexpectedError(format!("DB error: {}", e)))?;
+
+                result.insert(
+                    (tree_pubkey.clone(),
QueueType::InputStateV2 as u8), + nullifier_count, + ); + + let output_queue_size = accounts::Entity::find() + .filter(accounts::Column::Tree.eq(tree_pubkey.clone())) + .filter(accounts::Column::InOutputQueue.eq(true)) + .count(db) + .await + .map_err(|e| PhotonApiError::UnexpectedError(format!("DB error: {}", e)))?; + + result.insert( + (tree_pubkey, QueueType::OutputStateV2 as u8), + output_queue_size, + ); + } + t if t == TreeType::AddressV2 as i32 => { + let address_count = address_queues::Entity::find() + .filter(address_queues::Column::Tree.eq(tree_pubkey.clone())) + .count(db) + .await + .map_err(|e| PhotonApiError::UnexpectedError(format!("DB error: {}", e)))?; + + result.insert((tree_pubkey, QueueType::AddressV2 as u8), address_count); + } + _ => continue, + } + } + + Ok(result) +} + +pub async fn get_queue_info( + db: &DatabaseConnection, + request: GetQueueInfoRequest, +) -> Result { + let tree_filter = if let Some(trees) = request.trees { + let parsed: Result>, _> = trees + .iter() + .map(|s| { + Pubkey::try_from(s.as_str()) + .map(|p| p.to_bytes().to_vec()) + .map_err(|e| PhotonApiError::ValidationError(format!("Invalid pubkey: {}", e))) + }) + .collect(); + Some(parsed?) + } else { + None + }; + + let queue_sizes = fetch_queue_sizes(db, tree_filter).await?; + + let tree_pubkeys: Vec> = queue_sizes + .keys() + .map(|(tree, _)| tree.clone()) + .collect::>() + .into_iter() + .collect(); + + let tree_metadata_list = tree_metadata::Entity::find() + .filter(tree_metadata::Column::TreePubkey.is_in(tree_pubkeys)) + .all(db) + .await + .map_err(|e| PhotonApiError::UnexpectedError(format!("DB error: {}", e)))?; + + let tree_to_queue: HashMap, Vec> = tree_metadata_list + .into_iter() + .map(|t| (t.tree_pubkey, t.queue_pubkey)) + .collect(); + + let queues: Vec = queue_sizes + .into_iter() + .map(|((tree_bytes, queue_type), size)| { + let queue_bytes = tree_to_queue + .get(&tree_bytes) + .cloned() + .unwrap_or_else(|| vec![0u8; 32]); + + QueueInfo { + tree: bs58::encode(&tree_bytes).into_string(), + queue: bs58::encode(&queue_bytes).into_string(), + queue_type, + queue_size: size, + } + }) + .collect(); + + let slot = Context::extract(db).await?.slot; + + Ok(GetQueueInfoResponse { queues, slot }) +} diff --git a/src/api/method/mod.rs b/src/api/method/mod.rs index 2498ec62..50073948 100644 --- a/src/api/method/mod.rs +++ b/src/api/method/mod.rs @@ -21,9 +21,8 @@ pub mod get_multiple_compressed_accounts; pub mod get_multiple_new_address_proofs; pub mod get_queue_elements; +pub mod get_queue_info; pub mod get_transaction_with_compression_info; pub mod get_validity_proof; -pub mod get_batch_address_update_info; - pub mod utils; diff --git a/src/api/rpc_server.rs b/src/api/rpc_server.rs index f29678aa..231648ca 100644 --- a/src/api/rpc_server.rs +++ b/src/api/rpc_server.rs @@ -194,16 +194,11 @@ fn build_rpc_module(api_and_indexer: PhotonApi) -> Result, api.get_queue_elements(payload).await.map_err(Into::into) })?; - module.register_async_method( - "getBatchAddressUpdateInfo", - |rpc_params, rpc_context| async move { - let api = rpc_context.as_ref(); - let payload = rpc_params.parse()?; - api.get_batch_address_update_info(payload) - .await - .map_err(Into::into) - }, - )?; + module.register_async_method("getQueueInfo", |rpc_params, rpc_context| async move { + let api = rpc_context.as_ref(); + let payload = rpc_params.parse()?; + api.get_queue_info(payload).await.map_err(Into::into) + })?; module.register_async_method( "getCompressedAccountsByOwner", diff --git a/src/ingester/fetchers/grpc.rs 
diff --git a/src/ingester/fetchers/grpc.rs b/src/ingester/fetchers/grpc.rs
index bc31b3bf..c2a6fad0 100644
--- a/src/ingester/fetchers/grpc.rs
+++ b/src/ingester/fetchers/grpc.rs
@@ -300,7 +300,10 @@ fn parse_transaction(transaction: SubscribeUpdateTransactionInfo) -> Transaction
     let meta = transaction.meta.unwrap();
     let error = create_tx_error(meta.err.as_ref());
     if let Err(e) = &error {
-        error!("Error parsing transaction error: {}. Error bytes: {:?}", e, meta.err);
+        error!(
+            "Error parsing transaction error: {}. Error bytes: {:?}",
+            e, meta.err
+        );
     }
     let error = error.unwrap();
diff --git a/src/ingester/parser/indexer_events.rs b/src/ingester/parser/indexer_events.rs
index 0a2ed88b..6a92a3f6 100644
--- a/src/ingester/parser/indexer_events.rs
+++ b/src/ingester/parser/indexer_events.rs
@@ -1,8 +1,8 @@
 /// Copied from the Light repo. We copy them instead of importing from the Light repo in order
 /// to avoid having to import all of Light's dependencies.
 use borsh::{BorshDeserialize, BorshSerialize};
-use light_event::event::{BatchNullifyContext, NewAddress};
 use light_compressed_account::Pubkey;
+use light_event::event::{BatchNullifyContext, NewAddress};
 
 #[derive(Debug, PartialEq, Eq, Default, Clone, BorshSerialize, BorshDeserialize)]
 pub struct OutputCompressedAccountWithPackedContext {
diff --git a/src/ingester/parser/state_update.rs b/src/ingester/parser/state_update.rs
index b1510b64..aa7a22bc 100644
--- a/src/ingester/parser/state_update.rs
+++ b/src/ingester/parser/state_update.rs
@@ -6,8 +6,8 @@ use crate::common::typedefs::serializable_pubkey::SerializablePubkey;
 use crate::ingester::parser::tree_info::TreeInfo;
 use borsh::{BorshDeserialize, BorshSerialize};
 use jsonrpsee_core::Serialize;
-use light_event::event::{BatchNullifyContext, NewAddress};
 use light_compressed_account::TreeType;
+use light_event::event::{BatchNullifyContext, NewAddress};
 use log::debug;
 use solana_pubkey::Pubkey;
 use solana_signature::Signature;
@@ -185,6 +185,10 @@ impl StateUpdate {
         // Track which account hashes we're keeping for filtering account_transactions later
         let mut kept_account_hashes = HashSet::new();
 
+        // Add input (spent) account hashes - these don't have tree info but should be kept
+        // for account_transactions tracking
+        kept_account_hashes.extend(self.in_accounts.iter().cloned());
+
         // Filter out_accounts
         let out_accounts: Vec<_> = self
             .out_accounts
diff --git a/src/ingester/parser/tx_event_parser_v2.rs b/src/ingester/parser/tx_event_parser_v2.rs
index 2aa5bb4a..e879c8ba 100644
--- a/src/ingester/parser/tx_event_parser_v2.rs
+++ b/src/ingester/parser/tx_event_parser_v2.rs
@@ -10,8 +10,8 @@ use crate::ingester::parser::tx_event_parser::create_state_update_v1;
 use super::state_update::AddressQueueUpdate;
 use crate::common::typedefs::hash::Hash;
-use light_event::parse::event_from_light_transaction;
 use light_compressed_account::Pubkey as LightPubkey;
+use light_event::parse::event_from_light_transaction;
 use solana_pubkey::Pubkey;
 use solana_signature::Signature;
 
@@ -25,10 +25,16 @@ pub fn parse_public_transaction_event_v2(
     instructions: &[Vec<u8>],
     accounts: Vec<Vec<Pubkey>>,
 ) -> Option<Vec<StateUpdate>> {
-    let light_program_ids: Vec<LightPubkey> = program_ids.iter().map(|p| to_light_pubkey(p)).collect();
+    let light_program_ids: Vec<LightPubkey> =
+        program_ids.iter().map(|p| to_light_pubkey(p)).collect();
     let light_accounts: Vec<Vec<LightPubkey>> = accounts
         .into_iter()
-        .map(|acc_vec| acc_vec.into_iter().map(|acc| to_light_pubkey(&acc)).collect())
+        .map(|acc_vec| {
+            acc_vec
+                .into_iter()
+                .map(|acc| to_light_pubkey(&acc))
+                .collect()
+        })
         .collect();
 
     let events =
         event_from_light_transaction(&light_program_ids, instructions, light_accounts).ok()?;
@@ -78,9 +84,7 @@ pub fn parse_public_transaction_event_v2(
         compression_lamports: public_transaction_event
             .event
             .compress_or_decompress_lamports,
-        pubkey_array: public_transaction_event
-            .event
-            .pubkey_array,
+        pubkey_array: public_transaction_event.event.pubkey_array,
         message: public_transaction_event.event.message,
     };
diff --git a/src/ingester/persist/indexed_merkle_tree/helpers.rs b/src/ingester/persist/indexed_merkle_tree/helpers.rs
index c32841d2..9b486238 100644
--- a/src/ingester/persist/indexed_merkle_tree/helpers.rs
+++ b/src/ingester/persist/indexed_merkle_tree/helpers.rs
@@ -5,6 +5,7 @@ use crate::ingester::parser::tree_info::TreeInfo;
 use crate::ingester::persist::indexed_merkle_tree::HIGHEST_ADDRESS_PLUS_ONE;
 use ark_bn254::Fr;
 use light_compressed_account::TreeType;
+use light_hasher::bigint::bigint_to_be_bytes_array;
 use light_poseidon::{Poseidon, PoseidonBytesHasher};
 use sea_orm::{ConnectionTrait, TransactionTrait};
 use solana_pubkey::Pubkey;
@@ -15,10 +16,15 @@ pub fn compute_hash_by_tree_type(
     tree_type: TreeType,
 ) -> Result<Hash, IngesterError> {
     match tree_type {
-        TreeType::AddressV1 => compute_range_node_hash_v1(range_node)
-            .map_err(|e| IngesterError::ParserError(format!("Failed to compute V1 hash: {}", e))),
-        TreeType::AddressV2 => compute_range_node_hash(range_node)
-            .map_err(|e| IngesterError::ParserError(format!("Failed to compute V2 hash: {}", e))),
+        // AddressV1 uses 3-field hash: H(value, next_index, next_value)
+        TreeType::AddressV1 => compute_range_node_hash_v1(range_node).map_err(|e| {
+            IngesterError::ParserError(format!("Failed to compute address v1 hash: {}", e))
+        }),
+        // AddressV2 uses 2-field hash: H(value, next_value)
+        // next_index is stored but NOT included in hash (removed in commit e208fa1eb)
+        TreeType::AddressV2 => compute_range_node_hash_v2(range_node).map_err(|e| {
+            IngesterError::ParserError(format!("Failed to compute address v2 hash: {}", e))
+        }),
         _ => Err(IngesterError::ParserError(format!(
             "Unsupported tree type for range node hash computation: {:?}",
             tree_type
@@ -62,17 +68,8 @@ pub fn compute_hash_with_cache(
     compute_hash_by_tree_type(range_node, tree_type)
 }
 
-pub fn compute_range_node_hash(node: &indexed_trees::Model) -> Result<Hash, IngesterError> {
-    let mut poseidon = Poseidon::<Fr>::new_circom(2).unwrap();
-    Hash::try_from(
-        poseidon
-            .hash_bytes_be(&[&node.value, &node.next_value])
-            .map_err(|e| IngesterError::ParserError(format!("Failed to compute hash v2: {}", e)))
-            .map(|x| x.to_vec())?,
-    )
-    .map_err(|e| IngesterError::ParserError(format!("Failed to convert hash v2: {}", e)))
-}
-
+/// Computes range node hash for AddressV1 indexed merkle trees.
+/// Uses 3-field Poseidon hash: H(value, next_index, next_value)
 pub fn compute_range_node_hash_v1(node: &indexed_trees::Model) -> Result<Hash, IngesterError> {
     let mut poseidon = Poseidon::<Fr>::new_circom(3).unwrap();
     let mut next_index_bytes = vec![0u8; 32];
@@ -88,42 +85,59 @@ pub fn compute_range_node_hash_v1(node: &indexed_trees::Model) -> Result<Hash, IngesterError>
     )
     .map_err(|e| IngesterError::ParserError(format!("Failed to convert hash v1: {}", e)))
 }
 
+/// Computes range node hash for AddressV2 indexed merkle trees.
+/// Uses 2-field Poseidon hash: H(value, next_value)
+pub fn compute_range_node_hash_v2(node: &indexed_trees::Model) -> Result<Hash, IngesterError> {
+    let mut poseidon = Poseidon::<Fr>::new_circom(2).unwrap();
+
+    Hash::try_from(
+        poseidon
+            .hash_bytes_be(&[&node.value, &node.next_value])
+            .map_err(|e| IngesterError::ParserError(format!("Failed to compute hash v2: {}", e)))
+            .map(|x| x.to_vec())?,
+    )
+    .map_err(|e| IngesterError::ParserError(format!("Failed to convert hash v2: {}", e)))
+}
+
 pub fn get_zeroeth_exclusion_range(tree: Vec<u8>) -> indexed_trees::Model {
     indexed_trees::Model {
         tree,
         leaf_index: 0,
         value: vec![0; 32],
         next_index: 0,
-        next_value: vec![0]
-            .into_iter()
-            .chain(HIGHEST_ADDRESS_PLUS_ONE.to_bytes_be())
-            .collect(),
+        next_value: bigint_to_be_bytes_array::<32>(&HIGHEST_ADDRESS_PLUS_ONE)
+            .unwrap()
+            .to_vec(),
         seq: Some(0),
     }
 }
 
 pub fn get_zeroeth_exclusion_range_v1(tree: Vec<u8>) -> indexed_trees::Model {
+    use light_hasher::bigint::bigint_to_be_bytes_array;
+
     indexed_trees::Model {
         tree,
         leaf_index: 0,
         value: vec![0; 32],
         next_index: 1,
-        next_value: vec![0]
-            .into_iter()
-            .chain(HIGHEST_ADDRESS_PLUS_ONE.to_bytes_be())
-            .collect(),
+        next_value: bigint_to_be_bytes_array::<32>(&HIGHEST_ADDRESS_PLUS_ONE)
+            .unwrap()
+            .to_vec(),
         seq: Some(0),
     }
 }
 
 pub fn get_top_element(tree: Vec<u8>) -> indexed_trees::Model {
+    use light_hasher::bigint::bigint_to_be_bytes_array;
+
     indexed_trees::Model {
         tree,
         leaf_index: 1,
-        value: vec![0]
-            .into_iter()
-            .chain(HIGHEST_ADDRESS_PLUS_ONE.to_bytes_be())
-            .collect(),
+        value: bigint_to_be_bytes_array::<32>(&HIGHEST_ADDRESS_PLUS_ONE)
+            .unwrap()
+            .to_vec(),
         next_index: 0,
         next_value: vec![0; 32],
         seq: Some(0),
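The arity difference between the two range-node hashes, as a self-contained sketch (the 32-byte big-endian placement of next_index is an assumption here; the hypothetical function mirrors the helpers above rather than replacing them):

    // Sketch only: v1 hashes three fields, v2 hashes two.
    use ark_bn254::Fr;
    use light_poseidon::{Poseidon, PoseidonBytesHasher};

    fn demo_range_hashes(value: &[u8; 32], next_index: u64, next_value: &[u8; 32]) {
        // AddressV1: H(value, next_index, next_value)
        let mut next_index_bytes = [0u8; 32];
        next_index_bytes[24..].copy_from_slice(&next_index.to_be_bytes()); // assumed BE placement
        let mut p3 = Poseidon::<Fr>::new_circom(3).unwrap();
        let h_v1 = p3
            .hash_bytes_be(&[value.as_slice(), next_index_bytes.as_slice(), next_value.as_slice()])
            .unwrap();

        // AddressV2: H(value, next_value); next_index is stored but never hashed.
        let mut p2 = Poseidon::<Fr>::new_circom(2).unwrap();
        let h_v2 = p2.hash_bytes_be(&[value.as_slice(), next_value.as_slice()]).unwrap();

        assert_ne!(h_v1, h_v2);
    }

Because next_index no longer feeds the v2 hash, re-pointing a range node's next_index alone does not change its leaf hash in AddressV2 trees.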
diff --git a/src/ingester/persist/indexed_merkle_tree/mod.rs b/src/ingester/persist/indexed_merkle_tree/mod.rs
index 770fe4d6..de61d1e5 100644
--- a/src/ingester/persist/indexed_merkle_tree/mod.rs
+++ b/src/ingester/persist/indexed_merkle_tree/mod.rs
@@ -7,7 +7,7 @@ mod proof;
 
 pub use helpers::{
     compute_hash_by_tree_pubkey, compute_hash_by_tree_type, compute_hash_with_cache,
-    compute_range_node_hash, compute_range_node_hash_v1, get_top_element,
+    compute_range_node_hash_v1, compute_range_node_hash_v2, get_top_element,
     get_zeroeth_exclusion_range, get_zeroeth_exclusion_range_v1,
 };
diff --git a/src/ingester/persist/indexed_merkle_tree/proof.rs b/src/ingester/persist/indexed_merkle_tree/proof.rs
index 355a7257..472874d7 100644
--- a/src/ingester/persist/indexed_merkle_tree/proof.rs
+++ b/src/ingester/persist/indexed_merkle_tree/proof.rs
@@ -150,7 +150,6 @@ fn proof_for_empty_tree_with_seq(
         .map_err(|e| PhotonApiError::UnexpectedError(format!("Failed to compute hash: {}", e)))?;
 
     let mut root = zeroeth_element_hash.clone().to_vec();
-
     for elem in proof.iter() {
         root = compute_parent_hash(root, elem.to_vec())
             .map_err(|e| PhotonApiError::UnexpectedError(format!("Failed to compute hash: {e}")))?;
@@ -180,27 +179,42 @@ pub async fn query_next_smallest_elements<T>(
 where
     T: ConnectionTrait + TransactionTrait,
 {
+    if values.is_empty() {
+        return Ok(BTreeMap::new());
+    }
+
     let response = match txn_or_conn.get_database_backend() {
-        // HACK: I am executing SQL queries one by one in a loop because I am getting a weird syntax
-        // error when I am using parentheses.
         DatabaseBackend::Postgres => {
-            let sql_statements = values.iter().map(|value| {
-                format!(
-                    "( SELECT * FROM indexed_trees WHERE tree = {} AND value < {} ORDER BY value DESC LIMIT 1 )",
-                    format_bytes(tree.clone(), txn_or_conn.get_database_backend()),
-                    format_bytes(value.clone(), txn_or_conn.get_database_backend())
-                )
-            });
-            let full_query = sql_statements.collect::<Vec<_>>().join(" UNION ALL ");
-            txn_or_conn
-                .query_all(Statement::from_string(
-                    txn_or_conn.get_database_backend(),
-                    full_query,
-                ))
-                .await
-                .map_err(|e| {
-                    IngesterError::DatabaseError(format!("Failed to execute indexed query: {e}"))
-                })?
+            // Batch queries in chunks to avoid query plan explosion
+            // Each chunk uses UNION ALL which PostgreSQL optimizes well with index scans
+            const BATCH_SIZE: usize = 100;
+            let tree_bytes = format_bytes(tree.clone(), txn_or_conn.get_database_backend());
+            let mut all_results = vec![];
+
+            for chunk in values.chunks(BATCH_SIZE) {
+                let sql_statements = chunk.iter().map(|value| {
+                    format!(
+                        "(SELECT * FROM indexed_trees WHERE tree = {} AND value < {} ORDER BY value DESC LIMIT 1)",
+                        tree_bytes,
+                        format_bytes(value.clone(), txn_or_conn.get_database_backend())
+                    )
+                });
+                let full_query = sql_statements.collect::<Vec<_>>().join(" UNION ALL ");
+
+                let chunk_results = txn_or_conn
+                    .query_all(Statement::from_string(
+                        txn_or_conn.get_database_backend(),
+                        full_query,
+                    ))
+                    .await
+                    .map_err(|e| {
+                        IngesterError::DatabaseError(format!(
+                            "Failed to execute indexed query: {e}"
+                        ))
+                    })?;
+                all_results.extend(chunk_results);
+            }
+            all_results
         }
         DatabaseBackend::Sqlite => {
             let mut response = vec![];
@@ -244,6 +258,111 @@ where
     Ok(indexed_tree)
 }
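For one chunk of up to BATCH_SIZE addresses, the generated SQL has this shape (a sketch with a hypothetical helper; real escaping comes from `format_bytes`):

    // Sketch: the per-chunk UNION ALL query described in the comments above.
    fn chunk_query(tree_lit: &str, value_lits: &[&str]) -> String {
        value_lits
            .iter()
            .map(|v| {
                format!(
                    "(SELECT * FROM indexed_trees WHERE tree = {} AND value < {} ORDER BY value DESC LIMIT 1)",
                    tree_lit, v
                )
            })
            .collect::<Vec<_>>()
            .join(" UNION ALL ")
    }

Each parenthesized subselect can be answered by a single descending index scan on (tree, value), so a chunk of 100 addresses costs 100 tiny scans rather than one query whose plan grows with the number of values.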
 
+/// Optimized version for API use: Query the next smallest element for each input address.
+/// Returns a HashMap mapping INPUT ADDRESS -> range node model.
+/// This is O(1) lookup per address instead of O(n) scan in the caller.
+pub async fn query_next_smallest_elements_by_address<T>(
+    txn_or_conn: &T,
+    values: Vec<Vec<u8>>,
+    tree: Vec<u8>,
+) -> Result<HashMap<Vec<u8>, indexed_trees::Model>, IngesterError>
+where
+    T: ConnectionTrait + TransactionTrait,
+{
+    if values.is_empty() {
+        return Ok(HashMap::new());
+    }
+
+    let tree_bytes = format_bytes(tree.clone(), txn_or_conn.get_database_backend());
+    let mut indexed_tree: HashMap<Vec<u8>, indexed_trees::Model> =
+        HashMap::with_capacity(values.len());
+
+    match txn_or_conn.get_database_backend() {
+        DatabaseBackend::Postgres => {
+            // Batch queries in chunks to avoid query plan explosion
+            // Each chunk uses UNION ALL which PostgreSQL optimizes well with index scans
+            // Include input_address as a constant column to track which result belongs to which input
+            const BATCH_SIZE: usize = 100;
+
+            for chunk in values.chunks(BATCH_SIZE) {
+                let sql_statements = chunk.iter().map(|value| {
+                    let value_bytes = format_bytes(value.clone(), txn_or_conn.get_database_backend());
+                    format!(
+                        "(SELECT {val}::bytea as input_address, tree, leaf_index, value, next_index, next_value, seq \
+                         FROM indexed_trees WHERE tree = {tree} AND value < {val} ORDER BY value DESC LIMIT 1)",
+                        val = value_bytes,
+                        tree = tree_bytes,
+                    )
+                });
+                let full_query = sql_statements.collect::<Vec<_>>().join(" UNION ALL ");
+
+                let chunk_results = txn_or_conn
+                    .query_all(Statement::from_string(
+                        txn_or_conn.get_database_backend(),
+                        full_query,
+                    ))
+                    .await
+                    .map_err(|e| {
+                        IngesterError::DatabaseError(format!(
+                            "Failed to execute indexed query: {e}"
+                        ))
+                    })?;
+
+                for row in chunk_results {
+                    let input_address: Vec<u8> = row.try_get("", "input_address")?;
+                    let model = indexed_trees::Model {
+                        tree: row.try_get("", "tree")?,
+                        leaf_index: row.try_get("", "leaf_index")?,
+                        value: row.try_get("", "value")?,
+                        next_index: row.try_get("", "next_index")?,
+                        next_value: row.try_get("", "next_value")?,
+                        seq: row.try_get("", "seq")?,
+                    };
+                    indexed_tree.insert(input_address, model);
+                }
+            }
+        }
+        DatabaseBackend::Sqlite => {
+            for value in values {
+                let value_bytes = format_bytes(value.clone(), txn_or_conn.get_database_backend());
+                let full_query = format!(
+                    "SELECT CAST({val} AS BLOB) as input_address, tree, leaf_index, value, next_index, next_value, seq \
+                     FROM indexed_trees WHERE tree = {tree} AND value < {val} ORDER BY value DESC LIMIT 1",
+                    val = value_bytes,
+                    tree = tree_bytes,
+                );
+                let results = txn_or_conn
+                    .query_all(Statement::from_string(
+                        txn_or_conn.get_database_backend(),
+                        full_query,
+                    ))
+                    .await
+                    .map_err(|e| {
+                        IngesterError::DatabaseError(format!(
+                            "Failed to execute indexed query: {e}"
+                        ))
+                    })?;
+
+                for row in results {
+                    let input_address: Vec<u8> = row.try_get("", "input_address")?;
+                    let model = indexed_trees::Model {
+                        tree: row.try_get("", "tree")?,
+                        leaf_index: row.try_get("", "leaf_index")?,
+                        value: row.try_get("", "value")?,
+                        next_index: row.try_get("", "next_index")?,
+                        next_value: row.try_get("", "next_value")?,
+                        seq: row.try_get("", "seq")?,
+                    };
+                    indexed_tree.insert(input_address, model);
+                }
+            }
+        }
+        _ => unimplemented!(),
+    };
+
+    Ok(indexed_tree)
+}
+
 /// Batched version of get_exclusion_range_with_proof_v2
 /// Returns a HashMap mapping each input address to its (model, proof) tuple
 pub async fn get_multiple_exclusion_ranges_with_proofs_v2(
@@ -257,27 +376,24 @@
         return Ok(HashMap::new());
     }
 
-    let btree = query_next_smallest_elements(txn, addresses.clone(), tree.clone())
-        .await
-        .map_err(|e| {
-            PhotonApiError::UnexpectedError(format!(
-                "Failed to query next smallest elements: {}",
-                e
-            ))
-        })?;
+    // Query returns HashMap - O(1) lookup per address
+    let address_to_range =
+        query_next_smallest_elements_by_address(txn, addresses.clone(), tree.clone())
+            .await
+            .map_err(|e| {
+                PhotonApiError::UnexpectedError(format!(
+                    "Failed to query next smallest elements: {}",
+                    e
+                ))
+            })?;
 
     let mut results = HashMap::new();
     let mut leaf_nodes_with_indices = Vec::new();
     let mut address_to_model: HashMap<Vec<u8>, indexed_trees::Model> = HashMap::new();
 
-    // Process addresses that have range proofs
+    // Process addresses that have range proofs - O(1) lookup per address
     for address in &addresses {
-        let range_node = btree
-            .values()
-            .filter(|node| node.value < *address)
-            .max_by(|a, b| a.value.cmp(&b.value));
-
-        if let Some(range_node) = range_node {
+        if let Some(range_node) = address_to_range.get(address) {
             let hash = compute_hash_by_tree_type(range_node, tree_type).map_err(|e| {
                 PhotonApiError::UnexpectedError(format!("Failed to compute hash: {}", e))
             })?;
diff --git a/src/ingester/persist/leaf_node.rs b/src/ingester/persist/leaf_node.rs
index 07bbf598..c7110c2f 100644
--- a/src/ingester/persist/leaf_node.rs
+++ b/src/ingester/persist/leaf_node.rs
@@ -169,6 +169,26 @@ pub async fn persist_leaf_nodes(
     // We first build the query and then execute it because SeaORM has a bug where it always throws
     // an error if we do not insert a record in an insert statement. However, in this case, it's
     // expected not to insert anything if the key already exists.
+    let update_count = models_to_updates.len();
+    let mut seq_values: Vec<i64> = models_to_updates
+        .values()
+        .filter_map(|m| match &m.seq {
+            sea_orm::ActiveValue::Set(opt) => *opt,
+            _ => None,
+        })
+        .collect();
+    seq_values.sort();
+    let min_seq = seq_values.first().copied();
+    let max_seq = seq_values.last().copied();
+
+    log::debug!(
+        "Persisting {} tree nodes (seq range: {:?} to {:?}) for tree {:?}",
+        update_count,
+        min_seq,
+        max_seq,
+        leaf_nodes.first().map(|n| &n.tree)
+    );
+
     let mut query = state_trees::Entity::insert_many(models_to_updates.into_values())
         .on_conflict(
             OnConflict::columns([state_trees::Column::Tree, state_trees::Column::NodeIdx])
@@ -187,5 +207,11 @@
     txn.execute(query).await.map_err(|e| {
         IngesterError::DatabaseError(format!("Failed to persist path nodes: {}", e))
     })?;
+
+    log::debug!(
+        "Successfully persisted {} nodes for tree {:?}",
+        update_count,
+        leaf_nodes.first().map(|n| &n.tree)
+    );
     Ok(())
 }
diff --git a/src/ingester/persist/leaf_node_proof.rs b/src/ingester/persist/leaf_node_proof.rs
index ccd36966..b1d98f00 100644
--- a/src/ingester/persist/leaf_node_proof.rs
+++ b/src/ingester/persist/leaf_node_proof.rs
@@ -21,7 +21,6 @@ pub async fn get_multiple_compressed_leaf_proofs_by_indices(
         return Ok(Vec::new());
     }
 
-    // Convert SerializablePubkey to [u8; 32] for the helper function
     let tree_bytes = merkle_tree_pubkey.0.to_bytes();
     let root_seq = get_current_tree_sequence(txn, &tree_bytes)
         .await
         .map_err(|e| {
         })? as u32;
     let root_seq = if root_seq == 0 { None } else { Some(root_seq) };
 
+    log::debug!(
+        "Fetching proofs for {} indices on tree {}, current root_seq: {:?}",
+        indices.len(),
+        merkle_tree_pubkey,
+        root_seq
+    );
+
     let existing_leaves = state_trees::Entity::find()
         .filter(
             state_trees::Column::LeafIdx
@@ -180,7 +186,6 @@ pub async fn get_multiple_compressed_leaf_proofs(
         })
         .collect::<Result<Vec<_>, PhotonApiError>>()?;
 
-    // Get tree height from the first leaf node (all should be from the same tree or we need to handle multiple trees)
     let tree_height = if !leaf_nodes_with_node_index.is_empty() {
         let first_tree = &leaf_nodes_with_node_index[0].0.tree;
         TreeInfo::height(txn, &first_tree.to_string())
@@ -283,5 +288,6 @@ pub async fn get_multiple_compressed_leaf_proofs_from_full_leaf_info(
     // for proof in proofs.iter() {
     //     validate_proof(proof)?;
     // }
+
     Ok(proofs)
 }
diff --git a/src/ingester/persist/mod.rs b/src/ingester/persist/mod.rs
index 0f7db46a..2d64cd1f 100644
--- a/src/ingester/persist/mod.rs
+++ b/src/ingester/persist/mod.rs
@@ -179,9 +179,11 @@ pub async fn persist_state_update(
 
     // Process each tree's nodes with the correct height
     for (tree_pubkey, tree_nodes) in nodes_by_tree {
-        let tree_info = tree_info_cache.get(&tree_pubkey).ok_or_else(|| {
-            IngesterError::ParserError(format!("Tree metadata not found for tree {}", tree_pubkey))
-        })?;
+        let tree_info = tree_info_cache.get(&tree_pubkey)
+            .ok_or_else(|| IngesterError::ParserError(format!(
+                "Tree metadata not found for tree {}. Tree metadata must be synced before indexing.",
+                tree_pubkey
+            )))?;
         let tree_height = tree_info.height + 1; // +1 for indexed trees
 
         // Process in chunks
diff --git a/src/ingester/persist/persisted_indexed_merkle_tree.rs b/src/ingester/persist/persisted_indexed_merkle_tree.rs
index b5dff27e..c2874b09 100644
--- a/src/ingester/persist/persisted_indexed_merkle_tree.rs
+++ b/src/ingester/persist/persisted_indexed_merkle_tree.rs
@@ -4,8 +4,9 @@ use super::{compute_parent_hash, persisted_state_tree::ZERO_BYTES, MAX_SQL_INSERTS};
 use crate::common::format_bytes;
 use crate::ingester::parser::tree_info::TreeInfo;
 use crate::ingester::persist::indexed_merkle_tree::{
-    compute_hash_with_cache, compute_range_node_hash, compute_range_node_hash_v1, get_top_element,
-    get_zeroeth_exclusion_range, get_zeroeth_exclusion_range_v1, query_next_smallest_elements,
+    compute_hash_with_cache, compute_range_node_hash_v1, compute_range_node_hash_v2,
+    get_top_element, get_zeroeth_exclusion_range, get_zeroeth_exclusion_range_v1,
+    query_next_smallest_elements,
 };
 use crate::ingester::persist::leaf_node::{persist_leaf_nodes, LeafNode};
 use crate::{
@@ -48,7 +49,7 @@ fn ensure_zeroeth_element_exists(
         }
         _ => {
             let leaf = get_zeroeth_exclusion_range(sdk_tree.to_bytes().to_vec());
-            let hash = compute_range_node_hash(&leaf).map_err(|e| {
+            let hash = compute_range_node_hash_v2(&leaf).map_err(|e| {
                 IngesterError::ParserError(format!(
                     "Failed to compute zeroeth element hash: {}",
                     e
@@ -491,13 +492,13 @@ pub async fn validate_tree(db_conn: &sea_orm::DatabaseConnection, tree: SerializablePubkey)
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::ingester::persist::indexed_merkle_tree::compute_range_node_hash;
+    use crate::ingester::persist::indexed_merkle_tree::compute_range_node_hash_v2;
 
     #[test]
     fn test_zeroeth_element_hash_is_not_zero_bytes_0() {
         let dummy_tree_id = vec![1u8; 32];
         let zeroeth_element = get_zeroeth_exclusion_range(dummy_tree_id.clone());
-        let zeroeth_element_hash_result = compute_range_node_hash(&zeroeth_element);
+        let zeroeth_element_hash_result = compute_range_node_hash_v2(&zeroeth_element);
         assert!(
             zeroeth_element_hash_result.is_ok(),
             "Failed to compute zeroeth_element_hash: {:?}",
diff --git a/src/ingester/persist/persisted_state_tree.rs b/src/ingester/persist/persisted_state_tree.rs
index 9a5a2fa2..6e65fb6f 100644
--- a/src/ingester/persist/persisted_state_tree.rs
+++ b/src/ingester/persist/persisted_state_tree.rs
@@ -186,7 +186,7 @@ pub async fn get_subtrees(
         .map_err(|e| PhotonApiError::UnexpectedError(format!("Failed to query nodes: {}", e)))?;
 
     if results.is_empty() {
-        return Ok(EMPTY_SUBTREES.to_vec());
+        return Ok(EMPTY_SUBTREES[..tree_height].to_vec());
     }
 
     for row in results {
diff --git a/src/ingester/persist/spend.rs b/src/ingester/persist/spend.rs
index 820443c3..3a377d69 100644
--- a/src/ingester/persist/spend.rs
+++ b/src/ingester/persist/spend.rs
@@ -75,6 +75,7 @@ pub async fn spend_input_accounts_batched(
     if accounts.is_empty() {
         return Ok(());
     }
+
     for account in accounts {
         accounts::Entity::update_many()
             .filter(accounts::Column::Hash.eq(account.account_hash.to_vec()))
@@ -90,8 +91,10 @@
                 accounts::Column::TxHash,
                 Expr::value(account.tx_hash.to_vec()),
             )
+            .col_expr(accounts::Column::Spent, Expr::value(true))
            .exec(txn)
            .await?;
     }
+
     Ok(())
 }
diff --git a/src/main.rs b/src/main.rs
index 4a0fc54f..2b028eef 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -52,7 +52,7 @@ struct Args {
     db_url: Option<String>,
 
     /// The start slot to begin indexing from. Defaults to the last indexed slot in the database plus
-    /// one.
+    /// one.
     #[arg(short, long)]
     start_slot: Option<u64>,
diff --git a/src/monitor/mod.rs b/src/monitor/mod.rs
index e6fad6ae..faf3b57c 100644
--- a/src/monitor/mod.rs
+++ b/src/monitor/mod.rs
@@ -1,4 +1,4 @@
-mod queue_hash_cache;
+pub mod queue_hash_cache;
 mod queue_monitor;
 pub mod tree_metadata_sync;
 pub mod v1_tree_accounts;
diff --git a/src/monitor/queue_hash_cache.rs b/src/monitor/queue_hash_cache.rs
index 753bcbd5..00a360f8 100644
--- a/src/monitor/queue_hash_cache.rs
+++ b/src/monitor/queue_hash_cache.rs
@@ -1,6 +1,6 @@
 use light_compressed_account::QueueType;
 use log::debug;
-use sea_orm::{ColumnTrait, DatabaseConnection, DbErr, EntityTrait, QueryFilter, Set};
+use sea_orm::{ColumnTrait, ConnectionTrait, DbErr, EntityTrait, QueryFilter, Set};
 use solana_pubkey::Pubkey;
 
 use crate::dao::generated::{prelude::QueueHashChains, queue_hash_chains};
@@ -11,13 +11,16 @@ pub struct CachedHashChain {
 }
 
 /// Store multiple hash chains in a single transaction
-pub async fn store_hash_chains_batch(
-    db: &DatabaseConnection,
+pub async fn store_hash_chains_batch<C>(
+    db: &C,
     tree_pubkey: Pubkey,
     queue_type: QueueType,
     batch_start_index: u64,
     hash_chains: Vec<(usize, u64, [u8; 32])>, // (zkp_batch_index, start_offset, hash_chain)
-) -> Result<(), DbErr> {
+) -> Result<(), DbErr>
+where
+    C: ConnectionTrait,
+{
     if hash_chains.is_empty() {
         return Ok(());
     }
@@ -63,12 +66,15 @@
 }
 
 /// Retrieve cached hash chains for a specific tree and queue type
-pub async fn get_cached_hash_chains(
-    db: &DatabaseConnection,
+pub async fn get_cached_hash_chains<C>(
+    db: &C,
     tree_pubkey: Pubkey,
     queue_type: QueueType,
     batch_start_index: u64,
-) -> Result<Vec<CachedHashChain>, DbErr> {
+) -> Result<Vec<CachedHashChain>, DbErr>
+where
+    C: ConnectionTrait,
+{
     let queue_type_int = queue_type as i32;
 
     let results = QueueHashChains::find()
@@ -94,3 +100,31 @@
     chains.sort_by_key(|c| c.zkp_batch_index);
     Ok(chains)
 }
+
+pub async fn delete_hash_chains<C>(
+    db: &C,
+    tree_pubkey: Pubkey,
+    queue_type: QueueType,
+    batch_start_index: u64,
+    zkp_batch_indices: Vec<i32>,
+) -> Result<u64, DbErr>
+where
+    C: ConnectionTrait,
+{
+    if zkp_batch_indices.is_empty() {
+        return Ok(0);
+    }
+
+    let queue_type_int = queue_type as i32;
+    let tree_bytes = tree_pubkey.to_bytes().to_vec();
+
+    let result = queue_hash_chains::Entity::delete_many()
+        .filter(queue_hash_chains::Column::TreePubkey.eq(tree_bytes))
+        .filter(queue_hash_chains::Column::QueueType.eq(queue_type_int))
+        .filter(queue_hash_chains::Column::BatchStartIndex.eq(batch_start_index as i64))
+        .filter(queue_hash_chains::Column::ZkpBatchIndex.is_in(zkp_batch_indices))
+        .exec(db)
+        .await?;
+
+    Ok(result.rows_affected)
+}
diff --git a/src/monitor/queue_monitor.rs b/src/monitor/queue_monitor.rs
index 339a55dc..81cceaf3 100644
--- a/src/monitor/queue_monitor.rs
+++ b/src/monitor/queue_monitor.rs
@@ -5,7 +5,7 @@ use light_batched_merkle_tree::{
 use light_compressed_account::QueueType;
 use light_hasher::hash_chain::create_hash_chain_from_slice;
 use light_zero_copy::vec::ZeroCopyVecU64;
-use log::{debug, error, trace, warn};
+use log::{debug, error, trace};
 use sea_orm::{ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter, QueryOrder};
 use solana_client::nonblocking::rpc_client::RpcClient;
 use solana_pubkey::Pubkey;
@@ -234,7 +234,15 @@ async fn verify_queue_hash_chains(
     let batch_start_index = on_chain_batches
         .map(|batches| batches[pending_batch_index].start_index)
         .unwrap_or(0);
-    let start_offset = batch_start_index + (num_inserted_zkps * zkp_batch_size);
+
+    // For AddressV2 queues, batch.start_index is 1-based (tree leaf index) but
+    // address_queues.queue_index is 0-based. Apply -1 offset when querying.
+    // See: src/ingester/persist/persisted_batch_event/address.rs lines 51-55
+    let start_offset = if queue_type == QueueType::AddressV2 {
+        batch_start_index.saturating_sub(1) + (num_inserted_zkps * zkp_batch_size)
+    } else {
+        batch_start_index + (num_inserted_zkps * zkp_batch_size)
+    };
 
     let cached_chains =
         queue_hash_cache::get_cached_hash_chains(db, tree_pubkey, queue_type, batch_start_index)
@@ -249,13 +257,15 @@
     let start_zkp_batch_idx = num_inserted_zkps as usize;
 
     let mut computed_chains = Vec::with_capacity(on_chain_chains.len());
-    let mut chains_to_cache = Vec::new();
+    let mut newly_computed: Vec<(usize, u64, [u8; 32])> = Vec::new();
+    let mut used_cached_indices: Vec<i32> = Vec::new();
 
     for zkp_batch_idx in 0..on_chain_chains.len() {
         let actual_zkp_idx = start_zkp_batch_idx + zkp_batch_idx;
 
         if let Some(&cached_chain) = cached_map.get(&(actual_zkp_idx as i32)) {
             computed_chains.push(cached_chain);
+            used_cached_indices.push(actual_zkp_idx as i32);
         } else {
             let chain_offset = start_offset + (zkp_batch_idx as u64 * zkp_batch_size);
             let chains = compute_hash_chains_from_db(
@@ -270,30 +280,22 @@
 
             if !chains.is_empty() {
                 computed_chains.push(chains[0]);
-                chains_to_cache.push((actual_zkp_idx, chain_offset, chains[0]));
+                newly_computed.push((actual_zkp_idx, chain_offset, chains[0]));
             }
         }
     }
 
-    if !chains_to_cache.is_empty() {
-        if let Err(e) = queue_hash_cache::store_hash_chains_batch(
-            db,
-            tree_pubkey,
-            queue_type,
-            batch_start_index,
-            chains_to_cache,
-        )
-        .await
-        {
-            error!("Failed to cache hash chains: {:?}", e);
-        }
-    }
+    // Validate computed chains against on-chain values BEFORE caching
+    let mut valid_chains_to_cache: Vec<(usize, u64, [u8; 32])> = Vec::new();
+    let mut invalid_cached_indices: Vec<i32> = Vec::new();
 
     for (zkp_batch_idx, (on_chain, computed)) in on_chain_chains
         .iter()
         .zip(computed_chains.iter())
         .enumerate()
     {
+        let actual_zkp_idx = start_zkp_batch_idx + zkp_batch_idx;
+
         if on_chain != computed {
             divergences.push(HashChainDivergence {
                 queue_info: QueueHashChainInfo {
@@ -306,6 +308,55 @@
                 actual_hash_chain: *on_chain,
                 zkp_batch_index: zkp_batch_idx,
             });
+
+            // If this was from cache, mark for deletion
+            if used_cached_indices.contains(&(actual_zkp_idx as i32)) {
+                invalid_cached_indices.push(actual_zkp_idx as i32);
+            }
+        } else {
+            // Only cache newly computed chains that match on-chain
+            if let Some(entry) = newly_computed
+                .iter()
+                .find(|(idx, _, _)| *idx == actual_zkp_idx)
+            {
+                valid_chains_to_cache.push(*entry);
+            }
+        }
+    }
+
+    // Delete invalid cached chains
+    if !invalid_cached_indices.is_empty() {
+        debug!(
+            "Deleting {} invalid cached hash chains for tree {} type {:?}",
+            invalid_cached_indices.len(),
+            tree_pubkey,
+            queue_type
+        );
+        if let Err(e) = queue_hash_cache::delete_hash_chains(
+            db,
+            tree_pubkey,
+            queue_type,
+            batch_start_index,
+            invalid_cached_indices,
+        )
+        .await
+        {
+            error!("Failed to delete invalid cached hash chains: {:?}", e);
+        }
+    }
+
+    // Only cache validated chains
+    if !valid_chains_to_cache.is_empty() {
+        if let Err(e) = queue_hash_cache::store_hash_chains_batch(
+            db,
+            tree_pubkey,
+            queue_type,
+            batch_start_index,
+            valid_chains_to_cache,
+        )
+        .await
+        {
+            error!("Failed to cache hash chains: {:?}", e);
         }
     }
 
@@ -350,7 +401,9 @@ async fn compute_hash_chains_from_db(
         })?;
         hash_chains.push(hash_chain);
     } else {
-        warn!(
+        // Incomplete batches are expected during normal operation
+        // Only log at debug level to reduce noise
+        debug!(
             "Incomplete batch {} for tree {} type {:?} with {} elements when expecting {}",
             i,
            tree_pubkey,
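The offset correction above in numbers (a sketch with made-up values; the hypothetical function restates the monitor's arithmetic): with `batch_start_index = 1`, `zkp_batch_size = 10`, and `num_inserted_zkps = 2`, a state queue reads from offset 21 while an AddressV2 queue reads from offset 20, because its on-chain start index is 1-based:

    // Sketch of the start_offset arithmetic used by the queue monitor.
    fn start_offset(is_address_v2: bool, batch_start_index: u64, num_inserted_zkps: u64, zkp_batch_size: u64) -> u64 {
        let base = if is_address_v2 {
            batch_start_index.saturating_sub(1) // 1-based leaf index -> 0-based queue_index
        } else {
            batch_start_index
        };
        base + num_inserted_zkps * zkp_batch_size
    }

    assert_eq!(start_offset(false, 1, 2, 10), 21);
    assert_eq!(start_offset(true, 1, 2, 10), 20);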
diff --git a/src/openapi/mod.rs b/src/openapi/mod.rs
index ea1fc649..c9498dec 100644
--- a/src/openapi/mod.rs
+++ b/src/openapi/mod.rs
@@ -1,7 +1,6 @@
 use std::collections::HashSet;
 
 use crate::api::api::PhotonApi;
-use crate::api::method::get_batch_address_update_info::AddressQueueIndex;
 use crate::api::method::get_compressed_account_proof::{
     GetCompressedAccountProofResponseValue, GetCompressedAccountProofResponseValueV2,
 };
@@ -21,7 +20,10 @@ use crate::api::method::get_multiple_compressed_accounts::{AccountList, AccountListV2};
 use crate::api::method::get_multiple_new_address_proofs::AddressListWithTrees;
 use crate::api::method::get_multiple_new_address_proofs::AddressWithTree;
 use crate::api::method::get_multiple_new_address_proofs::MerkleContextWithNewAddressProof;
-use crate::api::method::get_queue_elements::GetQueueElementsResponseValue;
+use crate::api::method::get_queue_elements::{
+    AddressQueueData, InputQueueData, Node, OutputQueueData, QueueRequest, StateQueueData,
+};
+use crate::api::method::get_queue_info::QueueInfo;
 use crate::api::method::get_transaction_with_compression_info::CompressionInfoV2;
 use crate::api::method::get_transaction_with_compression_info::{
     AccountWithOptionalTokenData, AccountWithOptionalTokenDataV2, ClosedAccountV2,
@@ -82,9 +84,15 @@ const JSON_CONTENT_TYPE: &str = "application/json";
 
 #[derive(OpenApi)]
 #[openapi(components(schemas(
+    InputQueueData,
+    OutputQueueData,
+    AddressQueueData,
+    StateQueueData,
+    Node,
+    QueueRequest,
+    QueueInfo,
     AccountProofInputs,
     AddressProofInputs,
-    AddressQueueIndex,
     SerializablePubkey,
     Context,
     Hash,
@@ -95,7 +103,6 @@
     AccountContext,
     AccountWithContext,
     AccountV2,
-    GetQueueElementsResponseValue,
     TokenAccountList,
     TokenAccountListV2,
     TokenAccount,
diff --git a/src/openapi/specs/api.yaml b/src/openapi/specs/api.yaml
index 0434a085..f857ec24 100644
--- a/src/openapi/specs/api.yaml
+++ b/src/openapi/specs/api.yaml
@@ -3739,24 +3739,29 @@ paths:
                 type: object
                 required:
                   - tree
-                  - numElements
-                  - queueType
                 properties:
-                  numElements:
+                  tree:
+                    $ref: '#/components/schemas/Hash'
+                  outputQueueStartIndex:
                     type: integer
-                    format: uint16
+                    format: uint64
+                    nullable: true
                     minimum: 0
-                  queueType:
+                  outputQueueLimit:
                     type: integer
-                    format: uint8
+                    format: uint16
+                    nullable: true
                     minimum: 0
-                  startOffset:
+                  inputQueueStartIndex:
                     type: integer
                     format: uint64
                     nullable: true
                     minimum: 0
-                  tree:
-                    $ref: '#/components/schemas/Hash'
+                  inputQueueLimit:
+                    type: integer
+                    format: uint16
+                    nullable: true
+                    minimum: 0
                 additionalProperties: false
         required: true
       responses:
@@ -3791,19 +3796,29 @@
                 type: object
                 required:
                   - context
-                  - value
-                  - firstValueQueueIndex
                 properties:
                   context:
                     $ref: '#/components/schemas/Context'
-                  firstValueQueueIndex:
+                  outputQueueElements:
+                    type: array
+                    nullable: true
+                    items:
+                      $ref: '#/components/schemas/GetQueueElementsResponseValue'
+                  outputQueueIndex:
                     type: integer
                     format: uint64
+                    nullable: true
                     minimum: 0
-                  value:
+                  inputQueueElements:
                     type: array
+                    nullable: true
                     items:
                       $ref: '#/components/schemas/GetQueueElementsResponseValue'
+                  inputQueueIndex:
+                    type: integer
+                    format: uint64
+                    nullable: true
+                    minimum: 0
                 additionalProperties: false
         '429':
           description: Exceeded rate limit.
diff --git a/src/snapshot/gcs_utils/mod.rs b/src/snapshot/gcs_utils/mod.rs
new file mode 100644
index 00000000..89151cd0
--- /dev/null
+++ b/src/snapshot/gcs_utils/mod.rs
@@ -0,0 +1 @@
+pub mod resumable_upload;
diff --git a/src/snapshot/gcs_utils/resumable_upload.rs b/src/snapshot/gcs_utils/resumable_upload.rs
new file mode 100644
index 00000000..ad319cd9
--- /dev/null
+++ b/src/snapshot/gcs_utils/resumable_upload.rs
@@ -0,0 +1,426 @@
+use anyhow::{anyhow, Context, Result};
+use bytes::Bytes;
+use futures::{pin_mut, Stream, StreamExt};
+use log::{debug, info, warn};
+use reqwest::header::{CONTENT_LENGTH, CONTENT_RANGE, CONTENT_TYPE};
+use reqwest::Client;
+use std::time::Duration;
+use tokio::time::sleep;
+
+// 8 MB chunk size (GCS recommends multiples of 256KB, minimum 256KB for resumable)
+const CHUNK_SIZE: usize = 8 * 1024 * 1024;
+const MAX_RETRIES: u32 = 5;
+const INITIAL_BACKOFF_MS: u64 = 1000;
+
+/// Performs a resumable upload to Google Cloud Storage.
+/// This handles large files by uploading in chunks and supports resuming on failure.
+pub async fn resumable_upload(
+    bucket: &str,
+    object_name: &str,
+    byte_stream: impl Stream<Item = Result<Bytes>> + Send + 'static,
+    access_token: &str,
+) -> Result<()> {
+    // Step 1: Initiate the resumable upload session
+    let upload_uri = initiate_resumable_upload(bucket, object_name, access_token).await?;
+    info!(
+        "Initiated resumable upload for {}/{}, upload URI obtained",
+        bucket, object_name
+    );
+
+    // Step 2: Upload chunks
+    upload_chunks(&upload_uri, byte_stream, access_token).await?;
+
+    info!(
+        "Successfully completed resumable upload for {}/{}",
+        bucket, object_name
+    );
+    Ok(())
+}
+
+/// Initiates a resumable upload session and returns the upload URI
+async fn initiate_resumable_upload(
+    bucket: &str,
+    object_name: &str,
+    access_token: &str,
+) -> Result<String> {
+    let client = Client::new();
+    let url = format!(
+        "https://storage.googleapis.com/upload/storage/v1/b/{}/o?uploadType=resumable&name={}",
+        bucket, object_name
+    );
+
+    for attempt in 0..MAX_RETRIES {
+        let response = client
+            .post(&url)
+            .header("Authorization", format!("Bearer {}", access_token))
+            .header(CONTENT_TYPE, "application/json")
+            .header("X-Upload-Content-Type", "application/octet-stream")
+            .body("{}")
+            .send()
+            .await;
+
+        match response {
+            Ok(resp) => {
+                if resp.status().is_success() {
+                    let upload_uri = resp
+                        .headers()
+                        .get("Location")
+                        .ok_or_else(|| anyhow!("No Location header in resumable upload response"))?
+                        .to_str()
+                        .context("Invalid Location header")?
+                        .to_string();
+                    return Ok(upload_uri);
+                } else if resp.status().is_server_error() || resp.status().as_u16() == 429 {
+                    // Retry on 5xx or 429 (rate limit)
+                    let backoff = INITIAL_BACKOFF_MS * 2u64.pow(attempt);
+                    warn!(
+                        "Resumable upload initiation failed with status {}, retrying in {}ms (attempt {}/{})",
+                        resp.status(),
+                        backoff,
+                        attempt + 1,
+                        MAX_RETRIES
+                    );
+                    sleep(Duration::from_millis(backoff)).await;
+                } else {
+                    let status = resp.status();
+                    let body = resp.text().await.unwrap_or_default();
+                    return Err(anyhow!(
+                        "Failed to initiate resumable upload: {} - {}",
+                        status,
+                        body
+                    ));
+                }
+            }
+            Err(e) => {
+                let backoff = INITIAL_BACKOFF_MS * 2u64.pow(attempt);
+                warn!(
+                    "Resumable upload initiation request failed: {}, retrying in {}ms (attempt {}/{})",
+                    e, backoff, attempt + 1, MAX_RETRIES
+                );
+                sleep(Duration::from_millis(backoff)).await;
+            }
+        }
+    }
+
+    Err(anyhow!(
+        "Failed to initiate resumable upload after {} retries",
+        MAX_RETRIES
+    ))
+}
+
+/// Uploads data in chunks to the resumable upload URI
+async fn upload_chunks(
+    upload_uri: &str,
+    byte_stream: impl Stream<Item = Result<Bytes>> + Send + 'static,
+    _access_token: &str,
+) -> Result<()> {
+    let client = Client::builder()
+        .timeout(Duration::from_secs(300)) // 5 minute timeout per chunk
+        .build()?;
+
+    pin_mut!(byte_stream);
+
+    // First, we need to collect all data to know total size
+    // For very large files, we could use unknown size (*) but that's more complex
+    let mut all_data = Vec::new();
+    while let Some(chunk_result) = byte_stream.next().await {
+        let chunk = chunk_result?;
+        all_data.extend_from_slice(&chunk);
+    }
+
+    let total_size = all_data.len() as u64;
+    info!(
+        "Total upload size: {} bytes ({:.2} MB)",
+        total_size,
+        total_size as f64 / 1024.0 / 1024.0
+    );
+
+    if total_size == 0 {
+        // Handle empty file case
+        let response = client
+            .put(upload_uri)
+            .header(CONTENT_LENGTH, "0")
+            .header(CONTENT_RANGE, "bytes */*")
+            .send()
+            .await
+            .context("Failed to upload empty file")?;
+
+        if !response.status().is_success() {
+            let status = response.status();
+            let body = response.text().await.unwrap_or_default();
+            return Err(anyhow!(
+                "Failed to upload empty file: {} - {}",
+                status,
+                body
+            ));
+        }
+        return Ok(());
+    }
+
+    // Upload in chunks
+    let mut offset: u64 = 0;
+    'outer: while offset < total_size {
+        let chunk_end = std::cmp::min(offset + CHUNK_SIZE as u64, total_size);
+        let chunk_data = &all_data[offset as usize..chunk_end as usize];
+        let is_last_chunk = chunk_end == total_size;
+
+        let content_range = format!("bytes {}-{}/{}", offset, chunk_end - 1, total_size);
+
+        debug!(
+            "Uploading chunk: {} ({} bytes)",
+            content_range,
+            chunk_data.len()
+        );
+
+        let mut attempt = 0;
+        loop {
+            let response = client
+                .put(upload_uri)
+                .header(CONTENT_LENGTH, chunk_data.len().to_string())
+                .header(CONTENT_RANGE, &content_range)
+                .header(CONTENT_TYPE, "application/octet-stream")
+                .body(chunk_data.to_vec())
+                .send()
+                .await;
+
+            match response {
+                Ok(resp) => {
+                    let status = resp.status();
+
+                    // 200 or 201 = upload complete
+                    // 308 = chunk accepted, continue
+                    if status.is_success() {
+                        if is_last_chunk {
+                            info!("Upload complete!");
+                        }
+                        break;
+                    } else if status.as_u16() == 308 {
+                        // Resume Incomplete - chunk accepted
+                        debug!("Chunk uploaded successfully (308)");
+                        break;
+                    } else if status.is_server_error() || status.as_u16() == 429 {
+                        attempt += 1;
+                        if attempt >= MAX_RETRIES {
+                            let body = resp.text().await.unwrap_or_default();
+                            return Err(anyhow!(
+                                "Failed to upload chunk after {} retries: {} - {}",
+                                MAX_RETRIES,
+                                status,
+                                body
+                            ));
+                        }
+                        let backoff = INITIAL_BACKOFF_MS * 2u64.pow(attempt);
+                        warn!(
+                            "Chunk upload failed with status {}, retrying in {}ms (attempt {}/{})",
+                            status, backoff, attempt, MAX_RETRIES
+                        );
+                        sleep(Duration::from_millis(backoff)).await;
+
+                        // Query the upload status to resume from correct position
+                        if let Some(new_offset) =
+                            query_upload_status(&client, upload_uri, total_size).await?
+                        {
+                            if new_offset != offset {
+                                info!("Resuming from byte {} (was at {})", new_offset, offset);
+                                offset = new_offset;
+                                continue 'outer; // Recalculate chunk from new position
+                            }
+                        }
+                    } else {
+                        let body = resp.text().await.unwrap_or_default();
+                        return Err(anyhow!("Failed to upload chunk: {} - {}", status, body));
+                    }
+                }
+                Err(e) => {
+                    attempt += 1;
+                    if attempt >= MAX_RETRIES {
+                        return Err(anyhow!(
+                            "Failed to upload chunk after {} retries: {}",
+                            MAX_RETRIES,
+                            e
+                        ));
+                    }
+                    let backoff = INITIAL_BACKOFF_MS * 2u64.pow(attempt);
+                    warn!(
+                        "Chunk upload request failed: {}, retrying in {}ms (attempt {}/{})",
+                        e, backoff, attempt, MAX_RETRIES
+                    );
+                    sleep(Duration::from_millis(backoff)).await;
+
+                    // Query the upload status to resume from correct position
+                    if let Some(new_offset) =
+                        query_upload_status(&client, upload_uri, total_size).await?
+                    {
+                        if new_offset != offset {
+                            info!("Resuming from byte {} (was at {})", new_offset, offset);
+                            offset = new_offset;
+                            continue 'outer; // Recalculate chunk from new position
+                        }
+                    }
+                }
+            }
+        }
+
+        offset = chunk_end;
+
+        // Log progress every 100MB
+        if offset % (100 * 1024 * 1024) < CHUNK_SIZE as u64 {
+            info!(
+                "Upload progress: {:.1}% ({:.2} MB / {:.2} MB)",
+                (offset as f64 / total_size as f64) * 100.0,
+                offset as f64 / 1024.0 / 1024.0,
+                total_size as f64 / 1024.0 / 1024.0
+            );
+        }
+    }
+
+    Ok(())
+}
+
+/// Query the current upload status to determine how many bytes have been received
+async fn query_upload_status(
+    client: &Client,
+    upload_uri: &str,
+    total_size: u64,
+) -> Result<Option<u64>> {
+    let response = client
+        .put(upload_uri)
+        .header(CONTENT_LENGTH, "0")
+        .header(CONTENT_RANGE, format!("bytes */{}", total_size))
+        .send()
+        .await;
+
+    match response {
+        Ok(resp) => {
+            let status = resp.status();
+            if status.as_u16() == 308 {
+                // Parse Range header to find out how much was uploaded
+                if let Some(range) = resp.headers().get("Range") {
+                    let range_str = range.to_str().unwrap_or("");
+                    // Format: "bytes=0-N" where N is the last byte received
+                    if let Some(end) = range_str.strip_prefix("bytes=0-") {
+                        if let Ok(last_byte) = end.parse::<u64>() {
+                            return Ok(Some(last_byte + 1));
+                        }
+                    }
+                }
+                // No Range header means nothing uploaded yet
+                return Ok(Some(0));
+            } else if status.is_success() {
+                // Upload is already complete
+                return Ok(None);
+            }
+            // Other status - can't determine position
+            Ok(None)
+        }
+        Err(_) => Ok(None),
+    }
+}
+
+/// Gets an access token using the service account credentials from GOOGLE_APPLICATION_CREDENTIALS
+pub async fn get_access_token() -> Result<String> {
+    // Use gcloud auth to get the token, or parse the service account JSON
+    // The cloud-storage crate handles this internally, but we need to do it manually for reqwest
+
+    // First try service account file if GOOGLE_APPLICATION_CREDENTIALS is set
+    // This ensures we use the correct OAuth scopes defined in get_token_from_service_account
+    if let Ok(credentials_path) = std::env::var("GOOGLE_APPLICATION_CREDENTIALS") {
+        if let Ok(token) = get_token_from_service_account(&credentials_path).await {
+            return Ok(token);
+        }
+    }
+
+    // Fall back to metadata service (when running on GCP without explicit credentials)
+    get_token_from_metadata_service()
+        .await
+        .context("Failed to get access token from metadata service or service account file")
+}
+
+async fn get_token_from_metadata_service() -> Result<String> {
+    let client = Client::builder().timeout(Duration::from_secs(5)).build()?;
+
+    let response = client
+        .get("http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token")
+        .header("Metadata-Flavor", "Google")
+        .send()
+        .await?;
+
+    if response.status().is_success() {
+        let json: serde_json::Value = response.json().await?;
+        let token = json["access_token"]
+            .as_str()
+            .ok_or_else(|| anyhow!("No access_token in metadata response"))?;
+        Ok(token.to_string())
+    } else {
+        Err(anyhow!("Failed to get token from metadata service"))
+    }
+}
+
+async fn get_token_from_service_account(credentials_path: &str) -> Result<String> {
+    use jsonwebtoken::{encode, Algorithm, EncodingKey, Header};
+    use serde::{Deserialize, Serialize};
+    use std::time::{SystemTime, UNIX_EPOCH};
+
+    #[derive(Debug, Deserialize)]
+    struct ServiceAccount {
+        client_email: String,
+        private_key: String,
+        token_uri: String,
+    }
+
+    #[derive(Debug, Serialize)]
+    struct Claims {
+        iss: String,
+        scope: String,
+        aud: String,
+        exp: u64,
+        iat: u64,
+    }
+
+    let credentials_json = std::fs::read_to_string(credentials_path)
+        .context("Failed to read service account credentials file")?;
+    let sa: ServiceAccount =
+        serde_json::from_str(&credentials_json).context("Failed to parse service account JSON")?;
+
+    let now = SystemTime::now()
+        .duration_since(UNIX_EPOCH)
+        .unwrap()
+        .as_secs();
+
+    let claims = Claims {
+        iss: sa.client_email.clone(),
+        scope: "https://www.googleapis.com/auth/devstorage.read_write".to_string(),
+        aud: sa.token_uri.clone(),
+        exp: now + 3600,
+        iat: now,
+    };
+
+    let header = Header::new(Algorithm::RS256);
+    let key = EncodingKey::from_rsa_pem(sa.private_key.as_bytes())
+        .context("Failed to parse private key")?;
+    let jwt = encode(&header, &claims, &key).context("Failed to encode JWT")?;
+
+    // Exchange JWT for access token
+    let client = Client::new();
+    let response = client
+        .post(&sa.token_uri)
+        .form(&[
+            ("grant_type", "urn:ietf:params:oauth:grant-type:jwt-bearer"),
+            ("assertion", &jwt),
+        ])
+        .send()
+        .await
+        .context("Failed to exchange JWT for access token")?;
+
+    if response.status().is_success() {
+        let json: serde_json::Value = response.json().await?;
+        let token = json["access_token"]
+            .as_str()
+            .ok_or_else(|| anyhow!("No access_token in token response"))?;
+        Ok(token.to_string())
+    } else {
+        let status = response.status();
+        let body = response.text().await.unwrap_or_default();
+        Err(anyhow!("Failed to get access token: {} - {}", status, body))
+    }
+}
diff --git a/src/snapshot/mod.rs b/src/snapshot/mod.rs
index 689d604c..20096f61 100644
--- a/src/snapshot/mod.rs
+++ b/src/snapshot/mod.rs
@@ -29,6 +29,7 @@ use s3::{bucket::Bucket, BucketConfiguration};
 use s3_utils::multipart_upload::put_object_stream_custom;
 use tokio::io::{AsyncRead, ReadBuf};
 
+pub mod gcs_utils;
 pub mod s3_utils;
 
 pub const MEGABYTE: usize = 1024 * 1024;
@@ -258,30 +259,24 @@ impl GCSDirectoryAdapter {
         byte_stream: impl Stream<Item = Result<Bytes>> + std::marker::Send + 'static,
     ) -> Result<()> {
         let full_path = if self.gcs_prefix.is_empty() {
-            path
+            path.clone()
         } else {
             format!("{}/{}", self.gcs_prefix, path)
         };
 
-        // Collect the stream into a Vec
-        pin_mut!(byte_stream);
-        let mut data = Vec::new();
-        while let Some(chunk) = byte_stream.next().await {
-            let chunk = chunk?;
-            data.extend_from_slice(&chunk);
-        }
-
-        // Upload to GCS
-        self.gcs_client
-            .object()
-            .create(
-                &self.gcs_bucket,
-                data,
-                &full_path,
-                "application/octet-stream",
-            )
+        // Use resumable upload for reliable large file uploads
+        let access_token = gcs_utils::resumable_upload::get_access_token()
             .await
-            .with_context(|| format!("Failed to write file to GCS: {:?}", full_path))?;
+            .with_context(|| "Failed to get GCS access token")?;
+
+        gcs_utils::resumable_upload::resumable_upload(
+            &self.gcs_bucket,
+            &full_path,
+            byte_stream,
+            &access_token,
+        )
+        .await
+        .with_context(|| format!("Failed to write file to GCS: {:?}", full_path))?;
 
         Ok(())
     }
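The resumable-upload bookkeeping in isolation (a sketch with hypothetical helper names; the inclusive end byte and the `Range: bytes=0-N` reply format follow the GCS protocol exercised above):

    // Sketch: Content-Range construction and 308-resume parsing.
    fn content_range(offset: u64, chunk_len: u64, total: u64) -> String {
        // GCS expects an inclusive end byte: "bytes 0-8388607/16777216" for the first 8 MB chunk.
        format!("bytes {}-{}/{}", offset, offset + chunk_len - 1, total)
    }

    fn resume_offset(range_header: &str) -> Option<u64> {
        // A 308 reply's "Range: bytes=0-N" means N is the last byte stored,
        // so the next chunk starts at N + 1.
        range_header
            .strip_prefix("bytes=0-")
            .and_then(|end| end.parse::<u64>().ok())
            .map(|last| last + 1)
    }

    assert_eq!(content_range(0, 8 * 1024 * 1024, 16 * 1024 * 1024), "bytes 0-8388607/16777216");
    assert_eq!(resume_offset("bytes=0-8388607"), Some(8_388_608));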
photon_indexer::api::method::get_multiple_new_address_proofs::{ AddressListWithTrees, AddressWithTree, }; +use photon_indexer::api::method::get_queue_elements::{GetQueueElementsRequest, QueueRequest}; use photon_indexer::common::typedefs::serializable_pubkey::SerializablePubkey; use rand::prelude::StdRng; use rand::{Rng, SeedableRng}; @@ -163,16 +163,25 @@ async fn run_batched_address_test( println!("Verifying address queue state before batch update..."); let queue_elements_before = setup .api - .get_batch_address_update_info(GetBatchAddressUpdateInfoRequest { + .get_queue_elements(GetQueueElementsRequest { tree: address_tree_pubkey.to_bytes().into(), - start_queue_index: None, - limit: 100, + output_queue: None, + input_queue: None, + address_queue: Some(QueueRequest { + limit: 100, + start_index: None, + zkp_batch_size: None, + }), }) .await .expect("Failed to get address queue elements before batch update"); + let address_queue_before = queue_elements_before + .address_queue + .expect("Address queue should be present"); + assert_eq!( - queue_elements_before.addresses.len(), + address_queue_before.addresses.len(), total_addresses, "Address queue length mismatch before batch update" ); @@ -180,12 +189,12 @@ async fn run_batched_address_test( println!("expected_addresses len: {}", expected_addresses.len()); println!( "addresses in queue len: {}", - queue_elements_before.addresses.len() + address_queue_before.addresses.len() ); - for (i, element) in queue_elements_before.addresses.iter().enumerate() { + for (i, element) in address_queue_before.addresses.iter().enumerate() { assert_eq!( - element.address.0.to_bytes(), + element.0.to_bytes(), expected_addresses[i].0, // Compare the underlying [u8; 32] "Address queue content mismatch at index {} before batch update", i @@ -211,19 +220,26 @@ async fn run_batched_address_test( println!("Verifying address queue state after batch update..."); let queue_elements_after = setup .api - .get_batch_address_update_info(GetBatchAddressUpdateInfoRequest { + .get_queue_elements(GetQueueElementsRequest { tree: address_tree_pubkey.to_bytes().into(), - start_queue_index: None, - limit: 100, + output_queue: None, + input_queue: None, + address_queue: Some(QueueRequest { + limit: 100, + start_index: None, + zkp_batch_size: None, + }), }) .await .expect("Failed to get address queue elements after batch update"); - println!("Queue elements after update: {:?}", queue_elements_after); + let address_queue_after = queue_elements_after.address_queue; + println!("Queue elements after update: {:?}", address_queue_after); + let addresses_after = address_queue_after.map(|q| q.addresses).unwrap_or_default(); assert!( - queue_elements_after.addresses.is_empty(), + addresses_after.is_empty(), "Address queue should be empty after batch update, but found {} elements", - queue_elements_after.addresses.len() + addresses_after.len() ); println!("Address queue state verified after batch update (empty)."); diff --git a/tests/integration_tests/batched_state_tree_tests.rs b/tests/integration_tests/batched_state_tree_tests.rs index 96494ad9..f910396b 100644 --- a/tests/integration_tests/batched_state_tree_tests.rs +++ b/tests/integration_tests/batched_state_tree_tests.rs @@ -1,14 +1,12 @@ use crate::utils::*; use borsh::BorshSerialize; use function_name::named; -use light_compressed_account::QueueType; -use light_hasher::zero_bytes::poseidon::ZERO_BYTES; use photon_indexer::api::method::get_compressed_accounts_by_owner::GetCompressedAccountsByOwnerRequest; use 
photon_indexer::api::method::get_compressed_token_balances_by_owner::{ GetCompressedTokenBalancesByOwnerRequest, TokenBalance, }; use photon_indexer::api::method::get_multiple_compressed_account_proofs::HashList; -use photon_indexer::api::method::get_queue_elements::GetQueueElementsRequest; +use photon_indexer::api::method::get_queue_elements::{GetQueueElementsRequest, QueueRequest}; use photon_indexer::api::method::get_transaction_with_compression_info::{ get_transaction_helper, get_transaction_helper_v2, }; @@ -152,29 +150,12 @@ async fn test_batched_tree_transactions( println!("accounts {:?}", accounts); - // Get output queue elements + // Track queue lengths and tree pubkey for later verification if !accounts.openedAccounts.is_empty() { output_queue_len += accounts.openedAccounts.len(); merkle_tree_pubkey = accounts.openedAccounts[0].account.merkle_context.tree.0; queue_pubkey = accounts.openedAccounts[0].account.merkle_context.queue.0; - let get_queue_elements_result = setup - .api - .get_queue_elements(GetQueueElementsRequest { - tree: merkle_tree_pubkey.to_bytes().into(), - start_queue_index: None, - queue_type: QueueType::OutputStateV2 as u8, - limit: 100, - }) - .await - .unwrap(); - assert_eq!(get_queue_elements_result.value.len(), output_queue_len); - for (i, element) in get_queue_elements_result.value.iter().enumerate() { - assert_eq!(element.account_hash.0, output_queue_elements[i]); - let proof = element.proof.iter().map(|x| x.0).collect::>(); - assert_eq!(proof, ZERO_BYTES[..proof.len()].to_vec()); - } } - // Get input queue elements if !accounts.closedAccounts.is_empty() { input_queue_len += accounts.closedAccounts.len(); merkle_tree_pubkey = accounts.closedAccounts[0] @@ -183,22 +164,6 @@ async fn test_batched_tree_transactions( .merkle_context .tree .0; - let get_queue_elements_result = setup - .api - .get_queue_elements(GetQueueElementsRequest { - tree: merkle_tree_pubkey.to_bytes().into(), - start_queue_index: None, - queue_type: QueueType::InputStateV2 as u8, - limit: 100, - }) - .await - .unwrap(); - assert_eq!(get_queue_elements_result.value.len(), input_queue_len); - for (i, element) in get_queue_elements_result.value.iter().enumerate() { - assert_eq!(element.account_hash.0, input_queue_elements[i].0); - let proof = element.proof.iter().map(|x| x.0).collect::>(); - assert_eq!(proof, ZERO_BYTES[..proof.len()].to_vec()); - } } } let filtered_outputs = output_queue_elements @@ -274,9 +239,13 @@ async fn test_batched_tree_transactions( .api .get_queue_elements(GetQueueElementsRequest { tree: merkle_tree_pubkey.to_bytes().into(), - start_queue_index: None, - queue_type: QueueType::OutputStateV2 as u8, - limit: 100, + output_queue: Some(QueueRequest { + limit: 100, + start_index: None, + zkp_batch_size: None, + }), + input_queue: None, + address_queue: None, }) .await .unwrap(); @@ -284,9 +253,13 @@ async fn test_batched_tree_transactions( .api .get_queue_elements(GetQueueElementsRequest { tree: merkle_tree_pubkey.to_bytes().into(), - start_queue_index: None, - queue_type: QueueType::InputStateV2 as u8, - limit: 100, + output_queue: None, + input_queue: Some(QueueRequest { + limit: 100, + start_index: None, + zkp_batch_size: None, + }), + address_queue: None, }) .await .unwrap(); @@ -303,9 +276,13 @@ async fn test_batched_tree_transactions( .api .get_queue_elements(GetQueueElementsRequest { tree: merkle_tree_pubkey.to_bytes().into(), - start_queue_index: None, - queue_type: QueueType::OutputStateV2 as u8, - limit: 100, + output_queue: Some(QueueRequest { + limit: 100, + 
start_index: None, + zkp_batch_size: None, + }), + input_queue: None, + address_queue: None, }) .await .unwrap(); @@ -313,87 +290,124 @@ async fn test_batched_tree_transactions( .api .get_queue_elements(GetQueueElementsRequest { tree: merkle_tree_pubkey.to_bytes().into(), - start_queue_index: None, - queue_type: QueueType::InputStateV2 as u8, - limit: 100, + output_queue: None, + input_queue: Some(QueueRequest { + limit: 100, + start_index: None, + zkp_batch_size: None, + }), + address_queue: None, }) .await .unwrap(); let is_nullify_event = i > 9; if is_nullify_event { println!("nullify event {} {}", i, signature); + let pre_output_len = pre_output_queue_elements + .state_queue + .as_ref() + .and_then(|sq| sq.output_queue.as_ref()) + .map_or(0, |v| v.leaves.len()); + let post_output_len = post_output_queue_elements + .state_queue + .as_ref() + .and_then(|sq| sq.output_queue.as_ref()) + .map_or(0, |v| v.leaves.len()); + let pre_input_len = pre_input_queue_elements + .state_queue + .as_ref() + .and_then(|sq| sq.input_queue.as_ref()) + .map_or(0, |v| v.leaves.len()); + let post_input_len = post_input_queue_elements + .state_queue + .as_ref() + .and_then(|sq| sq.input_queue.as_ref()) + .map_or(0, |v| v.leaves.len()); + assert_eq!( - post_output_queue_elements.value.len(), - pre_output_queue_elements.value.len(), + post_output_len, pre_output_len, "Nullify event should not change the length of the output queue." ); assert_eq!( - post_input_queue_elements.value.len(), - pre_input_queue_elements.value.len() - 10, + post_input_len, + pre_input_len.saturating_sub(10), "Nullify event should decrease the length of the input queue by 10." ); - // Insert 1 batch. - for element in pre_input_queue_elements.value[..10].iter() { - println!("nullify leaf index {}", element.leaf_index); - let nullifier = input_queue_elements - .iter() - .find(|x| x.0 == element.account_hash.0) - .unwrap() - .1; - event_merkle_tree - .update(&nullifier, element.leaf_index as usize) - .unwrap(); - } - for element in post_input_queue_elements.value.iter() { - let proof_result = event_merkle_tree - .get_proof_of_leaf(element.leaf_index as usize, true) - .unwrap() - .to_vec(); - let proof = element.proof.iter().map(|x| x.0).collect::>(); - assert_eq!(proof, proof_result); + // Insert 1 batch if we have elements. 
+            if let Some(pre_input_queue) = pre_input_queue_elements
+                .state_queue
+                .as_ref()
+                .and_then(|sq| sq.input_queue.as_ref())
+            {
+                let slice_length = pre_input_queue.leaves.len().min(10);
+                for idx in 0..slice_length {
+                    let leaf_index = pre_input_queue.leaf_indices[idx];
+                    let leaf = &pre_input_queue.leaves[idx];
+                    println!("nullify leaf index {}", leaf_index);
+                    let nullifier = input_queue_elements
+                        .iter()
+                        .find(|x| x.0 == leaf.0)
+                        .unwrap()
+                        .1;
+                    event_merkle_tree
+                        .update(&nullifier, leaf_index as usize)
+                        .unwrap();
+                }
             }
         } else {
             last_inserted_index += 10;
+            let pre_output_len = pre_output_queue_elements
+                .state_queue
+                .as_ref()
+                .and_then(|sq| sq.output_queue.as_ref())
+                .map_or(0, |v| v.leaves.len());
+            let post_output_len = post_output_queue_elements
+                .state_queue
+                .as_ref()
+                .and_then(|sq| sq.output_queue.as_ref())
+                .map_or(0, |v| v.leaves.len());
+            let pre_input_len = pre_input_queue_elements
+                .state_queue
+                .as_ref()
+                .and_then(|sq| sq.input_queue.as_ref())
+                .map_or(0, |v| v.leaves.len());
+            let post_input_len = post_input_queue_elements
+                .state_queue
+                .as_ref()
+                .and_then(|sq| sq.input_queue.as_ref())
+                .map_or(0, |v| v.leaves.len());
+
             assert_eq!(
-                post_input_queue_elements.value.len(),
-                pre_input_queue_elements.value.len(),
+                post_input_len, pre_input_len,
                 "Append event should not change the length of the input queue."
             );
             assert_eq!(
-                post_output_queue_elements.value.len(),
-                pre_output_queue_elements.value.len().saturating_sub(10),
+                post_output_len,
+                pre_output_len.saturating_sub(10),
                 "Append event should decrease the length of the output queue by 10."
             );
-            println!(
-                "post input queue len {}",
-                post_input_queue_elements.value.len(),
-            );
-            println!(
-                "pre input queue len {}",
-                pre_input_queue_elements.value.len(),
-            );
+            println!("post input queue len {}", post_input_len);
+            println!("pre input queue len {}", pre_input_len);
             // Insert 1 batch.
-            let slice_length = pre_output_queue_elements.value.len().min(10);
-            for element in pre_output_queue_elements.value[..slice_length].iter() {
-                // for element in pre_output_queue_elements.value[..10].iter() {
-                let leaf = event_merkle_tree.leaf(element.leaf_index as usize);
+            let pre_output_queue = pre_output_queue_elements
+                .state_queue
+                .as_ref()
+                .and_then(|sq| sq.output_queue.as_ref())
+                .unwrap();
+            let slice_length = pre_output_queue.leaves.len().min(10);
+            for idx in 0..slice_length {
+                let leaf_index = pre_output_queue.leaf_indices[idx];
+                let leaf_hash = &pre_output_queue.leaves[idx];
+                let leaf = event_merkle_tree.leaf(leaf_index as usize);
                 if leaf == [0u8; 32] {
                     event_merkle_tree
-                        .update(&element.account_hash.0, element.leaf_index as usize)
+                        .update(&leaf_hash.0, leaf_index as usize)
                         .unwrap();
-                    println!("append leaf index {}", element.leaf_index);
+                    println!("append leaf index {}", leaf_index);
                 }
             }
-            for element in post_output_queue_elements.value.iter() {
-                let proof_result = event_merkle_tree
-                    .get_proof_of_leaf(element.leaf_index as usize, true)
-                    .unwrap()
-                    .to_vec();
-                let proof = element.proof.iter().map(|x| x.0).collect::<Vec<_>>();
-                assert_eq!(proof, proof_result);
-            }
         }
         for (j, chunk) in filtered_outputs.chunks(4).enumerate() {
             let validity_proof = setup
@@ -434,21 +448,28 @@ async fn test_batched_tree_transactions(
                 == queue_pubkey.to_string()));
         }
     }
-    assert_eq!(event_merkle_tree.root(), merkle_tree.root());
     assert_eq!(output_queue_len, 100);
     assert_eq!(input_queue_len, 50);
     let get_queue_elements_result = setup
         .api
         .get_queue_elements(GetQueueElementsRequest {
             tree: merkle_tree_pubkey.to_bytes().into(),
-            start_queue_index: None,
-            queue_type: QueueType::OutputStateV2 as u8,
-            limit: 100,
+            output_queue: Some(QueueRequest {
+                limit: 100,
+                start_index: None,
+                zkp_batch_size: None,
+            }),
+            input_queue: None,
+            address_queue: None,
         })
         .await
         .unwrap();
     assert_eq!(
-        get_queue_elements_result.value.len(),
+        get_queue_elements_result
+            .state_queue
+            .as_ref()
+            .and_then(|sq| sq.output_queue.as_ref())
+            .map_or(0, |v| v.leaves.len()),
         0,
         "Batched append events not indexed correctly."
     );
@@ -457,14 +478,22 @@
         .api
         .get_queue_elements(GetQueueElementsRequest {
             tree: merkle_tree_pubkey.to_bytes().into(),
-            start_queue_index: None,
-            queue_type: QueueType::InputStateV2 as u8,
-            limit: 100,
+            output_queue: None,
+            input_queue: Some(QueueRequest {
+                limit: 100,
+                start_index: None,
+                zkp_batch_size: None,
+            }),
+            address_queue: None,
         })
         .await
         .unwrap();
     assert_eq!(
-        get_queue_elements_result.value.len(),
+        get_queue_elements_result
+            .state_queue
+            .as_ref()
+            .and_then(|sq| sq.input_queue.as_ref())
+            .map_or(0, |v| v.leaves.len()),
         0,
         "Batched nullify events not indexed correctly."
     );
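Editorial note: after this change the test repeats the same `state_queue` → `output_queue`/`input_queue` → `leaves` traversal in eight places. A pair of small helpers would cut that boilerplate. The sketch below is illustrative only: the type names (`Hash`, `QueueElements`, `StateQueue`, `GetQueueElementsResponse`) are assumptions, and only the field names are taken from the diff above.

```rust
// Sketch only -- type names are hypothetical; field names mirror the diff.
pub struct Hash(pub [u8; 32]);

pub struct QueueElements {
    pub leaves: Vec<Hash>,      // queued account hashes
    pub leaf_indices: Vec<u64>, // matching leaf positions in the Merkle tree
}

pub struct StateQueue {
    pub output_queue: Option<QueueElements>,
    pub input_queue: Option<QueueElements>,
}

pub struct GetQueueElementsResponse {
    pub state_queue: Option<StateQueue>,
}

/// Number of elements in the output queue; 0 when the queue is absent.
pub fn output_queue_len(resp: &GetQueueElementsResponse) -> usize {
    resp.state_queue
        .as_ref()
        .and_then(|sq| sq.output_queue.as_ref())
        .map_or(0, |q| q.leaves.len())
}

/// Number of elements in the input (nullifier) queue; 0 when absent.
pub fn input_queue_len(resp: &GetQueueElementsResponse) -> usize {
    resp.state_queue
        .as_ref()
        .and_then(|sq| sq.input_queue.as_ref())
        .map_or(0, |q| q.leaves.len())
}
```

With helpers like these, each pre/post assertion collapses to something like `assert_eq!(output_queue_len(&post), output_queue_len(&pre))`.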
diff --git a/tests/integration_tests/utils.rs b/tests/integration_tests/utils.rs
index 3b44e791..073b4402 100644
--- a/tests/integration_tests/utils.rs
+++ b/tests/integration_tests/utils.rs
@@ -31,10 +31,10 @@ use photon_indexer::ingester::index_block;
 use photon_indexer::ingester::typedefs::block_info::BlockMetadata;
 use photon_indexer::monitor::tree_metadata_sync::{upsert_tree_metadata, TreeAccountData};
 pub use rstest::rstest;
+use solana_account::Account as SolanaAccount;
 use solana_client::{
     nonblocking::rpc_client::RpcClient, rpc_config::RpcTransactionConfig, rpc_request::RpcRequest,
 };
-use solana_account::Account as SolanaAccount;
 use solana_clock::Slot;
 use solana_commitment_config::CommitmentConfig;
 use solana_commitment_config::CommitmentLevel;
@@ -240,11 +240,20 @@ pub async fn setup(name: String, database_backend: DatabaseBackend) -> TestSetup
 pub async fn setup_pg_pool(database_url: String) -> PgPool {
     let options: PgConnectOptions = database_url.parse().unwrap();
-    PgPoolOptions::new()
+    let pool = PgPoolOptions::new()
         .min_connections(1)
         .connect_with(options)
         .await
-        .unwrap()
+        .unwrap();
+
+    // Set the default isolation level to READ COMMITTED so each statement sees
+    // the latest committed data. Note: SET SESSION only configures the single
+    // pooled connection that executes it, not connections the pool opens later.
+    sqlx::query("SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL READ COMMITTED")
+        .execute(&pool)
+        .await
+        .unwrap();
+
+    pool
 }

 pub async fn setup_sqllite_pool() -> SqlitePool {
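Editorial note on `setup_pg_pool`: as the corrected comment above points out, a one-off `SET SESSION CHARACTERISTICS ...` issued through the pool only configures whichever connection happens to execute it; connections the pool opens afterwards keep the server default. If the intent is genuinely pool-wide, sqlx's `after_connect` hook is the usual mechanism. A minimal sketch, assuming sqlx 0.7+ (where the callback receives the connection plus pool metadata); this is an alternative, not the PR's implementation:

```rust
use sqlx::postgres::{PgConnectOptions, PgPoolOptions};
use sqlx::PgPool;

// Sketch only: runs the SET statement on every physical connection the pool opens.
pub async fn setup_pg_pool(database_url: String) -> PgPool {
    let options: PgConnectOptions = database_url.parse().unwrap();
    PgPoolOptions::new()
        .min_connections(1)
        .after_connect(|conn, _meta| {
            Box::pin(async move {
                // Executed once per new connection, before it enters the pool.
                sqlx::query(
                    "SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL READ COMMITTED",
                )
                .execute(&mut *conn)
                .await?;
                Ok(())
            })
        })
        .connect_with(options)
        .await
        .unwrap()
}
```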