From 7a9b7756c1c1f7001625ec311bd5682b42acbe50 Mon Sep 17 00:00:00 2001 From: DenzelPenzel Date: Tue, 5 May 2026 21:41:39 +0100 Subject: [PATCH 01/16] statement-store: new api implementation --- Cargo.lock | 5 + cumulus/polkadot-omni-node/lib/Cargo.toml | 1 + .../polkadot-omni-node/lib/src/common/rpc.rs | 6 +- .../tests/zombie_ci/statement_store/common.rs | 86 ++ .../zombie_ci/statement_store/integration.rs | 159 +++- substrate/client/rpc-spec-v2/Cargo.toml | 4 + substrate/client/rpc-spec-v2/src/lib.rs | 1 + .../client/rpc-spec-v2/src/statement/api.rs | 59 ++ .../client/rpc-spec-v2/src/statement/error.rs | 70 ++ .../client/rpc-spec-v2/src/statement/event.rs | 92 ++ .../client/rpc-spec-v2/src/statement/mod.rs | 33 + .../rpc-spec-v2/src/statement/statement.rs | 183 ++++ .../rpc-spec-v2/src/statement/subscription.rs | 789 ++++++++++++++++++ .../client/rpc-spec-v2/src/statement/tests.rs | 497 +++++++++++ substrate/client/statement-store/src/lib.rs | 316 ++++++- .../statement-store/src/subscription.rs | 166 +++- .../primitives/statement-store/src/lib.rs | 5 +- .../statement-store/src/store_api.rs | 27 + 18 files changed, 2485 insertions(+), 14 deletions(-) create mode 100644 substrate/client/rpc-spec-v2/src/statement/api.rs create mode 100644 substrate/client/rpc-spec-v2/src/statement/error.rs create mode 100644 substrate/client/rpc-spec-v2/src/statement/event.rs create mode 100644 substrate/client/rpc-spec-v2/src/statement/mod.rs create mode 100644 substrate/client/rpc-spec-v2/src/statement/statement.rs create mode 100644 substrate/client/rpc-spec-v2/src/statement/subscription.rs create mode 100644 substrate/client/rpc-spec-v2/src/statement/tests.rs diff --git a/Cargo.lock b/Cargo.lock index e7cc7692a99b9..11f789717a6fe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -16994,6 +16994,7 @@ dependencies = [ "sc-network-sync", "sc-offchain", "sc-rpc", + "sc-rpc-spec-v2", "sc-runtime-utilities", "sc-service", "sc-statement-store", @@ -22087,8 +22088,10 @@ dependencies = [ 
"sc-block-builder", "sc-chain-spec 28.0.0", "sc-client-api 28.0.0", + "sc-keystore", "sc-rpc", "sc-service", + "sc-statement-store", "sc-transaction-pool", "sc-transaction-pool-api 28.0.0", "sc-utils 14.0.0", @@ -22104,11 +22107,13 @@ dependencies = [ "sp-maybe-compressed-blob 11.0.0", "sp-rpc", "sp-runtime 31.0.1", + "sp-statement-store", "sp-version 29.0.0", "substrate-prometheus-endpoint 0.17.0", "substrate-test-runtime", "substrate-test-runtime-client", "substrate-test-runtime-transaction-pool", + "tempfile", "thiserror 1.0.65", "tokio", "tokio-stream", diff --git a/cumulus/polkadot-omni-node/lib/Cargo.toml b/cumulus/polkadot-omni-node/lib/Cargo.toml index 6fb25b60bbda8..feba1972b3432 100644 --- a/cumulus/polkadot-omni-node/lib/Cargo.toml +++ b/cumulus/polkadot-omni-node/lib/Cargo.toml @@ -58,6 +58,7 @@ sc-network-statement = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } sc-offchain = { workspace = true, default-features = true } sc-rpc = { workspace = true, default-features = true } +sc-rpc-spec-v2 = { workspace = true, default-features = true } sc-runtime-utilities = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = false } sc-statement-store = { workspace = true, default-features = true } diff --git a/cumulus/polkadot-omni-node/lib/src/common/rpc.rs b/cumulus/polkadot-omni-node/lib/src/common/rpc.rs index 51841a3516dee..d247efe732ab1 100644 --- a/cumulus/polkadot-omni-node/lib/src/common/rpc.rs +++ b/cumulus/polkadot-omni-node/lib/src/common/rpc.rs @@ -27,6 +27,7 @@ use sc_rpc::{ dev::{Dev, DevApiServer}, statement::{StatementApiServer, StatementStore}, }; +use sc_rpc_spec_v2::statement::{StatementSpec, StatementSpecApiServer}; use sp_runtime::traits::Block as BlockT; use std::{marker::PhantomData, sync::Arc}; use substrate_frame_rpc_system::{System, SystemApiServer}; @@ -76,7 +77,10 @@ where 
module.merge(TransactionPayment::new(client.clone()).into_rpc())?; module.merge(StateMigration::new(client.clone(), backend).into_rpc())?; if let Some(statement_store) = statement_store { - module.merge(StatementStore::new(statement_store, spawn_handle).into_rpc())?; + module.merge( + StatementStore::new(statement_store.clone(), spawn_handle.clone()).into_rpc(), + )?; + module.merge(StatementSpec::new(statement_store, spawn_handle).into_rpc())?; } module.merge(Dev::new(client).into_rpc())?; diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/common.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/common.rs index 214e1b5a2e6b5..109a90d1b23c1 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/common.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/common.rs @@ -10,6 +10,7 @@ use anyhow::anyhow; use codec::Encode; use log::info; use sc_statement_store::test_utils::get_keypair; +use serde::Deserialize; use sp_core::{hexdisplay::HexDisplay, Bytes, Pair}; use sp_statement_store::{ statement_allowance_key, StatementAllowance, StatementEvent, SubmitResult, Topic, TopicFilter, @@ -28,6 +29,38 @@ use zombienet_sdk::{ pub(super) const RPC_POOL_SIZE: usize = 10000; +#[derive(Debug, Clone, Deserialize, PartialEq, Eq)] +#[serde(tag = "event", rename_all = "camelCase")] +pub(super) enum UnstableStatementEvent { + ReplayStatements { + #[serde(rename = "filterId")] + filter_id: String, + statements: Vec, + }, + ReplayDone { + #[serde(rename = "filterId")] + filter_id: String, + }, + NewStatements { + statements: Vec, + }, + Stop, +} + +#[derive(Debug, Clone, Deserialize, PartialEq, Eq)] +pub(super) struct UnstableNewStatement { + pub statement: Bytes, + #[serde(rename = "filterIds")] + pub filter_ids: Vec, +} + +#[derive(Debug, Clone, Deserialize, PartialEq, Eq)] +#[serde(untagged)] +pub(super) enum UnstableAddFilterResponse { + Ok(String), + LimitReached { result: String }, +} + 
pub(super) async fn submit_statement( rpc: &RpcClient, statement: &sp_statement_store::Statement, @@ -37,6 +70,59 @@ pub(super) async fn submit_statement( Ok(result) } +pub(super) async fn submit_statement_unstable( + rpc: &RpcClient, + statement: &sp_statement_store::Statement, +) -> Result { + let encoded: Bytes = statement.encode().into(); + let result: SubmitResult = + rpc.request("statement_unstable_submit", rpc_params![encoded]).await?; + Ok(result) +} + +pub(super) async fn subscribe_unstable( + rpc: &RpcClient, +) -> Result, anyhow::Error> { + let subscription = rpc + .subscribe::( + "statement_unstable_subscribe", + rpc_params![], + "statement_unstable_unsubscribe", + ) + .await?; + Ok(subscription) +} + +pub(super) fn unstable_subscription_id( + subscription: &RpcSubscription, +) -> Result { + subscription + .subscription_id() + .map(ToOwned::to_owned) + .ok_or_else(|| anyhow!("Subscription was accepted without an id")) +} + +pub(super) async fn add_filter_unstable( + rpc: &RpcClient, + subscription_id: &str, + filter: TopicFilter, +) -> Result { + let response = rpc + .request("statement_unstable_add_filter", rpc_params![subscription_id, filter]) + .await?; + Ok(response) +} + +pub(super) async fn remove_filter_unstable( + rpc: &RpcClient, + subscription_id: &str, + filter_id: &str, +) -> Result<(), anyhow::Error> { + rpc.request::<()>("statement_unstable_remove_filter", rpc_params![subscription_id, filter_id]) + .await?; + Ok(()) +} + pub(super) async fn expect_one_statement( subscription: &mut RpcSubscription, timeout_secs: u64, diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/integration.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/integration.rs index d7df8fe429098..0521e7f139e34 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/integration.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/integration.rs @@ -2,10 +2,12 @@ // SPDX-License-Identifier: 
Apache-2.0 use super::common::{ - assert_no_more_statements, assert_statements_match, base_dir, collator_default_args, - create_chain_spec_with_allowances, expect_one_statement, expect_statements_unordered, - spawn_network_sudo, spawn_network_with_injected_allowances, submit_statement, subscribe_topic, - subscribe_topic_filter, + add_filter_unstable, assert_no_more_statements, assert_statements_match, base_dir, + collator_default_args, create_chain_spec_with_allowances, expect_one_statement, + expect_statements_unordered, remove_filter_unstable, spawn_network_sudo, + spawn_network_with_injected_allowances, submit_statement, submit_statement_unstable, + subscribe_topic, subscribe_topic_filter, subscribe_unstable, unstable_subscription_id, + UnstableAddFilterResponse, UnstableStatementEvent, }; use codec::Encode; use futures::future::join_all; @@ -13,6 +15,7 @@ use log::{debug, info}; use sc_network_statement::config::STATEMENTS_BURST_COEFFICIENT; use sc_statement_store::test_utils::{create_allowance_items, create_test_statement, get_keypair}; use sp_core::Bytes; +use sp_runtime::BoundedVec; use sp_statement_store::{ RejectionReason, Statement, StatementAllowance, SubmitResult, Topic, TopicFilter, }; @@ -24,6 +27,65 @@ use std::{ }; use zombienet_sdk::{LocalFileSystem, Network, NetworkConfigBuilder}; +async fn expect_unstable_event( + subscription: &mut zombienet_sdk::subxt::ext::subxt_rpcs::client::RpcSubscription< + UnstableStatementEvent, + >, + timeout_secs: u64, +) -> Result { + tokio::time::timeout(Duration::from_secs(timeout_secs), subscription.next()) + .await + .map_err(|_| anyhow::anyhow!("Timeout waiting for unstable statement event"))? + .ok_or_else(|| anyhow::anyhow!("Unstable statement subscription ended"))? 
+ .map_err(|e| anyhow::anyhow!("Unstable statement subscription error: {}", e)) +} + +async fn collect_unstable_replay( + subscription: &mut zombienet_sdk::subxt::ext::subxt_rpcs::client::RpcSubscription< + UnstableStatementEvent, + >, + filter_id: &str, +) -> Result, anyhow::Error> { + let mut statements = Vec::new(); + loop { + match expect_unstable_event(subscription, 20).await? { + UnstableStatementEvent::ReplayStatements { filter_id: id, statements: chunk } + if id == filter_id => + { + statements.extend(chunk) + }, + UnstableStatementEvent::ReplayDone { filter_id: id } if id == filter_id => { + return Ok(statements) + }, + event => anyhow::bail!("Unexpected unstable event before replayDone: {:?}", event), + } + } +} + +async fn assert_no_unstable_event( + subscription: &mut zombienet_sdk::subxt::ext::subxt_rpcs::client::RpcSubscription< + UnstableStatementEvent, + >, + timeout_secs: u64, +) -> Result<(), anyhow::Error> { + let result = tokio::time::timeout(Duration::from_secs(timeout_secs), subscription.next()).await; + assert!(result.is_err(), "Expected no unstable statement event but received one"); + Ok(()) +} + +fn match_all_filter(topic: Topic) -> TopicFilter { + TopicFilter::MatchAll(BoundedVec::truncate_from(vec![topic])) +} + +fn filter_id(response: UnstableAddFilterResponse) -> String { + match response { + UnstableAddFilterResponse::Ok(id) => id, + UnstableAddFilterResponse::LimitReached { result } => { + panic!("Expected filter id, got {result}") + }, + } +} + /// Verifies basic statement propagation and data integrity across two nodes /// /// Tests uses the genesis-injection approach for setting allowances @@ -57,6 +119,95 @@ async fn statement_store_basic_propagation() -> Result<(), anyhow::Error> { Ok(()) } +#[tokio::test(flavor = "multi_thread")] +async fn statement_store_unstable_rpc_multi_filter_flow() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, 
"info"), + ); + + let network = spawn_network_with_injected_allowances(&["alice"], 6).await?; + let alice = network.get_node("alice")?; + let alice_rpc = alice.rpc().await?; + + let topic_a: Topic = [0xA1; 32].into(); + let topic_b: Topic = [0xB2; 32].into(); + let topic_c: Topic = [0xC3; 32].into(); + + let pre_existing = + create_test_statement(&get_keypair(0), &[topic_a], None, vec![1], u32::MAX, 0); + assert_eq!(submit_statement_unstable(&alice_rpc, &pre_existing).await?, SubmitResult::New); + assert_eq!(submit_statement_unstable(&alice_rpc, &pre_existing).await?, SubmitResult::Known); + + let mut subscription = subscribe_unstable(&alice_rpc).await?; + let subscription_id = unstable_subscription_id(&subscription)?; + + let filter_a = filter_id( + add_filter_unstable(&alice_rpc, &subscription_id, match_all_filter(topic_a)).await?, + ); + let replayed = collect_unstable_replay(&mut subscription, &filter_a).await?; + assert_eq!(replayed, vec![Bytes::from(pre_existing.encode())]); + + let filter_b = filter_id( + add_filter_unstable(&alice_rpc, &subscription_id, match_all_filter(topic_b)).await?, + ); + let replayed = collect_unstable_replay(&mut subscription, &filter_b).await?; + assert!(replayed.is_empty(), "Filter B should not replay topic A statements"); + + let live_ab = + create_test_statement(&get_keypair(1), &[topic_a, topic_b], None, vec![2], u32::MAX, 0); + + assert_eq!(submit_statement_unstable(&alice_rpc, &live_ab).await?, SubmitResult::New); + + match expect_unstable_event(&mut subscription, 20).await? 
{ + UnstableStatementEvent::NewStatements { statements } => { + assert_eq!(statements.len(), 1); + assert_eq!(statements[0].statement, Bytes::from(live_ab.encode())); + let filter_ids: HashSet<_> = statements[0].filter_ids.iter().cloned().collect(); + assert_eq!(filter_ids, HashSet::from([filter_a.clone(), filter_b.clone()])); + }, + event => anyhow::bail!("Expected newStatements event, got {:?}", event), + } + + remove_filter_unstable(&alice_rpc, &subscription_id, &filter_a).await?; + let live_b_after_remove = + create_test_statement(&get_keypair(2), &[topic_a, topic_b], None, vec![3], u32::MAX, 0); + + assert_eq!( + submit_statement_unstable(&alice_rpc, &live_b_after_remove).await?, + SubmitResult::New + ); + + match expect_unstable_event(&mut subscription, 20).await? { + UnstableStatementEvent::NewStatements { statements } => { + assert_eq!(statements.len(), 1); + assert_eq!(statements[0].statement, Bytes::from(live_b_after_remove.encode())); + assert_eq!(statements[0].filter_ids, vec![filter_b.clone()]); + }, + event => anyhow::bail!("Expected newStatements event after remove, got {:?}", event), + } + + remove_filter_unstable(&alice_rpc, &subscription_id, &filter_b).await?; + remove_filter_unstable(&alice_rpc, &subscription_id, "999").await?; + + let ignored_after_remove = + create_test_statement(&get_keypair(3), &[topic_b], None, vec![4], u32::MAX, 0); + + assert_eq!( + submit_statement_unstable(&alice_rpc, &ignored_after_remove).await?, + SubmitResult::New + ); + assert_no_unstable_event(&mut subscription, 3).await?; + + let unsupported_filter = TopicFilter::MatchAny(BoundedVec::truncate_from(vec![topic_c])); + let err = add_filter_unstable(&alice_rpc, &subscription_id, unsupported_filter) + .await + .expect_err("matchAny is not supported by the unstable add_filter RPC"); + + assert!(err.to_string().contains("matchAny")); + + Ok(()) +} + /// Verifies concurrent propagation, quota enforcement, and priority eviction /// /// Spawns a single 4-node network with 
mixed allowances: diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index d31604db7bdca..4fd99504ac097 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -33,6 +33,7 @@ prometheus-endpoint = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-rpc = { workspace = true, default-features = true } +sc-statement-store = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } schnellru = { workspace = true } serde = { workspace = true, default-features = true } @@ -42,6 +43,7 @@ sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-rpc = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +sp-statement-store = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } thiserror = { workspace = true } tokio = { features = ["sync"], workspace = true, default-features = true } @@ -53,10 +55,12 @@ async-trait = { workspace = true } jsonrpsee = { workspace = true, features = ["server", "ws-client"] } pretty_assertions = { workspace = true } sc-block-builder = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } sc-rpc = { workspace = true, default-features = true, features = ["test-helpers"] } sc-service = { workspace = true, default-features = true } sc-transaction-pool = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } +tempfile = { workspace = true } serde_json = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } sp-externalities = { workspace = true, default-features = true } 
diff --git a/substrate/client/rpc-spec-v2/src/lib.rs b/substrate/client/rpc-spec-v2/src/lib.rs index d0456cf57d2b8..091c7d9943de6 100644 --- a/substrate/client/rpc-spec-v2/src/lib.rs +++ b/substrate/client/rpc-spec-v2/src/lib.rs @@ -31,6 +31,7 @@ pub mod archive; pub mod bitswap; pub mod chain_head; pub mod chain_spec; +pub mod statement; pub mod transaction; /// Task executor that is being used by RPC subscriptions. diff --git a/substrate/client/rpc-spec-v2/src/statement/api.rs b/substrate/client/rpc-spec-v2/src/statement/api.rs new file mode 100644 index 0000000000000..72e86821d2099 --- /dev/null +++ b/substrate/client/rpc-spec-v2/src/statement/api.rs @@ -0,0 +1,59 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +#![allow(non_snake_case)] + +use crate::statement::{ + error::Error, + event::{AddFilterResponse, SubmitOutcome, SubscribeEvent}, +}; +use jsonrpsee::proc_macros::rpc; +use sp_core::Bytes; +use sp_statement_store::TopicFilter; + +#[rpc(client, server)] +pub trait StatementSpecApi { + /// Opens a new statement subscription + #[subscription( + name = "statement_unstable_subscribe" => "statement_unstable_subscribeEvent", + unsubscribe = "statement_unstable_unsubscribe", + item = SubscribeEvent, + with_extensions, + )] + fn statement_unstable_subscribe(&self); + + /// Attaches a filter to an existing subscription + #[method(name = "statement_unstable_add_filter", with_extensions)] + async fn statement_unstable_add_filter( + &self, + subscription: String, + topic_filter: TopicFilter, + ) -> Result; + + /// Detaches a filter from a subscription + #[method(name = "statement_unstable_remove_filter", with_extensions)] + async fn statement_unstable_remove_filter( + &self, + subscription: String, + filter_id: String, + ) -> Result<(), Error>; + + /// Submits a SCALE-encoded statement to the store + #[method(name = "statement_unstable_submit")] + async fn statement_unstable_submit(&self, encoded: Bytes) -> Result; +} diff --git a/substrate/client/rpc-spec-v2/src/statement/error.rs b/substrate/client/rpc-spec-v2/src/statement/error.rs new file mode 100644 index 0000000000000..9ea7f0753abea --- /dev/null +++ b/substrate/client/rpc-spec-v2/src/statement/error.rs @@ -0,0 +1,70 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use jsonrpsee::types::error::ErrorObject; + +/// Error returned by statement RPC methods +#[derive(Debug, thiserror::Error)] +pub enum Error { + /// The connection has reached its active statement subscription limit + #[error("Maximum number of statement subscriptions for this connection has been reached")] + ReachedLimits, + /// The subscription id is not active on this connection + #[error("Invalid statement subscription identifier")] + InvalidSubscription, + /// A request parameter is invalid + #[error("Invalid parameter: {0}")] + InvalidParam(String), + /// The server failed while handling a valid request + #[error("Internal error: {0}")] + InternalError(String), +} + +pub mod rpc_spec_v2 { + /// Connection has too many statement subscriptions + pub const REACHED_LIMITS: i32 = -32800; + /// Subscription identifier is invalid + pub const INVALID_SUBSCRIPTION: i32 = -32801; +} + +pub mod json_rpc_spec { + /// Request parameters are invalid + pub const INVALID_PARAM_ERROR: i32 = -32602; + /// Internal server error occurs + pub const INTERNAL_ERROR: i32 = -32603; +} + +impl From for ErrorObject<'static> { + fn from(e: Error) -> Self { + let msg = e.to_string(); + match e { + Error::ReachedLimits => { + ErrorObject::owned(rpc_spec_v2::REACHED_LIMITS, msg, None::<()>) + }, + Error::InvalidSubscription => { + ErrorObject::owned(rpc_spec_v2::INVALID_SUBSCRIPTION, msg, None::<()>) + }, + Error::InvalidParam(_) => { + ErrorObject::owned(json_rpc_spec::INVALID_PARAM_ERROR, msg, None::<()>) + }, + Error::InternalError(_) => { + ErrorObject::owned(json_rpc_spec::INTERNAL_ERROR, msg, None::<()>) + }, + } + } +} 
diff --git a/substrate/client/rpc-spec-v2/src/statement/event.rs b/substrate/client/rpc-spec-v2/src/statement/event.rs new file mode 100644 index 0000000000000..0d4f60d31aa77 --- /dev/null +++ b/substrate/client/rpc-spec-v2/src/statement/event.rs @@ -0,0 +1,92 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use serde::{Deserialize, Serialize}; +use sp_core::Bytes; +use sp_statement_store::{InvalidReason, RejectionReason}; + +/// Subscription notification event +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[serde(tag = "event", rename_all = "camelCase")] +pub enum SubscribeEvent { + /// Statements admitted before the filter was attached + ReplayStatements { + #[serde(rename = "filterId")] + filter_id: String, + statements: Vec, + }, + /// Replay completion marker + ReplayDone { + #[serde(rename = "filterId")] + filter_id: String, + }, + /// Statements admitted after matching filters were attached + NewStatements { + statements: Vec, + }, + /// Terminal notification + Stop, +} + +/// Statement item included in a `newStatements` notification +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct NewStatementEntry { + pub statement: Bytes, + #[serde(rename = "filterIds")] + pub filter_ids: Vec, +} + +/// Response returned by `statement_unstable_add_filter` +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[serde(untagged)] +pub enum AddFilterResponse { + Ok(String), + LimitReached(LimitReachedResult), +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct LimitReachedResult { + pub result: LimitReachedTag, +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub enum LimitReachedTag { + LimitReached, +} + +impl AddFilterResponse { + pub fn limit_reached() -> Self { + AddFilterResponse::LimitReached(LimitReachedResult { + result: LimitReachedTag::LimitReached, + }) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[serde(tag = "status", rename_all = "camelCase")] +pub enum SubmitOutcome { + /// The statement was accepted and was not already present in the store + New, + /// The statement is already present in the store + Known, + /// The statement was valid but the store rejected it + 
Rejected(RejectionReason), + /// The statement failed validation + Invalid(InvalidReason), +} diff --git a/substrate/client/rpc-spec-v2/src/statement/mod.rs b/substrate/client/rpc-spec-v2/src/statement/mod.rs new file mode 100644 index 0000000000000..f014cd181ae36 --- /dev/null +++ b/substrate/client/rpc-spec-v2/src/statement/mod.rs @@ -0,0 +1,33 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +pub mod api; +pub mod error; +pub mod event; +mod statement; +mod subscription; + +#[cfg(test)] +mod tests; + +pub use api::StatementSpecApiServer; +pub use error::Error; +pub use event::{AddFilterResponse, SubmitOutcome, SubscribeEvent}; +pub use statement::StatementSpec; + +pub(crate) const LOG_TARGET: &str = "rpc-spec-v2::statement"; diff --git a/substrate/client/rpc-spec-v2/src/statement/statement.rs b/substrate/client/rpc-spec-v2/src/statement/statement.rs new file mode 100644 index 0000000000000..c80ce5afa8686 --- /dev/null +++ b/substrate/client/rpc-spec-v2/src/statement/statement.rs @@ -0,0 +1,183 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::{ + statement::{ + api::StatementSpecApiServer, + error::Error, + event::{AddFilterResponse, SubmitOutcome}, + subscription::{ + add_filter_sync, parse_filter_id, remove_filter_sync, run_subscription_task, + AddFilterOutcome, ControlMessage, StatementSubscriptions, + }, + LOG_TARGET, + }, + SubscriptionTaskExecutor, +}; +use codec::Decode; +use futures::FutureExt; +use jsonrpsee::{ + core::async_trait, types::SubscriptionId, ConnectionId, Extensions, PendingSubscriptionSink, +}; +use sc_rpc::utils::Subscription; +use sc_statement_store::MultiFilterSubscriptionApi; +use sp_core::Bytes; +use sp_statement_store::{ + OptimizedTopicFilter, Statement, StatementSource, StatementStore, SubmitResult, TopicFilter, +}; +use std::sync::Arc; +use tokio::sync::mpsc; + +/// JSON-RPC server implementation for the `statement_unstable_*` methods +pub struct StatementSpec { + store: Arc, + executor: SubscriptionTaskExecutor, + subscriptions: StatementSubscriptions, +} + +impl StatementSpec +where + B: StatementStore + Send + Sync + 'static, + Arc: MultiFilterSubscriptionApi, +{ + pub fn new(store: Arc, executor: SubscriptionTaskExecutor) -> Self { + Self { store, executor, subscriptions: StatementSubscriptions::new() } + } +} + +fn read_subscription_id_as_string(sink: &Subscription) 
-> String { + match sink.subscription_id() { + SubscriptionId::Num(n) => n.to_string(), + SubscriptionId::Str(s) => s.into_owned().into(), + } +} + +fn connection_id(ext: &Extensions) -> ConnectionId { + ext.get::() + .copied() + .expect("ConnectionId is always set by jsonrpsee; qed") +} + +fn validate_topic_filter(filter: TopicFilter) -> Result { + match &filter { + TopicFilter::MatchAny(_) => Err(Error::InvalidParam( + "`matchAny` topic filter is not supported by statement_unstable_add_filter; \ + use `\"any\"` or `{\"matchAll\": [...]}` instead" + .to_string(), + )), + _ => Ok(filter.into()), + } +} + +#[async_trait] +impl StatementSpecApiServer for StatementSpec +where + B: StatementStore + Send + Sync + 'static, + Arc: MultiFilterSubscriptionApi, +{ + fn statement_unstable_subscribe(&self, pending: PendingSubscriptionSink, _ext: &Extensions) { + let subscriptions = self.subscriptions.clone(); + let store = self.store.clone(); + let executor = self.executor.clone(); + + let fut = async move { + let connection_id = pending.connection_id(); + let Some(reserved) = subscriptions.reserve(connection_id) else { + pending.reject(Error::ReachedLimits).await; + return; + }; + + let Ok(sink) = pending.accept().await.map(Subscription::from) else { return }; + let sub_id = read_subscription_id_as_string(&sink); + + let (handle, live_stream) = store.create_subscription(); + let (control_tx, control_rx) = mpsc::unbounded_channel::(); + let Some(entry) = reserved.register(sub_id.clone(), handle, control_tx) else { + log::debug!(target: LOG_TARGET, "duplicate subscription id {sub_id}; aborting"); + return; + }; + + let state = entry.state().clone(); + let task = run_subscription_task(sink, state, live_stream, control_rx); + executor.spawn( + "statement-unstable-subscribe", + Some("rpc"), + async move { + task.await; + drop(entry); + } + .boxed(), + ); + }; + + self.executor + .spawn("statement-unstable-subscribe-init", Some("rpc"), fut.boxed()); + } + + async fn 
statement_unstable_add_filter( + &self, + ext: &Extensions, + subscription: String, + topic_filter: TopicFilter, + ) -> Result { + let conn_id = connection_id(ext); + let topic_filter = validate_topic_filter(topic_filter)?; + + let Some(state) = self.subscriptions.get(conn_id, &subscription) else { + return Err(Error::InvalidSubscription); + }; + + match add_filter_sync(&state, topic_filter)? { + AddFilterOutcome::Added(filter_id) => Ok(AddFilterResponse::Ok( + crate::statement::subscription::filter_id_to_string(filter_id), + )), + AddFilterOutcome::LimitReached => Ok(AddFilterResponse::limit_reached()), + } + } + + async fn statement_unstable_remove_filter( + &self, + ext: &Extensions, + subscription: String, + filter_id: String, + ) -> Result<(), Error> { + let conn_id = connection_id(ext); + let Some(state) = self.subscriptions.get(conn_id, &subscription) else { return Ok(()) }; + let Some(parsed) = parse_filter_id(&filter_id) else { return Ok(()) }; + let _ = remove_filter_sync(&state, parsed); + Ok(()) + } + + async fn statement_unstable_submit(&self, encoded: Bytes) -> Result { + let statement = Statement::decode(&mut &encoded[..]) + .map_err(|e| Error::InvalidParam(format!("Error decoding statement: {e}")))?; + if self.store.has_statement(&statement.hash()) { + return Ok(SubmitOutcome::Known); + } + match self.store.submit(statement, StatementSource::Local) { + SubmitResult::New => Ok(SubmitOutcome::New), + SubmitResult::Known => Ok(SubmitOutcome::Known), + SubmitResult::Rejected(reason) => Ok(SubmitOutcome::Rejected(reason)), + SubmitResult::Invalid(reason) => Ok(SubmitOutcome::Invalid(reason)), + SubmitResult::KnownExpired => { + Err(Error::InternalError("store returned KnownExpired for local submit".into())) + }, + SubmitResult::InternalError(e) => Err(Error::InternalError(e.to_string())), + } + } +} diff --git a/substrate/client/rpc-spec-v2/src/statement/subscription.rs b/substrate/client/rpc-spec-v2/src/statement/subscription.rs new file mode 100644 
// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

//! Subscription bookkeeping for the `statement_unstable_*` RPC API:
//! per-connection registries, per-subscription state, and support types for
//! the long-lived task that replays snapshots and forwards live statements.

use crate::statement::{
	error::Error,
	event::{NewStatementEntry, SubscribeEvent},
};
use codec::Decode;
use futures::StreamExt;
use jsonrpsee::ConnectionId;
use parking_lot::{Mutex, RwLock};
use sc_rpc::utils::Subscription;
use sc_statement_store::{AddFilterError, LiveEventStream, SubscriptionHandle};
use sp_statement_store::{hash_encoded, FilterId, Hash, OptimizedTopicFilter, Statement};
use std::{
	collections::{HashMap, HashSet, VecDeque},
	sync::Arc,
};
use tokio::sync::mpsc;

use crate::common::connections::{RegisteredConnection, RpcConnections};

/// Hard cap on concurrent statement subscriptions per RPC connection.
pub const MAX_SUBSCRIPTIONS_PER_CONNECTION: usize = 16;
/// Approximate byte budget for a single `replayStatements` notification chunk.
const REPLAY_CHUNK_BYTES: usize = 4 * 1024 * 1024;

// Buffering caps are deliberately tiny under `cfg(test)` so the overflow /
// `stop` paths are exercisable by unit tests.
#[cfg(not(test))]
pub(crate) const PENDING_LIVE_HARD_CAP: usize = 64 * 1024;
#[cfg(test)]
pub(crate) const PENDING_LIVE_HARD_CAP: usize = 4;
#[cfg(not(test))]
pub(crate) const EMITTED_VIA_NEW_HARD_CAP: usize = 64 * 1024;
#[cfg(test)]
pub(crate) const EMITTED_VIA_NEW_HARD_CAP: usize = 4;

/// Control-plane message nudging the subscription task to re-check its queues
/// after a filter was added or removed.
pub(crate) enum ControlMessage {
	Wake,
}

/// Result of attaching a filter to a subscription.
pub(crate) enum AddFilterOutcome {
	/// Filter registered; the id to report back to the client.
	Added(FilterId),
	/// The per-subscription filter cap is exhausted.
	LimitReached,
}

/// A snapshot of already-stored statements queued for replay to the client.
struct PendingReplay {
	filter_id: FilterId,
	snapshot: Vec<Vec<u8>>,
}

/// A live statement buffered while a replay for a matching filter is running.
struct PendingLiveStatement {
	hash: Hash,
	encoded: Vec<u8>,
}

/// Per-subscription state shared between RPC handlers and the subscription task
pub(crate) struct SubscriptionState {
	// Store-side handle used to add/remove filters and match statements.
	handle: SubscriptionHandle,
	// Wakes the subscription task after filter changes.
	control_tx: mpsc::UnboundedSender<ControlMessage>,
	// Filters whose snapshot replay has not finished yet.
	replays_in_progress: HashSet<FilterId>,
	// Replays cancelled (filter removed) before completing.
	cancelled_replays: HashSet<FilterId>,
	// Snapshots queued for replay, in filter-registration order.
	pending_replays: VecDeque<PendingReplay>,
	// Live events held back while a matching replay is in flight.
	pending_live: VecDeque<PendingLiveStatement>,
	// For each replayed statement hash, the filters it was replayed under;
	// used to suppress duplicate `newStatements` notifications.
	replayed_filter_ids_by_hash: HashMap<Hash, HashSet<FilterId>>,
	// Hashes already emitted via `newStatements`.
	new_emitted_hashes: HashSet<Hash>,
	// Set once a hard cap is hit; the task then sends `stop` and exits.
	stopped: bool,
}

impl SubscriptionState {
	fn new(handle: SubscriptionHandle, control_tx: mpsc::UnboundedSender<ControlMessage>) -> Self {
		Self {
			handle,
			control_tx,
			replays_in_progress: HashSet::new(),
			cancelled_replays: HashSet::new(),
			pending_replays: VecDeque::new(),
			pending_live: VecDeque::new(),
			replayed_filter_ids_by_hash: HashMap::new(),
			new_emitted_hashes: HashSet::new(),
			stopped: false,
		}
	}

	/// Book-keeping for a freshly added filter: marks its replay in progress,
	/// remembers every replayed hash (for `new` deduplication) and queues the
	/// snapshot for delivery.
	fn record_filter_added(&mut self, filter_id: FilterId, snapshot: Vec<Vec<u8>>) {
		self.replays_in_progress.insert(filter_id);
		for encoded in &snapshot {
			let hash = hash_encoded(encoded);
			self.replayed_filter_ids_by_hash.entry(hash).or_default().insert(filter_id);
		}
		self.pending_replays.push_back(PendingReplay { filter_id, snapshot });
	}

	/// Book-keeping for a removed filter. Returns `true` when a replay for it
	/// was still in progress (that replay is then marked cancelled).
	fn record_filter_removed(&mut self, filter_id: FilterId) -> bool {
		let was_in_progress = self.replays_in_progress.remove(&filter_id);
		if was_in_progress {
			self.cancelled_replays.insert(filter_id);
		}
		// Drop all dedup entries that referenced only this filter.
		self.replayed_filter_ids_by_hash.retain(|_hash, set| {
			set.remove(&filter_id);
			!set.is_empty()
		});
		was_in_progress
	}

	/// Test helper: fills `pending_live` with `count` dummy entries while a
	/// replay for `filter` is pretended to be in progress.
	#[cfg(test)]
	pub(crate) fn fill_pending_live_for_overflow_test(&mut self, filter: FilterId, count: usize) {
		self.replays_in_progress.insert(filter);
		for i in 0..count {
			self.pending_live
				.push_back(PendingLiveStatement { hash: [i as u8; 32], encoded: vec![i as u8] });
		}
	}
}

/// Registry key: a subscription id is only meaningful within its connection.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct SubscriptionKey {
	conn_id: ConnectionId,
	sub_id: String,
}

impl SubscriptionKey {
	fn new(conn_id: ConnectionId, sub_id: impl Into<String>) -> Self {
		Self { conn_id, sub_id: sub_id.into() }
	}
}

type SubscriptionStateRef = Arc<Mutex<SubscriptionState>>;
type SubscriptionRegistry = Arc<RwLock<HashMap<SubscriptionKey, SubscriptionStateRef>>>;

/// Long-lived registry owned by the RPC server
// NOTE(review): the derived `Default` builds `RpcConnections::default()`,
// which may not carry the `MAX_SUBSCRIPTIONS_PER_CONNECTION` limit that
// `new()` installs — confirm `default()` is never used to construct this.
#[derive(Clone, Default)]
pub struct StatementSubscriptions {
	rpc_connections: RpcConnections,
	registry: SubscriptionRegistry,
}

impl StatementSubscriptions {
	pub fn new() -> Self {
		Self {
			registry: Arc::new(RwLock::new(HashMap::new())),
			rpc_connections: RpcConnections::new(MAX_SUBSCRIPTIONS_PER_CONNECTION),
		}
	}

	/// Reserves a slot for a new subscription
	///
	/// Returns `None` when the connection already holds
	/// [`MAX_SUBSCRIPTIONS_PER_CONNECTION`] subscriptions.
	pub fn reserve(&self, conn_id: ConnectionId) -> Option<ReservedSubscription> {
		let reserved_token = self.rpc_connections.reserve_space(conn_id)?;
		Some(ReservedSubscription {
			conn_id,
			token: Some(reserved_token),
			registry: self.registry.clone(),
		})
	}

	/// Gets a subscription owned by the connection
	pub fn get(&self, conn_id: ConnectionId, sub_id: &str) -> Option<SubscriptionStateRef> {
		// Cheap ownership check first so foreign ids never hit the registry.
		if !self.rpc_connections.contains_identifier(conn_id, sub_id) {
			return None;
		}
		self.registry.read().get(&SubscriptionKey::new(conn_id, sub_id)).cloned()
	}
}

/// Reservation for one pending subscription
pub struct ReservedSubscription {
	conn_id: ConnectionId,
	// NOTE(review): generic parameter lost in extraction; presumed to be the
	// token type returned by `RpcConnections::reserve_space` — confirm.
	token: Option<ReservedConnection>,
	registry: SubscriptionRegistry,
}

impl ReservedSubscription {
	/// Turns the reservation into a live registry entry.
	///
	/// Returns `None` if the reservation was already consumed, the id could
	/// not be registered with the connection tracker, or (defensively) the
	/// key already exists in the registry.
	pub fn register(
		mut self,
		sub_id: String,
		handle: SubscriptionHandle,
		control_tx: mpsc::UnboundedSender<ControlMessage>,
	) -> Option<SubscriptionEntry> {
		let token = self.token.take()?;
		let registered = token.register(sub_id.clone())?;
		let state = Arc::new(Mutex::new(SubscriptionState::new(handle, control_tx)));
		let key = SubscriptionKey::new(self.conn_id, sub_id);
		{
			let mut registry = self.registry.write();
			if registry.contains_key(&key) {
				return None;
			}
			registry.insert(key.clone(), state.clone());
		}
		Some(SubscriptionEntry {
			key,
			state,
			_registered: registered,
			registry: self.registry.clone(),
		})
	}
}
_registered: registered, + registry: self.registry.clone(), + }) + } +} + +/// Registered subscription entry +pub struct SubscriptionEntry { + key: SubscriptionKey, + state: SubscriptionStateRef, + _registered: RegisteredConnection, + registry: SubscriptionRegistry, +} + +impl SubscriptionEntry { + pub fn state(&self) -> &SubscriptionStateRef { + &self.state + } +} + +impl Drop for SubscriptionEntry { + fn drop(&mut self) { + self.registry.write().remove(&self.key); + } +} + +pub(crate) fn add_filter_sync( + state: &Arc>, + filter: OptimizedTopicFilter, +) -> Result { + let (control_tx, filter_id) = { + let mut subscription = state.lock(); + let handle = subscription.handle.clone(); + match handle.add_filter(filter) { + Ok((filter_id, snapshot)) => { + subscription.record_filter_added(filter_id, snapshot); + (subscription.control_tx.clone(), filter_id) + }, + Err(AddFilterError::LimitReached) => return Ok(AddFilterOutcome::LimitReached), + Err(AddFilterError::Store(e)) => { + return Err(Error::InternalError(format!("add_filter failed: {e}"))) + }, + } + }; + let _ = control_tx.send(ControlMessage::Wake); + Ok(AddFilterOutcome::Added(filter_id)) +} + +pub(crate) fn remove_filter_sync( + state: &Arc>, + filter_id: FilterId, +) -> bool { + let (was_present, control_tx) = { + let mut subscription = state.lock(); + let _ = subscription.handle.remove_filter(filter_id); + (subscription.record_filter_removed(filter_id), subscription.control_tx.clone()) + }; + let _ = control_tx.send(ControlMessage::Wake); + was_present +} + +pub(crate) fn filter_id_to_string(id: FilterId) -> String { + id.as_u64().to_string() +} +pub(crate) fn parse_filter_id(s: &str) -> Option { + s.parse::().ok().map(FilterId::new) +} + +pub async fn run_subscription_task( + sink: Subscription, + state: Arc>, + mut live_stream: LiveEventStream, + mut control_rx: mpsc::UnboundedReceiver, +) { + loop { + if !drain_pending_replays(&sink, &state).await { + return; + } + if !drain_pending_live(&sink, 
&state).await { + return; + } + if state.lock().stopped { + return; + } + + tokio::select! { + biased; + msg = control_rx.recv() => match msg { + Some(ControlMessage::Wake) => continue, + None => return, + }, + event = live_stream.next() => match event { + Some(event) => { + if !handle_live_event(&sink, &state, event).await { + return; + } + }, + None => return, + }, + } + } +} + +#[cfg_attr(test, derive(Debug, PartialEq))] +pub(crate) enum LiveAction { + Stop, + Noop, + Emit(NewStatementEntry), +} + +pub(crate) fn decide_live_action( + subscription: &mut SubscriptionState, + event: sp_statement_store::LiveStatementEvent, +) -> LiveAction { + if subscription.stopped { + return LiveAction::Noop; + } + + let matched_filters: HashSet = match Statement::decode(&mut &event.encoded[..]) { + Ok(stmt) => subscription.handle.matched_filter_ids(&stmt).into_iter().collect(), + Err(_) => { + log::warn!( + target: super::LOG_TARGET, + "Corrupt statement bytes received on live stream; skipping", + ); + return LiveAction::Noop; + }, + }; + + if matched_filters.iter().any(|f| subscription.replays_in_progress.contains(f)) { + if subscription.pending_live.len() >= PENDING_LIVE_HARD_CAP { + log::warn!( + target: super::LOG_TARGET, + "pending_live cap reached on statement subscription; sending stop", + ); + subscription.stopped = true; + LiveAction::Stop + } else { + subscription + .pending_live + .push_back(PendingLiveStatement { hash: event.hash, encoded: event.encoded }); + LiveAction::Noop + } + } else { + compute_new_statements_action(subscription, event.hash, event.encoded, &matched_filters) + } +} + +async fn handle_live_event( + sink: &Subscription, + state: &Arc>, + event: sp_statement_store::LiveStatementEvent, +) -> bool { + let action = { + let mut subscription = state.lock(); + decide_live_action(&mut subscription, event) + }; + + match action { + LiveAction::Stop => { + let _ = send_event(sink, &SubscribeEvent::Stop).await; + false + }, + LiveAction::Noop => true, + 
LiveAction::Emit(entry) => { + send_event(sink, &SubscribeEvent::NewStatements { statements: vec![entry] }).await + }, + } +} + +fn compute_new_statements_action( + subscription: &mut SubscriptionState, + hash: Hash, + encoded: Vec, + filter_ids: &HashSet, +) -> LiveAction { + if subscription.new_emitted_hashes.contains(&hash) { + return LiveAction::Noop; + } + let replayed_filter_ids = subscription.replayed_filter_ids_by_hash.get(&hash); + let filter_ids: Vec = filter_ids + .iter() + .filter(|f| replayed_filter_ids.map_or(true, |set| !set.contains(f))) + .map(|f| filter_id_to_string(*f)) + .collect(); + if filter_ids.is_empty() { + return LiveAction::Noop; + } + if subscription.new_emitted_hashes.len() >= EMITTED_VIA_NEW_HARD_CAP { + log::warn!( + target: super::LOG_TARGET, + "new_emitted_hashes cap reached on statement subscription; sending stop", + ); + subscription.stopped = true; + return LiveAction::Stop; + } + subscription.new_emitted_hashes.insert(hash); + LiveAction::Emit(NewStatementEntry { statement: sp_core::Bytes(encoded), filter_ids }) +} + +async fn drain_pending_replays(sink: &Subscription, state: &Arc>) -> bool { + loop { + let next = state.lock().pending_replays.pop_front(); + let Some(replay) = next else { return true }; + if !run_replay(sink, state, replay).await { + return false; + } + } +} + +async fn run_replay( + sink: &Subscription, + state: &Arc>, + replay: PendingReplay, +) -> bool { + let filter_id = replay.filter_id; + let filter_id_str = filter_id_to_string(filter_id); + let mut iter = replay.snapshot.into_iter().peekable(); + + while iter.peek().is_some() { + if state.lock().cancelled_replays.contains(&filter_id) { + let mut subscription = state.lock(); + subscription.cancelled_replays.remove(&filter_id); + return true; + } + + let mut chunk: Vec = Vec::new(); + let mut chunk_bytes: usize = 0; + while let Some(stmt) = iter.peek() { + let est = stmt.len().saturating_mul(2).saturating_add(8); + if !chunk.is_empty() && chunk_bytes + est 
> REPLAY_CHUNK_BYTES { + break; + } + let bytes = iter.next().expect("peek above; qed"); + chunk_bytes = chunk_bytes.saturating_add(est); + chunk.push(sp_core::Bytes(bytes)); + if chunk_bytes >= REPLAY_CHUNK_BYTES { + break; + } + } + debug_assert!(!chunk.is_empty(), "loop guard; qed"); + + let event = SubscribeEvent::ReplayStatements { + filter_id: filter_id_str.clone(), + statements: chunk, + }; + if !send_event(sink, &event).await { + return false; + } + } + + let cancelled = { + let mut subscription = state.lock(); + if subscription.cancelled_replays.remove(&filter_id) { + true + } else { + subscription.replays_in_progress.remove(&filter_id); + false + } + }; + if cancelled { + return true; + } + send_event(sink, &SubscribeEvent::ReplayDone { filter_id: filter_id_str }).await +} + +async fn drain_pending_live(sink: &Subscription, state: &Arc>) -> bool { + loop { + let to_send = { + let mut subscription = state.lock(); + if subscription.stopped { + return true; + } + let mut action = LiveAction::Noop; + let mut idx = 0usize; + while idx < subscription.pending_live.len() { + let entry = &subscription.pending_live[idx]; + let stmt = match Statement::decode(&mut &entry.encoded[..]) { + Ok(stmt) => stmt, + Err(_) => { + subscription.pending_live.remove(idx); + continue; + }, + }; + let matched_filters: HashSet = + subscription.handle.matched_filter_ids(&stmt).into_iter().collect(); + let still_blocked = + matched_filters.iter().any(|f| subscription.replays_in_progress.contains(f)); + if still_blocked { + idx += 1; + continue; + } + let popped = subscription.pending_live.remove(idx).expect("idx in range; qed"); + let next_action = compute_new_statements_action( + &mut subscription, + popped.hash, + popped.encoded, + &matched_filters, + ); + if !matches!(next_action, LiveAction::Noop) { + action = next_action; + break; + } + } + action + }; + + match to_send { + LiveAction::Emit(entry) => { + if !send_event(sink, &SubscribeEvent::NewStatements { statements: 
vec![entry] }) + .await + { + return false; + } + }, + LiveAction::Stop => { + let _ = send_event(sink, &SubscribeEvent::Stop).await; + return false; + }, + LiveAction::Noop => return true, + } + } +} + +async fn send_event(sink: &Subscription, event: &SubscribeEvent) -> bool { + match sink.send(event).await { + Ok(()) => true, + Err(_) => false, + } +} + +#[cfg(test)] +mod tests { + use super::*; + use sc_statement_store::{MultiFilterSubscriptionApi, Store}; + use sp_statement_store::OptimizedTopicFilter; + use std::sync::Arc; + use tokio::sync::mpsc; + + fn empty_subscription_state() -> Arc> { + let dir = tempfile::tempdir().expect("tempdir"); + let mut db_path: std::path::PathBuf = dir.path().into(); + db_path.push("db"); + + type Extrinsic = sp_runtime::OpaqueExtrinsic; + type Hash = sp_core::H256; + type Hashing = sp_runtime::traits::BlakeTwo256; + type BlockNumber = u64; + type Header = sp_runtime::generic::Header; + type Block = sp_runtime::generic::Block; + type MockBackend = sc_client_api::in_mem::Backend; + + #[derive(Clone)] + struct TestClient; + + impl sc_client_api::StorageProvider for TestClient { + fn storage( + &self, + _hash: Hash, + _key: &sc_client_api::StorageKey, + ) -> sp_blockchain::Result> { + use codec::Encode; + let allowance = + sp_statement_store::StatementAllowance { max_count: 1000, max_size: 1_000_000 }; + Ok(Some(sc_client_api::StorageData(allowance.encode()))) + } + fn storage_hash( + &self, + _: Hash, + _: &sc_client_api::StorageKey, + ) -> sp_blockchain::Result> { + unimplemented!() + } + fn storage_keys( + &self, + _: Hash, + _: Option<&sc_client_api::StorageKey>, + _: Option<&sc_client_api::StorageKey>, + ) -> sp_blockchain::Result< + sc_client_api::backend::KeysIter< + >::State, + Block, + >, + > { + unimplemented!() + } + fn storage_pairs( + &self, + _: Hash, + _: Option<&sc_client_api::StorageKey>, + _: Option<&sc_client_api::StorageKey>, + ) -> sp_blockchain::Result< + sc_client_api::backend::PairsIter< + >::State, + Block, 
+ >, + > { + unimplemented!() + } + fn child_storage( + &self, + _: Hash, + _: &sc_client_api::ChildInfo, + _: &sc_client_api::StorageKey, + ) -> sp_blockchain::Result> { + unimplemented!() + } + fn child_storage_keys( + &self, + _: Hash, + _: sc_client_api::ChildInfo, + _: Option<&sc_client_api::StorageKey>, + _: Option<&sc_client_api::StorageKey>, + ) -> sp_blockchain::Result< + sc_client_api::backend::KeysIter< + >::State, + Block, + >, + > { + unimplemented!() + } + fn child_storage_hash( + &self, + _: Hash, + _: &sc_client_api::ChildInfo, + _: &sc_client_api::StorageKey, + ) -> sp_blockchain::Result> { + unimplemented!() + } + fn closest_merkle_value( + &self, + _: Hash, + _: &sc_client_api::StorageKey, + ) -> sp_blockchain::Result>> { + unimplemented!() + } + fn child_closest_merkle_value( + &self, + _: Hash, + _: &sc_client_api::ChildInfo, + _: &sc_client_api::StorageKey, + ) -> sp_blockchain::Result>> { + unimplemented!() + } + } + + impl sp_blockchain::HeaderBackend for TestClient { + fn header(&self, _: Hash) -> sp_blockchain::Result> { + unimplemented!() + } + fn info(&self) -> sp_blockchain::Info { + let h = sp_core::H256::repeat_byte(1); + sp_blockchain::Info { + best_hash: h, + best_number: 0, + genesis_hash: Default::default(), + finalized_hash: h, + finalized_number: 1, + finalized_state: None, + number_leaves: 0, + block_gap: None, + } + } + fn status(&self, _: Hash) -> sp_blockchain::Result { + unimplemented!() + } + fn number(&self, _: Hash) -> sp_blockchain::Result> { + unimplemented!() + } + fn hash(&self, _: BlockNumber) -> sp_blockchain::Result> { + unimplemented!() + } + } + + let store = Arc::new( + Store::new::( + &db_path, + Default::default(), + Arc::new(TestClient), + Arc::new(sc_keystore::LocalKeystore::in_memory()), + None, + Box::new(sp_core::testing::TaskExecutor::new()), + ) + .expect("store"), + ); + std::mem::forget(dir); + + let (handle, _live) = store.create_subscription(); + let (tx, _rx) = mpsc::unbounded_channel(); + 
Arc::new(Mutex::new(SubscriptionState::new(handle, tx))) + } + + fn dummy_event() -> sp_statement_store::LiveStatementEvent { + use codec::Encode; + let encoded = sp_statement_store::Statement::new().encode(); + sp_statement_store::LiveStatementEvent { + hash: [0xab; 32], + encoded, + matched_filter_ids: vec![], + } + } + + fn register_any_filter(state: &Arc>) -> FilterId { + let handle = state.lock().handle.clone(); + let (id, _snapshot) = handle.add_filter(OptimizedTopicFilter::Any).expect("add_filter"); + id + } + + #[test] + fn subscription_registry_is_scoped_by_connection_id() { + let subscriptions = StatementSubscriptions::new(); + let conn_a = ConnectionId(1); + let conn_b = ConnectionId(2); + let sub_id = "same-subscription-id".to_string(); + + let handle_a = empty_subscription_state().lock().handle.clone(); + let handle_b = empty_subscription_state().lock().handle.clone(); + let (tx_a, _rx_a) = mpsc::unbounded_channel(); + let (tx_b, _rx_b) = mpsc::unbounded_channel(); + + let entry_a = subscriptions + .reserve(conn_a) + .unwrap() + .register(sub_id.clone(), handle_a, tx_a) + .unwrap(); + let entry_b = subscriptions + .reserve(conn_b) + .unwrap() + .register(sub_id.clone(), handle_b, tx_b) + .unwrap(); + + let state_a = subscriptions.get(conn_a, &sub_id).unwrap(); + let state_b = subscriptions.get(conn_b, &sub_id).unwrap(); + assert!(Arc::ptr_eq(&state_a, entry_a.state())); + assert!(Arc::ptr_eq(&state_b, entry_b.state())); + assert!(!Arc::ptr_eq(&state_a, &state_b)); + + drop(entry_a); + assert!(subscriptions.get(conn_a, &sub_id).is_none()); + assert!(subscriptions.get(conn_b, &sub_id).is_some()); + } + + #[test] + fn decide_live_action_noop_when_stopped() { + let state = empty_subscription_state(); + let mut subscription = state.lock(); + subscription.stopped = true; + assert_eq!(decide_live_action(&mut subscription, dummy_event()), LiveAction::Noop); + } + + #[test] + fn decide_live_action_stop_on_overflow() { + let state = empty_subscription_state(); + 
let id = register_any_filter(&state); + let mut subscription = state.lock(); + subscription.fill_pending_live_for_overflow_test(id, PENDING_LIVE_HARD_CAP); + assert_eq!(decide_live_action(&mut subscription, dummy_event()), LiveAction::Stop); + assert!(subscription.stopped); + } + + #[test] + fn decide_live_action_buffers_when_replay_in_progress_below_cap() { + let state = empty_subscription_state(); + let id = register_any_filter(&state); + let mut subscription = state.lock(); + subscription.fill_pending_live_for_overflow_test(id, PENDING_LIVE_HARD_CAP - 1); + assert_eq!(decide_live_action(&mut subscription, dummy_event()), LiveAction::Noop); + assert!(!subscription.stopped); + assert_eq!(subscription.pending_live.len(), PENDING_LIVE_HARD_CAP); + } + + #[test] + fn decide_live_action_stop_on_emitted_history_overflow() { + let state = empty_subscription_state(); + register_any_filter(&state); + let mut subscription = state.lock(); + for i in 0..EMITTED_VIA_NEW_HARD_CAP { + subscription.new_emitted_hashes.insert([i as u8; 32]); + } + + assert_eq!(decide_live_action(&mut subscription, dummy_event()), LiveAction::Stop); + assert!(subscription.stopped); + assert_eq!(subscription.new_emitted_hashes.len(), EMITTED_VIA_NEW_HARD_CAP); + } +} diff --git a/substrate/client/rpc-spec-v2/src/statement/tests.rs b/substrate/client/rpc-spec-v2/src/statement/tests.rs new file mode 100644 index 0000000000000..128a9f54aef26 --- /dev/null +++ b/substrate/client/rpc-spec-v2/src/statement/tests.rs @@ -0,0 +1,497 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use super::{ + error::rpc_spec_v2::{INVALID_SUBSCRIPTION, REACHED_LIMITS}, + event::{NewStatementEntry, SubmitOutcome, SubscribeEvent}, + subscription::MAX_SUBSCRIPTIONS_PER_CONNECTION, + StatementSpec, StatementSpecApiServer, +}; +use codec::Encode; +use jsonrpsee::{core::server::Subscription as RpcSubscription, MethodsError, RpcModule}; +use sc_rpc::testing::TokioTestExecutor; +use sc_statement_store::Store; +use sp_core::Bytes; +use sp_statement_store::{Statement, StatementAllowance, TopicFilter}; +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; + +type Extrinsic = sp_runtime::OpaqueExtrinsic; +type Hash = sp_core::H256; +type Hashing = sp_runtime::traits::BlakeTwo256; +type BlockNumber = u64; +type Header = sp_runtime::generic::Header; +type Block = sp_runtime::generic::Block; +type MockBackend = sc_client_api::in_mem::Backend; + +#[derive(Clone)] +struct MockClient; + +impl sc_client_api::StorageProvider for MockClient { + fn storage( + &self, + _hash: Hash, + _key: &sc_client_api::StorageKey, + ) -> sp_blockchain::Result> { + let allowance = StatementAllowance { max_count: 10_000, max_size: 100_000_000 }; + Ok(Some(sc_client_api::StorageData(allowance.encode()))) + } + + fn storage_hash( + &self, + _hash: Hash, + _key: &sc_client_api::StorageKey, + ) -> sp_blockchain::Result> { + unimplemented!() + } + + fn storage_keys( + &self, + _hash: Hash, + _prefix: Option<&sc_client_api::StorageKey>, + _start_key: Option<&sc_client_api::StorageKey>, + ) -> sp_blockchain::Result< + sc_client_api::backend::KeysIter< + >::State, + Block, + >, + > { + unimplemented!() + } + + fn storage_pairs( + &self, 
+ _hash: Hash, + _prefix: Option<&sc_client_api::StorageKey>, + _start_key: Option<&sc_client_api::StorageKey>, + ) -> sp_blockchain::Result< + sc_client_api::backend::PairsIter< + >::State, + Block, + >, + > { + unimplemented!() + } + + fn child_storage( + &self, + _hash: Hash, + _child_info: &sc_client_api::ChildInfo, + _key: &sc_client_api::StorageKey, + ) -> sp_blockchain::Result> { + unimplemented!() + } + + fn child_storage_keys( + &self, + _hash: Hash, + _child_info: sc_client_api::ChildInfo, + _prefix: Option<&sc_client_api::StorageKey>, + _start_key: Option<&sc_client_api::StorageKey>, + ) -> sp_blockchain::Result< + sc_client_api::backend::KeysIter< + >::State, + Block, + >, + > { + unimplemented!() + } + + fn child_storage_hash( + &self, + _hash: Hash, + _child_info: &sc_client_api::ChildInfo, + _key: &sc_client_api::StorageKey, + ) -> sp_blockchain::Result> { + unimplemented!() + } + + fn closest_merkle_value( + &self, + _hash: Hash, + _key: &sc_client_api::StorageKey, + ) -> sp_blockchain::Result>> { + unimplemented!() + } + + fn child_closest_merkle_value( + &self, + _hash: Hash, + _child_info: &sc_client_api::ChildInfo, + _key: &sc_client_api::StorageKey, + ) -> sp_blockchain::Result>> { + unimplemented!() + } +} + +impl sp_blockchain::HeaderBackend for MockClient { + fn header(&self, _hash: Hash) -> sp_blockchain::Result> { + unimplemented!() + } + + fn info(&self) -> sp_blockchain::Info { + let best = sp_core::H256::repeat_byte(1); + sp_blockchain::Info { + best_hash: best, + best_number: 0, + genesis_hash: Default::default(), + finalized_hash: best, + finalized_number: 1, + finalized_state: None, + number_leaves: 0, + block_gap: None, + } + } + + fn status(&self, _hash: Hash) -> sp_blockchain::Result { + unimplemented!() + } + + fn number(&self, _hash: Hash) -> sp_blockchain::Result> { + unimplemented!() + } + + fn hash(&self, _number: BlockNumber) -> sp_blockchain::Result> { + unimplemented!() + } +} + +fn make_server() -> RpcModule> { + let 
executor = Arc::new(TokioTestExecutor::default()); + let client = Arc::new(MockClient); + let temp_dir = tempfile::tempdir().expect("tempdir"); + let mut db_path: std::path::PathBuf = temp_dir.path().into(); + db_path.push("db"); + + let store = Store::new::( + &db_path, + Default::default(), + client, + Arc::new(sc_keystore::LocalKeystore::in_memory()), + None, + Box::new((*executor).clone()), + ) + .expect("store"); + std::mem::forget(temp_dir); + + StatementSpec::new(Arc::new(store), executor).into_rpc() +} + +fn signed_statement(seed: u8, topics: &[[u8; 32]]) -> Statement { + use sp_core::Pair as _; + let kp = sp_core::ed25519::Pair::from_string(&format!("//Seed{seed}"), None) + .expect("valid derivation path"); + let mut s = Statement::new(); + + for (i, t) in topics.iter().enumerate() { + s.set_topic(i, (*t).into()); + } + + s.set_expiry_from_parts(u32::MAX, seed as u32); + s.sign_ed25519_private(&kp); + s +} + +fn encoded(s: &Statement) -> Bytes { + Bytes::from(s.encode()) +} + +async fn subscribe(rpc: &RpcModule>) -> RpcSubscription { + rpc.subscribe_unbounded("statement_unstable_subscribe", jsonrpsee::rpc_params![]) + .await + .expect("subscribe") +} + +fn sub_id_string(sub: &RpcSubscription) -> String { + match sub.subscription_id() { + jsonrpsee::types::SubscriptionId::Num(n) => n.to_string(), + jsonrpsee::types::SubscriptionId::Str(s) => s.to_string(), + } +} + +async fn next_event(sub: &mut RpcSubscription) -> SubscribeEvent { + let (event, _) = tokio::time::timeout(Duration::from_secs(5), sub.next::()) + .await + .expect("subscribe event timed out") + .expect("subscription stream ended") + .expect("decode subscribe event"); + event +} + +async fn collect_replay(sub: &mut RpcSubscription, filter_id: &str) -> Vec { + let deadline = Instant::now() + Duration::from_secs(10); + let mut statements = Vec::new(); + while Instant::now() < deadline { + match next_event(sub).await { + SubscribeEvent::ReplayStatements { filter_id: fid, statements: chunk } + if 
fid == filter_id => + { + statements.extend(chunk) + }, + SubscribeEvent::ReplayDone { filter_id: fid } if fid == filter_id => return statements, + other => panic!("unexpected event before replayDone: {other:?}"), + } + } + panic!("replayDone for {filter_id} not observed in time") +} + +#[tokio::test] +async fn duplicate_submit_returns_known() { + let rpc = make_server(); + + let statement = signed_statement(42, &[[9u8; 32]]); + let outcome: SubmitOutcome = rpc + .call("statement_unstable_submit", (encoded(&statement),)) + .await + .expect("submit new"); + assert_eq!(outcome, SubmitOutcome::New); + + let outcome: SubmitOutcome = rpc + .call("statement_unstable_submit", (encoded(&statement),)) + .await + .expect("submit known"); + assert_eq!(outcome, SubmitOutcome::Known); +} + +#[tokio::test] +async fn submit_then_subscribe_replays_and_then_lives() { + let rpc = make_server(); + + let s_pre = signed_statement(1, &[[7u8; 32]]); + let outcome: SubmitOutcome = rpc + .call("statement_unstable_submit", (encoded(&s_pre),)) + .await + .expect("submit pre"); + assert!(matches!(outcome, SubmitOutcome::New)); + + let mut sub = subscribe(&rpc).await; + let sub_id = sub_id_string(&sub); + let filter_id: super::AddFilterResponse = rpc + .call("statement_unstable_add_filter", (sub_id, TopicFilter::Any)) + .await + .expect("add_filter Any"); + let filter_id = match filter_id { + super::AddFilterResponse::Ok(id) => id, + super::AddFilterResponse::LimitReached(_) => panic!("unexpected LimitReached"), + }; + + let replayed = collect_replay(&mut sub, &filter_id).await; + assert_eq!(replayed.len(), 1); + + let s_post = signed_statement(2, &[[7u8; 32]]); + let outcome: SubmitOutcome = rpc + .call("statement_unstable_submit", (encoded(&s_post),)) + .await + .expect("submit post"); + assert!(matches!(outcome, SubmitOutcome::New)); + + match next_event(&mut sub).await { + SubscribeEvent::NewStatements { statements } => { + assert_eq!(statements.len(), 1); + let NewStatementEntry { 
filter_ids, .. } = &statements[0]; + assert_eq!(filter_ids, &vec![filter_id.clone()]); + }, + other => panic!("expected NewStatements, got {other:?}"), + } +} + +#[tokio::test] +async fn replayed_statement_excluded_from_new_for_same_filter() { + let rpc = make_server(); + + let s = signed_statement(11, &[[5u8; 32]]); + let _: SubmitOutcome = + rpc.call("statement_unstable_submit", (encoded(&s),)).await.expect("submit"); + + let mut sub = subscribe(&rpc).await; + let sub_id = sub_id_string(&sub); + let resp: super::AddFilterResponse = rpc + .call("statement_unstable_add_filter", (sub_id, TopicFilter::Any)) + .await + .expect("add_filter"); + let filter_id = match resp { + super::AddFilterResponse::Ok(id) => id, + super::AddFilterResponse::LimitReached(_) => panic!("unexpected LimitReached"), + }; + + let replayed = collect_replay(&mut sub, &filter_id).await; + assert_eq!(replayed.len(), 1); + + let _: SubmitOutcome = + rpc.call("statement_unstable_submit", (encoded(&s),)).await.expect("re-submit"); + + let s2 = signed_statement(12, &[[5u8; 32]]); + let _: SubmitOutcome = + rpc.call("statement_unstable_submit", (encoded(&s2),)).await.expect("submit s2"); + + match next_event(&mut sub).await { + SubscribeEvent::NewStatements { statements } => { + assert_eq!(statements.len(), 1); + let entry = &statements[0]; + assert_eq!( + entry.statement.0, + s2.encode(), + "expected fresh statement, not replayed one" + ); + assert_eq!(entry.filter_ids, vec![filter_id]); + }, + other => panic!("expected NewStatements with fresh statement, got {other:?}"), + } +} + +#[tokio::test] +async fn add_filter_rejects_match_any_topic_filter() { + use sp_runtime::BoundedVec; + + let rpc = make_server(); + let sub = subscribe(&rpc).await; + let sub_id = sub_id_string(&sub); + + let topic = sp_statement_store::Topic::from([1u8; 32]); + let filter = TopicFilter::MatchAny(BoundedVec::truncate_from(vec![topic])); + let err = rpc + .call::<_, 
super::AddFilterResponse>("statement_unstable_add_filter", (sub_id, filter)) + .await + .expect_err("matchAny must be rejected"); + + let s = err.to_string(); + assert!(s.contains("matchAny")); + drop(sub); +} + +#[tokio::test] +async fn add_filter_returns_limit_reached_when_filter_cap_is_exhausted() { + let rpc = make_server(); + let sub = subscribe(&rpc).await; + let sub_id = sub_id_string(&sub); + + for i in 0..sc_statement_store::MAX_FILTERS_PER_SUBSCRIPTION { + let resp: super::AddFilterResponse = rpc + .call("statement_unstable_add_filter", (sub_id.clone(), TopicFilter::Any)) + .await + .unwrap_or_else(|e| panic!("filter {i} should be accepted: {e:?}")); + assert!( + matches!(resp, super::AddFilterResponse::Ok(_)), + "filter {i} should return Ok, got {resp:?}" + ); + } + + let resp: super::AddFilterResponse = rpc + .call("statement_unstable_add_filter", (sub_id, TopicFilter::Any)) + .await + .expect("filter beyond cap returns a successful RPC response"); + assert_eq!(resp, super::AddFilterResponse::limit_reached()); + + drop(sub); +} + +#[tokio::test] +async fn remove_filter_frees_rpc_filter_capacity() { + let rpc = make_server(); + let sub = subscribe(&rpc).await; + let sub_id = sub_id_string(&sub); + let mut filter_ids = Vec::new(); + + for i in 0..sc_statement_store::MAX_FILTERS_PER_SUBSCRIPTION { + let resp: super::AddFilterResponse = rpc + .call("statement_unstable_add_filter", (sub_id.clone(), TopicFilter::Any)) + .await + .unwrap_or_else(|e| panic!("filter {i} should be accepted: {e:?}")); + let filter_id = match resp { + super::AddFilterResponse::Ok(id) => id, + super::AddFilterResponse::LimitReached(_) => { + panic!("filter {i} unexpectedly reached the cap") + }, + }; + filter_ids.push(filter_id); + } + + let _: () = rpc + .call("statement_unstable_remove_filter", (sub_id.clone(), filter_ids[0].clone())) + .await + .expect("remove_filter frees capacity"); + + let resp: super::AddFilterResponse = rpc + .call("statement_unstable_add_filter", (sub_id, 
TopicFilter::Any)) + .await + .expect("replacement filter should be accepted"); + assert!( + matches!(resp, super::AddFilterResponse::Ok(_)), + "replacement filter should return Ok, got {resp:?}" + ); + + drop(sub); +} + +#[tokio::test] +async fn add_filter_for_unknown_subscription_yields_invalid_subscription() { + let rpc = make_server(); + let err = rpc + .call::<_, super::AddFilterResponse>( + "statement_unstable_add_filter", + ("does-not-exist".to_string(), TopicFilter::Any), + ) + .await + .expect_err("unknown subscription must error"); + + let object = match err { + MethodsError::JsonRpc(e) => e, + other => panic!("expected ErrorObject, got {other:?}"), + }; + assert_eq!(object.code(), INVALID_SUBSCRIPTION); +} + +#[tokio::test] +async fn remove_filter_is_silent_for_unknown_subscription_and_filter() { + let rpc = make_server(); + + let _: () = rpc + .call("statement_unstable_remove_filter", ("does-not-exist".to_string(), "0".to_string())) + .await + .expect("remove_filter on unknown sub is no-op"); + + let sub = subscribe(&rpc).await; + let sub_id = sub_id_string(&sub); + let _: () = rpc + .call("statement_unstable_remove_filter", (sub_id, "999".to_string())) + .await + .expect("remove_filter on unknown filter is no-op"); + drop(sub); +} + +#[tokio::test] +async fn per_connection_subscription_cap_returns_reached_limits() { + let rpc = make_server(); + + let mut subs = Vec::new(); + for _ in 0..MAX_SUBSCRIPTIONS_PER_CONNECTION { + subs.push(subscribe(&rpc).await); + } + + let err = rpc + .subscribe_unbounded("statement_unstable_subscribe", jsonrpsee::rpc_params![]) + .await + .expect_err("expected ReachedLimits at cap+1"); + let object = match err { + MethodsError::JsonRpc(e) => e, + other => panic!("expected ErrorObject, got {other:?}"), + }; + assert_eq!(object.code(), REACHED_LIMITS); +} diff --git a/substrate/client/statement-store/src/lib.rs b/substrate/client/statement-store/src/lib.rs index eef5ebbe57e98..4494c0637a9ff 100644 --- 
a/substrate/client/statement-store/src/lib.rs +++ b/substrate/client/statement-store/src/lib.rs @@ -73,11 +73,14 @@ use sp_statement_store::{ }; pub use sp_statement_store::{Error, StatementStore, MAX_TOPICS}; use std::{ - collections::{BTreeMap, BTreeSet, HashMap, HashSet}, - sync::Arc, + collections::{BTreeMap, BTreeSet, HashMap, HashSet, VecDeque}, + sync::{atomic::AtomicU64, Arc}, time::{Duration, Instant}, }; -pub use subscription::StatementStoreSubscriptionApi; +pub use subscription::{ + AddFilterError, AtomicSnapshotProvider, LiveEventStream, MultiFilterSubscriptionApi, + StatementStoreSubscriptionApi, SubscriptionHandle, MAX_FILTERS_PER_SUBSCRIPTION, +}; const KEY_VERSION: &[u8] = b"version".as_slice(); const CURRENT_VERSION: u32 = 1; @@ -1628,6 +1631,52 @@ impl StatementStoreSubscriptionApi for Store { } } +impl AtomicSnapshotProvider for Store { + fn add_filter_atomically( + &self, + filter: &OptimizedTopicFilter, + register: &mut dyn FnMut(), + ) -> Result>> { + let mut existing_statements = Vec::new(); + let index = self.index.read(); + self.collect_statements_locked( + None, + filter, + &index, + &mut existing_statements, + |statement| Some(statement.encode()), + )?; + register(); + drop(index); + Ok(existing_statements) + } +} + +impl MultiFilterSubscriptionApi for Arc { + fn create_subscription(&self) -> (SubscriptionHandle, LiveEventStream) { + let filters = Arc::new(RwLock::new(HashMap::new())); + let next_filter_id = Arc::new(AtomicU64::new(0)); + let add_filter_lock = Arc::new(parking_lot::Mutex::new(())); + let snapshot_provider: Arc = self.clone(); + + let (_subscription_sender, subscription_stream) = + self.subscription_manager.subscribe(OptimizedTopicFilter::Any); + + let handle = SubscriptionHandle { + filters: filters.clone(), + next_filter_id, + snapshot_provider, + add_filter_lock, + }; + let stream = LiveEventStream { + matcher_stream: subscription_stream, + filters, + pending: VecDeque::new(), + }; + (handle, stream) + } +} + 
#[cfg(test)] mod tests { @@ -3234,4 +3283,265 @@ mod tests { let (existing, _sender, _stream) = store.subscribe_statement(filter_b).unwrap(); assert_eq!(existing.len(), 2, "Re-subscribe MatchAll([B]) should return s2 and s3"); } + + mod multi_filter { + use super::*; + use crate::{LiveEventStream, MultiFilterSubscriptionApi, SubscriptionHandle}; + use futures::StreamExt; + use sp_statement_store::{LiveStatementEvent, OptimizedTopicFilter}; + use std::{collections::HashSet, sync::Arc, time::Duration}; + + fn arc_test_store() -> (Arc, tempfile::TempDir) { + let (store, dir) = test_store(); + (Arc::new(store), dir) + } + + async fn collect_n( + stream: &mut LiveEventStream, + n: usize, + timeout: Duration, + ) -> Vec { + let mut events = Vec::new(); + for _ in 0..n { + match tokio::time::timeout(timeout, stream.next()).await { + Ok(Some(event)) => events.push(event), + _ => break, + } + } + events + } + + async fn drain_all( + stream: &mut LiveEventStream, + idle: Duration, + ) -> Vec { + let mut events = Vec::new(); + while let Ok(Some(event)) = tokio::time::timeout(idle, stream.next()).await { + events.push(event); + } + events + } + + #[tokio::test] + async fn create_subscription_no_filters_emits_empty_matched_ids() { + let (store, _dir) = arc_test_store(); + let (_handle, mut stream) = store.create_subscription(); + + let stmt = signed_statement(1); + let stmt_hash = stmt.hash(); + assert_eq!(store.submit(stmt.clone(), StatementSource::Local), SubmitResult::New); + + let events = collect_n(&mut stream, 1, Duration::from_secs(1)).await; + assert_eq!(events.len(), 1); + assert_eq!(events[0].hash, stmt_hash); + assert!(events[0].matched_filter_ids.is_empty()); + assert_eq!(events[0].encoded, stmt.encode()); + } + + #[tokio::test] + async fn add_filter_empty_store_returns_empty_snapshot() { + let (store, _dir) = arc_test_store(); + let (handle, _stream) = store.create_subscription(); + + let filter = OptimizedTopicFilter::MatchAll(HashSet::from([topic(1)])); + let 
(filter_id, snapshot) = handle.add_filter(filter).unwrap(); + assert!(snapshot.is_empty()); + assert_eq!(filter_id.as_u64(), 0); + assert_eq!(handle.filter_ids(), vec![filter_id]); + } + + #[tokio::test] + async fn add_filter_returns_existing_matching_statements() { + let (store, _dir) = arc_test_store(); + let topic_a = topic(1); + let topic_b = topic(2); + + let s1 = signed_statement_with_topics(1, &[topic_a], None); + let s2 = signed_statement_with_topics(2, &[topic_b], None); + let s3 = signed_statement_with_topics(3, &[topic_a], None); + store.submit(s1.clone(), StatementSource::Local); + store.submit(s2, StatementSource::Local); + store.submit(s3.clone(), StatementSource::Local); + + let (handle, _stream) = store.create_subscription(); + let filter = OptimizedTopicFilter::MatchAll(HashSet::from([topic_a])); + let (_id, snapshot) = handle.add_filter(filter).unwrap(); + let snapshot_set: HashSet<_> = snapshot.into_iter().collect(); + let expected: HashSet<_> = vec![s1.encode(), s3.encode()].into_iter().collect(); + assert_eq!(snapshot_set, expected); + } + + #[tokio::test] + async fn add_filter_returns_limit_reached_at_cap() { + let (store, _dir) = arc_test_store(); + let (handle, _stream) = store.create_subscription(); + + for i in 0..crate::subscription::MAX_FILTERS_PER_SUBSCRIPTION { + let (filter_id, snapshot) = handle + .add_filter(OptimizedTopicFilter::MatchAll(HashSet::from([topic(i as u64)]))) + .unwrap_or_else(|e| panic!("filter {i} should be accepted: {e:?}")); + assert_eq!(filter_id.as_u64(), i as u64); + assert!(snapshot.is_empty()); + } + + let err = handle + .add_filter(OptimizedTopicFilter::Any) + .expect_err("filter beyond cap must be rejected"); + assert_eq!(err, crate::subscription::AddFilterError::LimitReached); + assert_eq!( + handle.filter_ids().len(), + crate::subscription::MAX_FILTERS_PER_SUBSCRIPTION, + ); + } + + #[tokio::test] + async fn removing_filter_frees_filter_capacity() { + let (store, _dir) = arc_test_store(); + let (handle, 
_stream) = store.create_subscription(); + let mut ids = Vec::new(); + + for i in 0..crate::subscription::MAX_FILTERS_PER_SUBSCRIPTION { + let (filter_id, _) = handle + .add_filter(OptimizedTopicFilter::MatchAll(HashSet::from([topic(i as u64)]))) + .unwrap_or_else(|e| panic!("filter {i} should be accepted: {e:?}")); + ids.push(filter_id); + } + + assert!(handle.remove_filter(ids[0])); + + let (replacement, snapshot) = handle + .add_filter(OptimizedTopicFilter::Any) + .expect("capacity freed by remove_filter"); + assert!(snapshot.is_empty()); + assert_eq!( + handle.filter_ids().len(), + crate::subscription::MAX_FILTERS_PER_SUBSCRIPTION, + ); + assert_eq!( + replacement.as_u64(), + crate::subscription::MAX_FILTERS_PER_SUBSCRIPTION as u64 + ); + } + + #[tokio::test] + async fn remove_filter_makes_subsequent_events_unmatched() { + let (store, _dir) = arc_test_store(); + let (handle, mut stream) = store.create_subscription(); + let topic_a = topic(1); + let filter = OptimizedTopicFilter::MatchAll(HashSet::from([topic_a])); + let (id, _snapshot) = handle.add_filter(filter).unwrap(); + + assert!(handle.remove_filter(id)); + assert!(!handle.remove_filter(id)); + + let stmt = signed_statement_with_topics(1, &[topic_a], None); + store.submit(stmt, StatementSource::Local); + let events = collect_n(&mut stream, 1, Duration::from_secs(1)).await; + assert_eq!(events.len(), 1); + assert!(events[0].matched_filter_ids.is_empty()); + } + + #[tokio::test] + async fn multiple_filters_match_independently() { + let (store, _dir) = arc_test_store(); + let (handle, mut stream) = store.create_subscription(); + let topic_a = topic(1); + let topic_b = topic(2); + + let (id_a, _) = handle + .add_filter(OptimizedTopicFilter::MatchAll(HashSet::from([topic_a]))) + .unwrap(); + let (id_b, _) = handle + .add_filter(OptimizedTopicFilter::MatchAll(HashSet::from([topic_b]))) + .unwrap(); + let (id_ab, _) = handle + .add_filter(OptimizedTopicFilter::MatchAll(HashSet::from([topic_a, topic_b]))) + 
.unwrap(); + + let s1 = signed_statement_with_topics(1, &[topic_a], None); + store.submit(s1, StatementSource::Local); + let events = collect_n(&mut stream, 1, Duration::from_secs(1)).await; + assert_eq!(events.len(), 1); + let matched: HashSet<_> = events[0].matched_filter_ids.iter().copied().collect(); + assert_eq!(matched, HashSet::from([id_a])); + + let s2 = signed_statement_with_topics(2, &[topic_a, topic_b], None); + store.submit(s2, StatementSource::Local); + let events = collect_n(&mut stream, 1, Duration::from_secs(1)).await; + assert_eq!(events.len(), 1); + let matched: HashSet<_> = events[0].matched_filter_ids.iter().copied().collect(); + assert_eq!(matched, HashSet::from([id_a, id_b, id_ab])); + } + + #[tokio::test] + async fn cloned_handle_shares_filter_state() { + let (store, _dir) = arc_test_store(); + let (handle, _stream) = store.create_subscription(); + let topic_a = topic(1); + let topic_b = topic(2); + + let (id1, _) = handle + .add_filter(OptimizedTopicFilter::MatchAll(HashSet::from([topic_a]))) + .unwrap(); + + let handle2: SubscriptionHandle = handle.clone(); + let (id2, _) = handle2 + .add_filter(OptimizedTopicFilter::MatchAll(HashSet::from([topic_b]))) + .unwrap(); + + assert_ne!(id1, id2); + + let ids_via_a: HashSet<_> = handle.filter_ids().into_iter().collect(); + let ids_via_b: HashSet<_> = handle2.filter_ids().into_iter().collect(); + assert_eq!(ids_via_a, ids_via_b); + assert_eq!(ids_via_a, HashSet::from([id1, id2])); + + assert!(handle2.remove_filter(id1)); + assert_eq!(handle.filter_ids(), vec![id2]); + } + + #[tokio::test] + async fn add_filter_snapshot_and_live_events_cover_concurrent_submissions() { + let (store, _dir) = arc_test_store(); + let (handle, mut stream) = store.create_subscription(); + + const NUM_STATEMENTS: u8 = 50; + + let store_for_submitter = store.clone(); + let submitter = std::thread::spawn(move || { + for i in 0..NUM_STATEMENTS { + let stmt = signed_statement(i); + assert_eq!( + 
store_for_submitter.submit(stmt, StatementSource::Local), + SubmitResult::New + ); + std::thread::sleep(Duration::from_millis(1)); + } + }); + + std::thread::sleep(Duration::from_millis(10)); + let (filter_id, snapshot) = handle.add_filter(OptimizedTopicFilter::Any).unwrap(); + + submitter.join().unwrap(); + + let snapshot_hashes: HashSet<[u8; 32]> = snapshot + .iter() + .map(|bytes| Statement::decode(&mut &bytes[..]).unwrap().hash()) + .collect(); + + let mut live_with_filter: HashSet<[u8; 32]> = HashSet::new(); + for ev in drain_all(&mut stream, Duration::from_millis(300)).await { + if ev.matched_filter_ids.contains(&filter_id) { + live_with_filter.insert(ev.hash); + } + } + + let covered: HashSet<[u8; 32]> = + snapshot_hashes.union(&live_with_filter).copied().collect(); + let expected: HashSet<[u8; 32]> = + (0..NUM_STATEMENTS).map(|i| signed_statement(i).hash()).collect(); + + assert_eq!(covered, expected); + } + } } diff --git a/substrate/client/statement-store/src/subscription.rs b/substrate/client/statement-store/src/subscription.rs index 2e7b9861aae68..e44d24dc7650d 100644 --- a/substrate/client/statement-store/src/subscription.rs +++ b/substrate/client/statement-store/src/subscription.rs @@ -36,21 +36,61 @@ const MATCHERS_TASK_CHANNEL_BUFFER_SIZE: usize = 80_000; // Buffer size for individual subscriptions. 
const SUBSCRIPTION_BUFFER_SIZE: usize = 128; +/// Maximum number of active filters attached to one statement subscription +pub const MAX_FILTERS_PER_SUBSCRIPTION: usize = 128; + use futures::{Stream, StreamExt}; use itertools::Itertools; +use parking_lot::{Mutex, RwLock}; use crate::LOG_TARGET; use sc_utils::id_sequence::SeqID; -use sp_core::{traits::SpawnNamed, Bytes, Encode}; +use sp_core::{traits::SpawnNamed, Bytes, Decode, Encode}; pub use sp_statement_store::StatementStore; use sp_statement_store::{ - OptimizedTopicFilter, Result, Statement, StatementEvent, Topic, MAX_TOPICS, + FilterId, LiveStatementEvent, OptimizedTopicFilter, Result, Statement, StatementEvent, Topic, + MAX_TOPICS, }; use std::{ - collections::{hash_map::Entry, HashMap, HashSet}, - sync::atomic::AtomicU64, + collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, + sync::{atomic::AtomicU64, Arc}, }; +/// Error returned when attaching a filter to a multi-filter subscription fails +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum AddFilterError { + /// The subscription already has the maximum number of active filters + LimitReached, + /// The store failed while collecting the replay snapshot + Store(sp_statement_store::Error), +} + +impl From for AddFilterError { + fn from(error: sp_statement_store::Error) -> Self { + AddFilterError::Store(error) + } +} + +impl std::fmt::Display for AddFilterError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + AddFilterError::LimitReached => { + write!(f, "maximum number of filters for this subscription has been reached") + }, + AddFilterError::Store(error) => error.fmt(f), + } + } +} + +impl std::error::Error for AddFilterError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + AddFilterError::LimitReached => None, + AddFilterError::Store(error) => Some(error), + } + } +} + /// Trait for initiating statement store subscriptions from the RPC module. 
pub trait StatementStoreSubscriptionApi: Send + Sync { /// Subscribe to statements matching the topic filter. @@ -63,6 +103,124 @@ pub trait StatementStoreSubscriptionApi: Send + Sync { ) -> Result<(Vec>, async_channel::Sender, SubscriptionStatementsStream)>; } +/// Provides an atomic snapshot plus filter registration hook for [`SubscriptionHandle`] +pub trait AtomicSnapshotProvider: Send + Sync { + fn add_filter_atomically( + &self, + filter: &OptimizedTopicFilter, + register: &mut dyn FnMut(), + ) -> Result>>; +} + +/// Creates multi-filter subscriptions for the RPC module +pub trait MultiFilterSubscriptionApi: Send + Sync { + fn create_subscription(&self) -> (SubscriptionHandle, LiveEventStream); +} + +/// A handle that attaches, removes, and inspects filters for one multi-filter subscription +#[derive(Clone)] +pub struct SubscriptionHandle { + pub(crate) filters: Arc>>, + pub(crate) next_filter_id: Arc, + pub(crate) snapshot_provider: Arc, + pub(crate) add_filter_lock: Arc>, +} + +impl SubscriptionHandle { + /// Attaches a filter and returns its id with the replay snapshot + pub fn add_filter( + &self, + filter: OptimizedTopicFilter, + ) -> std::result::Result<(FilterId, Vec>), AddFilterError> { + let _guard = self.add_filter_lock.lock(); + if self.filters.read().len() >= MAX_FILTERS_PER_SUBSCRIPTION { + return Err(AddFilterError::LimitReached); + } + + let filter_id = + FilterId::new(self.next_filter_id.fetch_add(1, std::sync::atomic::Ordering::SeqCst)); + let filters_lock = self.filters.clone(); + let filter_to_register = filter.clone(); + let mut register = move || { + filters_lock.write().insert(filter_id, filter_to_register.clone()); + }; + let snapshot = self.snapshot_provider.add_filter_atomically(&filter, &mut register)?; + Ok((filter_id, snapshot)) + } + + pub fn remove_filter(&self, filter_id: FilterId) -> bool { + self.filters.write().remove(&filter_id).is_some() + } + + pub fn filter_ids(&self) -> Vec { + 
self.filters.read().keys().copied().collect() + } + + pub fn matched_filter_ids(&self, statement: &Statement) -> Vec { + self.filters + .read() + .iter() + .filter(|(_, f)| f.matches(statement)) + .map(|(id, _)| *id) + .collect() + } +} + +/// Stream of live statement events for a multi-filter subscription +pub struct LiveEventStream { + pub(crate) matcher_stream: SubscriptionStatementsStream, + pub(crate) filters: Arc>>, + pub(crate) pending: VecDeque>, +} + +impl Stream for LiveEventStream { + type Item = LiveStatementEvent; + + fn poll_next( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + let this = self.get_mut(); + loop { + if let Some(bytes) = this.pending.pop_front() { + let Ok(statement) = Statement::decode(&mut &bytes[..]) else { + log::warn!( + target: LOG_TARGET, + "Corrupt statement received on multi-filter subscription" + ); + continue; + }; + let hash = statement.hash(); + let matched_filter_ids: Vec = { + let filters = this.filters.read(); + filters + .iter() + .filter(|(_, f)| f.matches(&statement)) + .map(|(id, _)| *id) + .collect() + }; + return std::task::Poll::Ready(Some(LiveStatementEvent { + hash, + encoded: bytes, + matched_filter_ids, + })); + } + + match this.matcher_stream.poll_next_unpin(cx) { + std::task::Poll::Ready(Some(StatementEvent::NewStatements { + statements, .. + })) => { + for encoded in statements { + this.pending.push_back(encoded.0); + } + }, + std::task::Poll::Ready(None) => return std::task::Poll::Ready(None), + std::task::Poll::Pending => return std::task::Poll::Pending, + } + } + } +} + /// Messages sent to matcher tasks. 
#[derive(Clone, Debug)] pub enum MatcherMessage { diff --git a/substrate/primitives/statement-store/src/lib.rs b/substrate/primitives/statement-store/src/lib.rs index ac9dafe2f89ae..2e06dbd114cee 100644 --- a/substrate/primitives/statement-store/src/lib.rs +++ b/substrate/primitives/statement-store/src/lib.rs @@ -206,8 +206,9 @@ pub fn get_allowance(account_id: impl AsRef<[u8]>) -> StatementAllowance { #[cfg(feature = "std")] pub use store_api::{ - Error, FilterDecision, InvalidReason, OptimizedTopicFilter, RejectionReason, Result, - StatementEvent, StatementSource, StatementStore, SubmitResult, TopicFilter, + Error, FilterDecision, FilterId, InvalidReason, LiveStatementEvent, OptimizedTopicFilter, + RejectionReason, Result, StatementEvent, StatementSource, StatementStore, SubmitResult, + TopicFilter, }; #[cfg(feature = "std")] diff --git a/substrate/primitives/statement-store/src/store_api.rs b/substrate/primitives/statement-store/src/store_api.rs index 54698fc172189..c08a17ac4c1c8 100644 --- a/substrate/primitives/statement-store/src/store_api.rs +++ b/substrate/primitives/statement-store/src/store_api.rs @@ -20,6 +20,33 @@ use crate::{Hash, Statement, Topic, MAX_ANY_TOPICS, MAX_TOPICS}; use sp_core::{bounded_vec::BoundedVec, Bytes, ConstU32}; use std::collections::HashSet; +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub struct FilterId(u64); + +impl FilterId { + pub fn new(id: u64) -> Self { + FilterId(id) + } + + pub fn as_u64(&self) -> u64 { + self.0 + } +} + +impl std::fmt::Display for FilterId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +/// Live statement event emitted by a multi-filter subscription +#[derive(Debug, Clone)] +pub struct LiveStatementEvent { + pub hash: Hash, + pub encoded: Vec, + pub matched_filter_ids: Vec, +} + /// Statement store error. 
#[derive(Debug, Clone, Eq, PartialEq, thiserror::Error)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] From b8de7a98319e8a3596e3c8d07fd072a0f970c7e1 Mon Sep 17 00:00:00 2001 From: "cmd[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 5 May 2026 21:19:56 +0000 Subject: [PATCH 02/16] Update from github-actions[bot] running command 'fmt' --- substrate/client/rpc-spec-v2/Cargo.toml | 2 +- substrate/client/rpc-spec-v2/src/statement/event.rs | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index 4fd99504ac097..6dfc38cba97f8 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -60,7 +60,6 @@ sc-rpc = { workspace = true, default-features = true, features = ["test-helpers" sc-service = { workspace = true, default-features = true } sc-transaction-pool = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } -tempfile = { workspace = true } serde_json = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } sp-externalities = { workspace = true, default-features = true } @@ -68,4 +67,5 @@ sp-maybe-compressed-blob = { workspace = true, default-features = true } substrate-test-runtime = { workspace = true } substrate-test-runtime-client = { workspace = true } substrate-test-runtime-transaction-pool = { workspace = true } +tempfile = { workspace = true } tokio = { features = ["macros"], workspace = true, default-features = true } diff --git a/substrate/client/rpc-spec-v2/src/statement/event.rs b/substrate/client/rpc-spec-v2/src/statement/event.rs index 0d4f60d31aa77..693754db49125 100644 --- a/substrate/client/rpc-spec-v2/src/statement/event.rs +++ b/substrate/client/rpc-spec-v2/src/statement/event.rs @@ -36,9 +36,7 @@ pub enum SubscribeEvent { filter_id: String, }, /// 
Statements admitted after matching filters were attached - NewStatements { - statements: Vec, - }, + NewStatements { statements: Vec }, /// Terminal notification Stop, } From 62bbfd22efea61bfc49cc2596d0bd261f6ac4cfa Mon Sep 17 00:00:00 2001 From: DenzelPenzel Date: Fri, 8 May 2026 16:00:04 +0100 Subject: [PATCH 03/16] statement-store: support multi-filter RPC subscriptions --- .../rpc-spec-v2/src/statement/statement.rs | 9 +- .../rpc-spec-v2/src/statement/subscription.rs | 491 ++------- substrate/client/statement-store/src/lib.rs | 176 ++-- .../statement-store/src/subscription.rs | 933 ++++++++++++------ 4 files changed, 803 insertions(+), 806 deletions(-) diff --git a/substrate/client/rpc-spec-v2/src/statement/statement.rs b/substrate/client/rpc-spec-v2/src/statement/statement.rs index c80ce5afa8686..030878e72de1f 100644 --- a/substrate/client/rpc-spec-v2/src/statement/statement.rs +++ b/substrate/client/rpc-spec-v2/src/statement/statement.rs @@ -23,7 +23,7 @@ use crate::{ event::{AddFilterResponse, SubmitOutcome}, subscription::{ add_filter_sync, parse_filter_id, remove_filter_sync, run_subscription_task, - AddFilterOutcome, ControlMessage, StatementSubscriptions, + AddFilterOutcome, StatementSubscriptions, }, LOG_TARGET, }, @@ -41,7 +41,6 @@ use sp_statement_store::{ OptimizedTopicFilter, Statement, StatementSource, StatementStore, SubmitResult, TopicFilter, }; use std::sync::Arc; -use tokio::sync::mpsc; /// JSON-RPC server implementation for the `statement_unstable_*` methods pub struct StatementSpec { @@ -106,14 +105,12 @@ where let sub_id = read_subscription_id_as_string(&sink); let (handle, live_stream) = store.create_subscription(); - let (control_tx, control_rx) = mpsc::unbounded_channel::(); - let Some(entry) = reserved.register(sub_id.clone(), handle, control_tx) else { + let Some(entry) = reserved.register(sub_id.clone(), handle) else { log::debug!(target: LOG_TARGET, "duplicate subscription id {sub_id}; aborting"); return; }; - let state = 
entry.state().clone(); - let task = run_subscription_task(sink, state, live_stream, control_rx); + let task = run_subscription_task(sink, live_stream); executor.spawn( "statement-unstable-subscribe", Some("rpc"), diff --git a/substrate/client/rpc-spec-v2/src/statement/subscription.rs b/substrate/client/rpc-spec-v2/src/statement/subscription.rs index 44243c32ab3b5..267b4072fee5a 100644 --- a/substrate/client/rpc-spec-v2/src/statement/subscription.rs +++ b/substrate/client/rpc-spec-v2/src/statement/subscription.rs @@ -20,108 +20,33 @@ use crate::statement::{ error::Error, event::{NewStatementEntry, SubscribeEvent}, }; -use codec::Decode; use futures::StreamExt; use jsonrpsee::ConnectionId; use parking_lot::{Mutex, RwLock}; use sc_rpc::utils::Subscription; -use sc_statement_store::{AddFilterError, LiveEventStream, SubscriptionHandle}; -use sp_statement_store::{hash_encoded, FilterId, Hash, OptimizedTopicFilter, Statement}; -use std::{ - collections::{HashMap, HashSet, VecDeque}, - sync::Arc, +use sc_statement_store::{ + AddFilterError, LiveEventStream, MultiFilterSubscriptionEvent, SubscriptionHandle, }; -use tokio::sync::mpsc; +use sp_statement_store::{FilterId, OptimizedTopicFilter}; +use std::{collections::HashMap, sync::Arc}; use crate::common::connections::{RegisteredConnection, RpcConnections}; pub const MAX_SUBSCRIPTIONS_PER_CONNECTION: usize = 16; -const REPLAY_CHUNK_BYTES: usize = 4 * 1024 * 1024; - -#[cfg(not(test))] -pub(crate) const PENDING_LIVE_HARD_CAP: usize = 64 * 1024; -#[cfg(test)] -pub(crate) const PENDING_LIVE_HARD_CAP: usize = 4; -#[cfg(not(test))] -pub(crate) const EMITTED_VIA_NEW_HARD_CAP: usize = 64 * 1024; -#[cfg(test)] -pub(crate) const EMITTED_VIA_NEW_HARD_CAP: usize = 4; - -pub(crate) enum ControlMessage { - Wake, -} pub(crate) enum AddFilterOutcome { Added(FilterId), LimitReached, } -struct PendingReplay { - filter_id: FilterId, - snapshot: Vec>, -} - -struct PendingLiveStatement { - hash: Hash, - encoded: Vec, -} - /// Per-subscription 
state shared between RPC handlers and the subscription task pub(crate) struct SubscriptionState { handle: SubscriptionHandle, - control_tx: mpsc::UnboundedSender, - replays_in_progress: HashSet, - cancelled_replays: HashSet, - pending_replays: VecDeque, - pending_live: VecDeque, - replayed_filter_ids_by_hash: HashMap>, - new_emitted_hashes: HashSet, - stopped: bool, } impl SubscriptionState { - fn new(handle: SubscriptionHandle, control_tx: mpsc::UnboundedSender) -> Self { - Self { - handle, - control_tx, - replays_in_progress: HashSet::new(), - cancelled_replays: HashSet::new(), - pending_replays: VecDeque::new(), - pending_live: VecDeque::new(), - replayed_filter_ids_by_hash: HashMap::new(), - new_emitted_hashes: HashSet::new(), - stopped: false, - } - } - - fn record_filter_added(&mut self, filter_id: FilterId, snapshot: Vec>) { - self.replays_in_progress.insert(filter_id); - for encoded in &snapshot { - let hash = hash_encoded(encoded); - self.replayed_filter_ids_by_hash.entry(hash).or_default().insert(filter_id); - } - self.pending_replays.push_back(PendingReplay { filter_id, snapshot }); - } - - fn record_filter_removed(&mut self, filter_id: FilterId) -> bool { - let was_in_progress = self.replays_in_progress.remove(&filter_id); - if was_in_progress { - self.cancelled_replays.insert(filter_id); - } - self.replayed_filter_ids_by_hash.retain(|_hash, set| { - set.remove(&filter_id); - !set.is_empty() - }); - was_in_progress - } - - #[cfg(test)] - pub(crate) fn fill_pending_live_for_overflow_test(&mut self, filter: FilterId, count: usize) { - self.replays_in_progress.insert(filter); - for i in 0..count { - self.pending_live - .push_back(PendingLiveStatement { hash: [i as u8; 32], encoded: vec![i as u8] }); - } + fn new(handle: SubscriptionHandle) -> Self { + Self { handle } } } @@ -186,11 +111,10 @@ impl ReservedSubscription { mut self, sub_id: String, handle: SubscriptionHandle, - control_tx: mpsc::UnboundedSender, ) -> Option { let token = self.token.take()?; 
let registered = token.register(sub_id.clone())?; - let state = Arc::new(Mutex::new(SubscriptionState::new(handle, control_tx))); + let state = Arc::new(Mutex::new(SubscriptionState::new(handle))); let key = SubscriptionKey::new(self.conn_id, sub_id); { let mut registry = self.registry.write(); @@ -199,29 +123,17 @@ impl ReservedSubscription { } registry.insert(key.clone(), state.clone()); } - Some(SubscriptionEntry { - key, - state, - _registered: registered, - registry: self.registry.clone(), - }) + Some(SubscriptionEntry { key, _registered: registered, registry: self.registry.clone() }) } } /// Registered subscription entry pub struct SubscriptionEntry { key: SubscriptionKey, - state: SubscriptionStateRef, _registered: RegisteredConnection, registry: SubscriptionRegistry, } -impl SubscriptionEntry { - pub fn state(&self) -> &SubscriptionStateRef { - &self.state - } -} - impl Drop for SubscriptionEntry { fn drop(&mut self) { self.registry.write().remove(&self.key); @@ -232,35 +144,21 @@ pub(crate) fn add_filter_sync( state: &Arc>, filter: OptimizedTopicFilter, ) -> Result { - let (control_tx, filter_id) = { - let mut subscription = state.lock(); - let handle = subscription.handle.clone(); - match handle.add_filter(filter) { - Ok((filter_id, snapshot)) => { - subscription.record_filter_added(filter_id, snapshot); - (subscription.control_tx.clone(), filter_id) - }, - Err(AddFilterError::LimitReached) => return Ok(AddFilterOutcome::LimitReached), - Err(AddFilterError::Store(e)) => { - return Err(Error::InternalError(format!("add_filter failed: {e}"))) - }, - } - }; - let _ = control_tx.send(ControlMessage::Wake); - Ok(AddFilterOutcome::Added(filter_id)) + let handle = state.lock().handle.clone(); + match handle.add_filter(filter) { + Ok(filter_id) => Ok(AddFilterOutcome::Added(filter_id)), + Err(AddFilterError::LimitReached) => Ok(AddFilterOutcome::LimitReached), + Err(AddFilterError::Store(e)) => { + Err(Error::InternalError(format!("add_filter failed: {e}"))) + }, 
+ } } pub(crate) fn remove_filter_sync( state: &Arc>, filter_id: FilterId, ) -> bool { - let (was_present, control_tx) = { - let mut subscription = state.lock(); - let _ = subscription.handle.remove_filter(filter_id); - (subscription.record_filter_removed(filter_id), subscription.control_tx.clone()) - }; - let _ = control_tx.send(ControlMessage::Wake); - was_present + state.lock().handle.remove_filter(filter_id) } pub(crate) fn filter_id_to_string(id: FilterId) -> String { @@ -270,259 +168,52 @@ pub(crate) fn parse_filter_id(s: &str) -> Option { s.parse::().ok().map(FilterId::new) } -pub async fn run_subscription_task( - sink: Subscription, - state: Arc>, - mut live_stream: LiveEventStream, - mut control_rx: mpsc::UnboundedReceiver, -) { - loop { - if !drain_pending_replays(&sink, &state).await { - return; - } - if !drain_pending_live(&sink, &state).await { +pub async fn run_subscription_task(sink: Subscription, mut live_stream: LiveEventStream) { + while let Some(event) = live_stream.next().await { + if !send_subscription_event(&sink, event).await { return; } - if state.lock().stopped { - return; - } - - tokio::select! 
{ - biased; - msg = control_rx.recv() => match msg { - Some(ControlMessage::Wake) => continue, - None => return, - }, - event = live_stream.next() => match event { - Some(event) => { - if !handle_live_event(&sink, &state, event).await { - return; - } - }, - None => return, - }, - } } } -#[cfg_attr(test, derive(Debug, PartialEq))] -pub(crate) enum LiveAction { - Stop, - Noop, - Emit(NewStatementEntry), -} - -pub(crate) fn decide_live_action( - subscription: &mut SubscriptionState, - event: sp_statement_store::LiveStatementEvent, -) -> LiveAction { - if subscription.stopped { - return LiveAction::Noop; - } - - let matched_filters: HashSet = match Statement::decode(&mut &event.encoded[..]) { - Ok(stmt) => subscription.handle.matched_filter_ids(&stmt).into_iter().collect(), - Err(_) => { - log::warn!( - target: super::LOG_TARGET, - "Corrupt statement bytes received on live stream; skipping", - ); - return LiveAction::Noop; +async fn send_subscription_event(sink: &Subscription, event: MultiFilterSubscriptionEvent) -> bool { + match event { + MultiFilterSubscriptionEvent::ReplayStatements { filter_id, statements } => { + let statements = statements.into_iter().map(sp_core::Bytes).collect(); + send_event( + sink, + &SubscribeEvent::ReplayStatements { + filter_id: filter_id_to_string(filter_id), + statements, + }, + ) + .await }, - }; - - if matched_filters.iter().any(|f| subscription.replays_in_progress.contains(f)) { - if subscription.pending_live.len() >= PENDING_LIVE_HARD_CAP { - log::warn!( - target: super::LOG_TARGET, - "pending_live cap reached on statement subscription; sending stop", - ); - subscription.stopped = true; - LiveAction::Stop - } else { - subscription - .pending_live - .push_back(PendingLiveStatement { hash: event.hash, encoded: event.encoded }); - LiveAction::Noop - } - } else { - compute_new_statements_action(subscription, event.hash, event.encoded, &matched_filters) - } -} - -async fn handle_live_event( - sink: &Subscription, - state: &Arc>, - 
event: sp_statement_store::LiveStatementEvent, -) -> bool { - let action = { - let mut subscription = state.lock(); - decide_live_action(&mut subscription, event) - }; - - match action { - LiveAction::Stop => { - let _ = send_event(sink, &SubscribeEvent::Stop).await; - false + MultiFilterSubscriptionEvent::ReplayDone { filter_id } => { + send_event( + sink, + &SubscribeEvent::ReplayDone { filter_id: filter_id_to_string(filter_id) }, + ) + .await }, - LiveAction::Noop => true, - LiveAction::Emit(entry) => { - send_event(sink, &SubscribeEvent::NewStatements { statements: vec![entry] }).await + MultiFilterSubscriptionEvent::NewStatement(event) => { + let filter_ids = + event.matched_filter_ids.into_iter().map(filter_id_to_string).collect(); + send_event( + sink, + &SubscribeEvent::NewStatements { + statements: vec![NewStatementEntry { + statement: sp_core::Bytes(event.encoded), + filter_ids, + }], + }, + ) + .await }, - } -} - -fn compute_new_statements_action( - subscription: &mut SubscriptionState, - hash: Hash, - encoded: Vec, - filter_ids: &HashSet, -) -> LiveAction { - if subscription.new_emitted_hashes.contains(&hash) { - return LiveAction::Noop; - } - let replayed_filter_ids = subscription.replayed_filter_ids_by_hash.get(&hash); - let filter_ids: Vec = filter_ids - .iter() - .filter(|f| replayed_filter_ids.map_or(true, |set| !set.contains(f))) - .map(|f| filter_id_to_string(*f)) - .collect(); - if filter_ids.is_empty() { - return LiveAction::Noop; - } - if subscription.new_emitted_hashes.len() >= EMITTED_VIA_NEW_HARD_CAP { - log::warn!( - target: super::LOG_TARGET, - "new_emitted_hashes cap reached on statement subscription; sending stop", - ); - subscription.stopped = true; - return LiveAction::Stop; - } - subscription.new_emitted_hashes.insert(hash); - LiveAction::Emit(NewStatementEntry { statement: sp_core::Bytes(encoded), filter_ids }) -} - -async fn drain_pending_replays(sink: &Subscription, state: &Arc>) -> bool { - loop { - let next = 
state.lock().pending_replays.pop_front(); - let Some(replay) = next else { return true }; - if !run_replay(sink, state, replay).await { - return false; - } - } -} - -async fn run_replay( - sink: &Subscription, - state: &Arc>, - replay: PendingReplay, -) -> bool { - let filter_id = replay.filter_id; - let filter_id_str = filter_id_to_string(filter_id); - let mut iter = replay.snapshot.into_iter().peekable(); - - while iter.peek().is_some() { - if state.lock().cancelled_replays.contains(&filter_id) { - let mut subscription = state.lock(); - subscription.cancelled_replays.remove(&filter_id); - return true; - } - - let mut chunk: Vec = Vec::new(); - let mut chunk_bytes: usize = 0; - while let Some(stmt) = iter.peek() { - let est = stmt.len().saturating_mul(2).saturating_add(8); - if !chunk.is_empty() && chunk_bytes + est > REPLAY_CHUNK_BYTES { - break; - } - let bytes = iter.next().expect("peek above; qed"); - chunk_bytes = chunk_bytes.saturating_add(est); - chunk.push(sp_core::Bytes(bytes)); - if chunk_bytes >= REPLAY_CHUNK_BYTES { - break; - } - } - debug_assert!(!chunk.is_empty(), "loop guard; qed"); - - let event = SubscribeEvent::ReplayStatements { - filter_id: filter_id_str.clone(), - statements: chunk, - }; - if !send_event(sink, &event).await { - return false; - } - } - - let cancelled = { - let mut subscription = state.lock(); - if subscription.cancelled_replays.remove(&filter_id) { - true - } else { - subscription.replays_in_progress.remove(&filter_id); + MultiFilterSubscriptionEvent::Stop => { + let _ = send_event(sink, &SubscribeEvent::Stop).await; false - } - }; - if cancelled { - return true; - } - send_event(sink, &SubscribeEvent::ReplayDone { filter_id: filter_id_str }).await -} - -async fn drain_pending_live(sink: &Subscription, state: &Arc>) -> bool { - loop { - let to_send = { - let mut subscription = state.lock(); - if subscription.stopped { - return true; - } - let mut action = LiveAction::Noop; - let mut idx = 0usize; - while idx < 
subscription.pending_live.len() { - let entry = &subscription.pending_live[idx]; - let stmt = match Statement::decode(&mut &entry.encoded[..]) { - Ok(stmt) => stmt, - Err(_) => { - subscription.pending_live.remove(idx); - continue; - }, - }; - let matched_filters: HashSet = - subscription.handle.matched_filter_ids(&stmt).into_iter().collect(); - let still_blocked = - matched_filters.iter().any(|f| subscription.replays_in_progress.contains(f)); - if still_blocked { - idx += 1; - continue; - } - let popped = subscription.pending_live.remove(idx).expect("idx in range; qed"); - let next_action = compute_new_statements_action( - &mut subscription, - popped.hash, - popped.encoded, - &matched_filters, - ); - if !matches!(next_action, LiveAction::Noop) { - action = next_action; - break; - } - } - action - }; - - match to_send { - LiveAction::Emit(entry) => { - if !send_event(sink, &SubscribeEvent::NewStatements { statements: vec![entry] }) - .await - { - return false; - } - }, - LiveAction::Stop => { - let _ = send_event(sink, &SubscribeEvent::Stop).await; - return false; - }, - LiveAction::Noop => return true, - } + }, } } @@ -537,9 +228,7 @@ async fn send_event(sink: &Subscription, event: &SubscribeEvent) -> bool { mod tests { use super::*; use sc_statement_store::{MultiFilterSubscriptionApi, Store}; - use sp_statement_store::OptimizedTopicFilter; use std::sync::Arc; - use tokio::sync::mpsc; fn empty_subscription_state() -> Arc> { let dir = tempfile::tempdir().expect("tempdir"); @@ -690,24 +379,7 @@ mod tests { std::mem::forget(dir); let (handle, _live) = store.create_subscription(); - let (tx, _rx) = mpsc::unbounded_channel(); - Arc::new(Mutex::new(SubscriptionState::new(handle, tx))) - } - - fn dummy_event() -> sp_statement_store::LiveStatementEvent { - use codec::Encode; - let encoded = sp_statement_store::Statement::new().encode(); - sp_statement_store::LiveStatementEvent { - hash: [0xab; 32], - encoded, - matched_filter_ids: vec![], - } - } - - fn 
register_any_filter(state: &Arc>) -> FilterId { - let handle = state.lock().handle.clone(); - let (id, _snapshot) = handle.add_filter(OptimizedTopicFilter::Any).expect("add_filter"); - id + Arc::new(Mutex::new(SubscriptionState::new(handle))) } #[test] @@ -719,71 +391,24 @@ mod tests { let handle_a = empty_subscription_state().lock().handle.clone(); let handle_b = empty_subscription_state().lock().handle.clone(); - let (tx_a, _rx_a) = mpsc::unbounded_channel(); - let (tx_b, _rx_b) = mpsc::unbounded_channel(); let entry_a = subscriptions .reserve(conn_a) .unwrap() - .register(sub_id.clone(), handle_a, tx_a) + .register(sub_id.clone(), handle_a) .unwrap(); - let entry_b = subscriptions + let _entry_b = subscriptions .reserve(conn_b) .unwrap() - .register(sub_id.clone(), handle_b, tx_b) + .register(sub_id.clone(), handle_b) .unwrap(); let state_a = subscriptions.get(conn_a, &sub_id).unwrap(); let state_b = subscriptions.get(conn_b, &sub_id).unwrap(); - assert!(Arc::ptr_eq(&state_a, entry_a.state())); - assert!(Arc::ptr_eq(&state_b, entry_b.state())); assert!(!Arc::ptr_eq(&state_a, &state_b)); drop(entry_a); assert!(subscriptions.get(conn_a, &sub_id).is_none()); assert!(subscriptions.get(conn_b, &sub_id).is_some()); } - - #[test] - fn decide_live_action_noop_when_stopped() { - let state = empty_subscription_state(); - let mut subscription = state.lock(); - subscription.stopped = true; - assert_eq!(decide_live_action(&mut subscription, dummy_event()), LiveAction::Noop); - } - - #[test] - fn decide_live_action_stop_on_overflow() { - let state = empty_subscription_state(); - let id = register_any_filter(&state); - let mut subscription = state.lock(); - subscription.fill_pending_live_for_overflow_test(id, PENDING_LIVE_HARD_CAP); - assert_eq!(decide_live_action(&mut subscription, dummy_event()), LiveAction::Stop); - assert!(subscription.stopped); - } - - #[test] - fn decide_live_action_buffers_when_replay_in_progress_below_cap() { - let state = empty_subscription_state(); - 
let id = register_any_filter(&state); - let mut subscription = state.lock(); - subscription.fill_pending_live_for_overflow_test(id, PENDING_LIVE_HARD_CAP - 1); - assert_eq!(decide_live_action(&mut subscription, dummy_event()), LiveAction::Noop); - assert!(!subscription.stopped); - assert_eq!(subscription.pending_live.len(), PENDING_LIVE_HARD_CAP); - } - - #[test] - fn decide_live_action_stop_on_emitted_history_overflow() { - let state = empty_subscription_state(); - register_any_filter(&state); - let mut subscription = state.lock(); - for i in 0..EMITTED_VIA_NEW_HARD_CAP { - subscription.new_emitted_hashes.insert([i as u8; 32]); - } - - assert_eq!(decide_live_action(&mut subscription, dummy_event()), LiveAction::Stop); - assert!(subscription.stopped); - assert_eq!(subscription.new_emitted_hashes.len(), EMITTED_VIA_NEW_HARD_CAP); - } } diff --git a/substrate/client/statement-store/src/lib.rs b/substrate/client/statement-store/src/lib.rs index 4494c0637a9ff..5948ab8c79646 100644 --- a/substrate/client/statement-store/src/lib.rs +++ b/substrate/client/statement-store/src/lib.rs @@ -73,12 +73,12 @@ use sp_statement_store::{ }; pub use sp_statement_store::{Error, StatementStore, MAX_TOPICS}; use std::{ - collections::{BTreeMap, BTreeSet, HashMap, HashSet, VecDeque}, + collections::{BTreeMap, BTreeSet, HashMap, HashSet}, sync::{atomic::AtomicU64, Arc}, time::{Duration, Instant}, }; pub use subscription::{ - AddFilterError, AtomicSnapshotProvider, LiveEventStream, MultiFilterSubscriptionApi, + AddFilterError, LiveEventStream, MultiFilterSubscriptionApi, MultiFilterSubscriptionEvent, StatementStoreSubscriptionApi, SubscriptionHandle, MAX_FILTERS_PER_SUBSCRIPTION, }; @@ -1631,12 +1631,12 @@ impl StatementStoreSubscriptionApi for Store { } } -impl AtomicSnapshotProvider for Store { - fn add_filter_atomically( +impl Store { + fn register_filter_with_snapshot( &self, filter: &OptimizedTopicFilter, - register: &mut dyn FnMut(), - ) -> Result>> { + register: &mut dyn 
FnMut(Vec>), + ) -> Result<()> { let mut existing_statements = Vec::new(); let index = self.index.read(); self.collect_statements_locked( @@ -1646,33 +1646,32 @@ impl AtomicSnapshotProvider for Store { &mut existing_statements, |statement| Some(statement.encode()), )?; - register(); + register(existing_statements); drop(index); - Ok(existing_statements) + Ok(()) } } impl MultiFilterSubscriptionApi for Arc { fn create_subscription(&self) -> (SubscriptionHandle, LiveEventStream) { - let filters = Arc::new(RwLock::new(HashMap::new())); + let state = Arc::new(parking_lot::Mutex::new( + crate::subscription::MultiFilterSubscriptionState::new(), + )); let next_filter_id = Arc::new(AtomicU64::new(0)); let add_filter_lock = Arc::new(parking_lot::Mutex::new(())); - let snapshot_provider: Arc = self.clone(); + let (wake_tx, wake_rx) = tokio::sync::mpsc::unbounded_channel(); - let (_subscription_sender, subscription_stream) = - self.subscription_manager.subscribe(OptimizedTopicFilter::Any); + let (sub_id, stream) = self.subscription_manager.subscribe_empty(state.clone(), wake_rx); let handle = SubscriptionHandle { - filters: filters.clone(), + sub_id, + state, next_filter_id, - snapshot_provider, + store: self.clone(), + matchers: self.subscription_manager.matchers(), + wake_tx, add_filter_lock, }; - let stream = LiveEventStream { - matcher_stream: subscription_stream, - filters, - pending: VecDeque::new(), - }; (handle, stream) } } @@ -3286,25 +3285,38 @@ mod tests { mod multi_filter { use super::*; - use crate::{LiveEventStream, MultiFilterSubscriptionApi, SubscriptionHandle}; + use crate::{ + LiveEventStream, MultiFilterSubscriptionApi, MultiFilterSubscriptionEvent, + SubscriptionHandle, + }; use futures::StreamExt; use sp_statement_store::{LiveStatementEvent, OptimizedTopicFilter}; - use std::{collections::HashSet, sync::Arc, time::Duration}; + use std::{ + collections::{HashMap, HashSet}, + sync::Arc, + time::Duration, + }; fn arc_test_store() -> (Arc, tempfile::TempDir) 
{ let (store, dir) = test_store(); (Arc::new(store), dir) } - async fn collect_n( + async fn collect_n_new( stream: &mut LiveEventStream, n: usize, timeout: Duration, ) -> Vec { let mut events = Vec::new(); - for _ in 0..n { + while events.len() < n { match tokio::time::timeout(timeout, stream.next()).await { - Ok(Some(event)) => events.push(event), + Ok(Some(MultiFilterSubscriptionEvent::NewStatement(event))) => { + events.push(event); + }, + Ok(Some(MultiFilterSubscriptionEvent::Stop)) => { + panic!("subscription stopped unexpectedly"); + }, + Ok(Some(_)) => {}, _ => break, } } @@ -3314,7 +3326,7 @@ mod tests { async fn drain_all( stream: &mut LiveEventStream, idle: Duration, - ) -> Vec { + ) -> Vec { let mut events = Vec::new(); while let Ok(Some(event)) = tokio::time::timeout(idle, stream.next()).await { events.push(event); @@ -3322,30 +3334,59 @@ mod tests { events } + async fn drain_replays( + stream: &mut LiveEventStream, + filter_ids: impl IntoIterator, + timeout: Duration, + ) -> HashMap>> { + let mut pending: HashSet<_> = filter_ids.into_iter().collect(); + let mut snapshots: HashMap<_, Vec<_>> = + pending.iter().map(|filter_id| (*filter_id, Vec::new())).collect(); + + while !pending.is_empty() { + match tokio::time::timeout(timeout, stream.next()).await { + Ok(Some(MultiFilterSubscriptionEvent::ReplayStatements { + filter_id, + statements, + })) if snapshots.contains_key(&filter_id) => { + snapshots.entry(filter_id).or_default().extend(statements); + }, + Ok(Some(MultiFilterSubscriptionEvent::ReplayStatements { .. 
})) => {}, + Ok(Some(MultiFilterSubscriptionEvent::ReplayDone { filter_id })) => { + pending.remove(&filter_id); + }, + Ok(Some(MultiFilterSubscriptionEvent::NewStatement(_))) => {}, + Ok(Some(MultiFilterSubscriptionEvent::Stop)) => { + panic!("subscription stopped unexpectedly"); + }, + Ok(None) => panic!("subscription ended before replay_done"), + Err(_) => panic!("timed out waiting for replay_done"), + } + } + snapshots + } + #[tokio::test] - async fn create_subscription_no_filters_emits_empty_matched_ids() { + async fn create_subscription_no_filters_emits_no_events() { let (store, _dir) = arc_test_store(); let (_handle, mut stream) = store.create_subscription(); let stmt = signed_statement(1); - let stmt_hash = stmt.hash(); assert_eq!(store.submit(stmt.clone(), StatementSource::Local), SubmitResult::New); - let events = collect_n(&mut stream, 1, Duration::from_secs(1)).await; - assert_eq!(events.len(), 1); - assert_eq!(events[0].hash, stmt_hash); - assert!(events[0].matched_filter_ids.is_empty()); - assert_eq!(events[0].encoded, stmt.encode()); + let events = collect_n_new(&mut stream, 1, Duration::from_secs(1)).await; + assert!(events.is_empty()); } #[tokio::test] - async fn add_filter_empty_store_returns_empty_snapshot() { + async fn add_filter_empty_store_emits_replay_done() { let (store, _dir) = arc_test_store(); - let (handle, _stream) = store.create_subscription(); + let (handle, mut stream) = store.create_subscription(); let filter = OptimizedTopicFilter::MatchAll(HashSet::from([topic(1)])); - let (filter_id, snapshot) = handle.add_filter(filter).unwrap(); - assert!(snapshot.is_empty()); + let filter_id = handle.add_filter(filter).unwrap(); + let snapshots = drain_replays(&mut stream, [filter_id], Duration::from_secs(1)).await; + assert!(snapshots[&filter_id].is_empty()); assert_eq!(filter_id.as_u64(), 0); assert_eq!(handle.filter_ids(), vec![filter_id]); } @@ -3363,10 +3404,12 @@ mod tests { store.submit(s2, StatementSource::Local); 
store.submit(s3.clone(), StatementSource::Local); - let (handle, _stream) = store.create_subscription(); + let (handle, mut stream) = store.create_subscription(); let filter = OptimizedTopicFilter::MatchAll(HashSet::from([topic_a])); - let (_id, snapshot) = handle.add_filter(filter).unwrap(); - let snapshot_set: HashSet<_> = snapshot.into_iter().collect(); + let id = handle.add_filter(filter).unwrap(); + let snapshots = drain_replays(&mut stream, [id], Duration::from_secs(1)).await; + let snapshot = snapshots.get(&id).expect("snapshot should exist"); + let snapshot_set: HashSet<_> = snapshot.iter().cloned().collect(); let expected: HashSet<_> = vec![s1.encode(), s3.encode()].into_iter().collect(); assert_eq!(snapshot_set, expected); } @@ -3377,11 +3420,10 @@ mod tests { let (handle, _stream) = store.create_subscription(); for i in 0..crate::subscription::MAX_FILTERS_PER_SUBSCRIPTION { - let (filter_id, snapshot) = handle + let filter_id = handle .add_filter(OptimizedTopicFilter::MatchAll(HashSet::from([topic(i as u64)]))) .unwrap_or_else(|e| panic!("filter {i} should be accepted: {e:?}")); assert_eq!(filter_id.as_u64(), i as u64); - assert!(snapshot.is_empty()); } let err = handle @@ -3401,7 +3443,7 @@ mod tests { let mut ids = Vec::new(); for i in 0..crate::subscription::MAX_FILTERS_PER_SUBSCRIPTION { - let (filter_id, _) = handle + let filter_id = handle .add_filter(OptimizedTopicFilter::MatchAll(HashSet::from([topic(i as u64)]))) .unwrap_or_else(|e| panic!("filter {i} should be accepted: {e:?}")); ids.push(filter_id); @@ -3409,10 +3451,9 @@ mod tests { assert!(handle.remove_filter(ids[0])); - let (replacement, snapshot) = handle + let replacement = handle .add_filter(OptimizedTopicFilter::Any) .expect("capacity freed by remove_filter"); - assert!(snapshot.is_empty()); assert_eq!( handle.filter_ids().len(), crate::subscription::MAX_FILTERS_PER_SUBSCRIPTION, @@ -3429,16 +3470,15 @@ mod tests { let (handle, mut stream) = store.create_subscription(); let topic_a = 
topic(1); let filter = OptimizedTopicFilter::MatchAll(HashSet::from([topic_a])); - let (id, _snapshot) = handle.add_filter(filter).unwrap(); + let id = handle.add_filter(filter).unwrap(); assert!(handle.remove_filter(id)); assert!(!handle.remove_filter(id)); let stmt = signed_statement_with_topics(1, &[topic_a], None); store.submit(stmt, StatementSource::Local); - let events = collect_n(&mut stream, 1, Duration::from_secs(1)).await; - assert_eq!(events.len(), 1); - assert!(events[0].matched_filter_ids.is_empty()); + let events = collect_n_new(&mut stream, 1, Duration::from_secs(1)).await; + assert!(events.is_empty()); } #[tokio::test] @@ -3448,26 +3488,26 @@ mod tests { let topic_a = topic(1); let topic_b = topic(2); - let (id_a, _) = handle + let id_a = handle .add_filter(OptimizedTopicFilter::MatchAll(HashSet::from([topic_a]))) .unwrap(); - let (id_b, _) = handle + let id_b = handle .add_filter(OptimizedTopicFilter::MatchAll(HashSet::from([topic_b]))) .unwrap(); - let (id_ab, _) = handle + let id_ab = handle .add_filter(OptimizedTopicFilter::MatchAll(HashSet::from([topic_a, topic_b]))) .unwrap(); let s1 = signed_statement_with_topics(1, &[topic_a], None); store.submit(s1, StatementSource::Local); - let events = collect_n(&mut stream, 1, Duration::from_secs(1)).await; + let events = collect_n_new(&mut stream, 1, Duration::from_secs(1)).await; assert_eq!(events.len(), 1); let matched: HashSet<_> = events[0].matched_filter_ids.iter().copied().collect(); assert_eq!(matched, HashSet::from([id_a])); let s2 = signed_statement_with_topics(2, &[topic_a, topic_b], None); store.submit(s2, StatementSource::Local); - let events = collect_n(&mut stream, 1, Duration::from_secs(1)).await; + let events = collect_n_new(&mut stream, 1, Duration::from_secs(1)).await; assert_eq!(events.len(), 1); let matched: HashSet<_> = events[0].matched_filter_ids.iter().copied().collect(); assert_eq!(matched, HashSet::from([id_a, id_b, id_ab])); @@ -3480,12 +3520,12 @@ mod tests { let topic_a = 
topic(1); let topic_b = topic(2); - let (id1, _) = handle + let id1 = handle .add_filter(OptimizedTopicFilter::MatchAll(HashSet::from([topic_a]))) .unwrap(); let handle2: SubscriptionHandle = handle.clone(); - let (id2, _) = handle2 + let id2 = handle2 .add_filter(OptimizedTopicFilter::MatchAll(HashSet::from([topic_b]))) .unwrap(); @@ -3520,19 +3560,27 @@ mod tests { }); std::thread::sleep(Duration::from_millis(10)); - let (filter_id, snapshot) = handle.add_filter(OptimizedTopicFilter::Any).unwrap(); + let filter_id = handle.add_filter(OptimizedTopicFilter::Any).unwrap(); submitter.join().unwrap(); - let snapshot_hashes: HashSet<[u8; 32]> = snapshot - .iter() - .map(|bytes| Statement::decode(&mut &bytes[..]).unwrap().hash()) - .collect(); - + let mut snapshot_hashes: HashSet<[u8; 32]> = HashSet::new(); let mut live_with_filter: HashSet<[u8; 32]> = HashSet::new(); - for ev in drain_all(&mut stream, Duration::from_millis(300)).await { - if ev.matched_filter_ids.contains(&filter_id) { - live_with_filter.insert(ev.hash); + for event in drain_all(&mut stream, Duration::from_millis(300)).await { + match event { + MultiFilterSubscriptionEvent::ReplayStatements { statements, .. 
} => { + snapshot_hashes.extend( + statements + .iter() + .map(|bytes| Statement::decode(&mut &bytes[..]).unwrap().hash()), + ); + }, + MultiFilterSubscriptionEvent::NewStatement(event) + if event.matched_filter_ids.contains(&filter_id) => + { + live_with_filter.insert(event.hash); + }, + _ => {}, } } diff --git a/substrate/client/statement-store/src/subscription.rs b/substrate/client/statement-store/src/subscription.rs index e44d24dc7650d..b996d06fe5a9d 100644 --- a/substrate/client/statement-store/src/subscription.rs +++ b/substrate/client/statement-store/src/subscription.rs @@ -38,14 +38,18 @@ const SUBSCRIPTION_BUFFER_SIZE: usize = 128; /// Maximum number of active filters attached to one statement subscription pub const MAX_FILTERS_PER_SUBSCRIPTION: usize = 128; +const REPLAY_CHUNK_RAW_BYTES: usize = 4 * 1024 * 1024; +const PENDING_REPLAYS_HARD_CAP: usize = MAX_FILTERS_PER_SUBSCRIPTION; +const PENDING_LIVE_HARD_CAP: usize = 64 * 1024; +const EMITTED_VIA_NEW_HARD_CAP: usize = 64 * 1024; use futures::{Stream, StreamExt}; use itertools::Itertools; -use parking_lot::{Mutex, RwLock}; +use parking_lot::Mutex; -use crate::LOG_TARGET; +use crate::{Store, LOG_TARGET}; use sc_utils::id_sequence::SeqID; -use sp_core::{traits::SpawnNamed, Bytes, Decode, Encode}; +use sp_core::{traits::SpawnNamed, Bytes, Encode}; pub use sp_statement_store::StatementStore; use sp_statement_store::{ FilterId, LiveStatementEvent, OptimizedTopicFilter, Result, Statement, StatementEvent, Topic, @@ -54,7 +58,9 @@ use sp_statement_store::{ use std::{ collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, sync::{atomic::AtomicU64, Arc}, + task::Poll, }; +use tokio::sync::mpsc; /// Error returned when attaching a filter to a multi-filter subscription fails #[derive(Debug, Clone, PartialEq, Eq)] @@ -103,78 +109,297 @@ pub trait StatementStoreSubscriptionApi: Send + Sync { ) -> Result<(Vec>, async_channel::Sender, SubscriptionStatementsStream)>; } -/// Provides an atomic snapshot plus filter 
registration hook for [`SubscriptionHandle`] -pub trait AtomicSnapshotProvider: Send + Sync { - fn add_filter_atomically( - &self, - filter: &OptimizedTopicFilter, - register: &mut dyn FnMut(), - ) -> Result>>; -} - /// Creates multi-filter subscriptions for the RPC module pub trait MultiFilterSubscriptionApi: Send + Sync { + /// Creates an empty subscription that can receive filters dynamically fn create_subscription(&self) -> (SubscriptionHandle, LiveEventStream); } /// A handle that attaches, removes, and inspects filters for one multi-filter subscription #[derive(Clone)] pub struct SubscriptionHandle { - pub(crate) filters: Arc>>, + pub(crate) sub_id: SeqID, + pub(crate) state: Arc>, pub(crate) next_filter_id: Arc, - pub(crate) snapshot_provider: Arc, + pub(crate) store: Arc, + pub(crate) matchers: SubscriptionsMatchersHandlers, + pub(crate) wake_tx: mpsc::UnboundedSender<()>, pub(crate) add_filter_lock: Arc>, } impl SubscriptionHandle { - /// Attaches a filter and returns its id with the replay snapshot + /// Attaches a filter and returns its id pub fn add_filter( &self, filter: OptimizedTopicFilter, - ) -> std::result::Result<(FilterId, Vec>), AddFilterError> { + ) -> std::result::Result { let _guard = self.add_filter_lock.lock(); - if self.filters.read().len() >= MAX_FILTERS_PER_SUBSCRIPTION { - return Err(AddFilterError::LimitReached); + { + let state = self.state.lock(); + if state.active_filter_ids.len() >= MAX_FILTERS_PER_SUBSCRIPTION || + state.pending_replays.len() >= PENDING_REPLAYS_HARD_CAP + { + return Err(AddFilterError::LimitReached); + } } let filter_id = FilterId::new(self.next_filter_id.fetch_add(1, std::sync::atomic::Ordering::SeqCst)); - let filters_lock = self.filters.clone(); + let matchers = self.matchers.clone(); + let sub_id = self.sub_id; let filter_to_register = filter.clone(); - let mut register = move || { - filters_lock.write().insert(filter_id, filter_to_register.clone()); + let state = self.state.clone(); + let wake_tx = 
self.wake_tx.clone(); + let mut register = move |snapshot| { + state.lock().record_filter_added(filter_id, snapshot); + matchers.send_by_seq_id( + sub_id, + MatcherMessage::AddFilter { sub_id, filter_id, filter: filter_to_register.clone() }, + ); + let _ = wake_tx.send(()); }; - let snapshot = self.snapshot_provider.add_filter_atomically(&filter, &mut register)?; - Ok((filter_id, snapshot)) + self.store.register_filter_with_snapshot(&filter, &mut register)?; + Ok(filter_id) } pub fn remove_filter(&self, filter_id: FilterId) -> bool { - self.filters.write().remove(&filter_id).is_some() + let _guard = self.add_filter_lock.lock(); + if !self.state.lock().record_filter_removed(filter_id) { + return false; + } + self.matchers.send_by_seq_id( + self.sub_id, + MatcherMessage::RemoveFilter { sub_id: self.sub_id, filter_id }, + ); + let _ = self.wake_tx.send(()); + true } pub fn filter_ids(&self) -> Vec { - self.filters.read().keys().copied().collect() + self.state.lock().active_filter_ids.iter().copied().collect() + } +} + +struct PendingReplay { + filter_id: FilterId, + snapshot: VecDeque>, +} + +struct PendingLiveStatement { + hash: sp_statement_store::Hash, + encoded: Vec, + matched_filter_ids: Vec, +} + +pub(crate) struct MultiFilterSubscriptionState { + active_filter_ids: HashSet, + replays_in_progress: HashSet, + pending_replays: VecDeque, + pending_live: VecDeque, + replayed_filter_ids_by_hash: HashMap>, + new_emitted_hashes: HashSet, + stopped: bool, + stop_emitted: bool, +} + +impl MultiFilterSubscriptionState { + pub(crate) fn new() -> Self { + Self { + active_filter_ids: HashSet::new(), + replays_in_progress: HashSet::new(), + pending_replays: VecDeque::new(), + pending_live: VecDeque::new(), + replayed_filter_ids_by_hash: HashMap::new(), + new_emitted_hashes: HashSet::new(), + stopped: false, + stop_emitted: false, + } + } + + fn record_filter_added(&mut self, filter_id: FilterId, snapshot: Vec>) { + self.active_filter_ids.insert(filter_id); + 
self.replays_in_progress.insert(filter_id); + for encoded in &snapshot { + let hash = sp_statement_store::hash_encoded(encoded); + self.replayed_filter_ids_by_hash.entry(hash).or_default().insert(filter_id); + } + self.pending_replays + .push_back(PendingReplay { filter_id, snapshot: snapshot.into() }); + } + + fn record_filter_removed(&mut self, filter_id: FilterId) -> bool { + if !self.active_filter_ids.remove(&filter_id) { + return false; + } + self.replays_in_progress.remove(&filter_id); + self.pending_replays.retain(|replay| replay.filter_id != filter_id); + self.replayed_filter_ids_by_hash.retain(|_hash, set| { + set.remove(&filter_id); + !set.is_empty() + }); + true + } + + fn next_event(&mut self) -> Option { + if self.stopped { + if self.stop_emitted { + return None; + } + self.stop_emitted = true; + return Some(MultiFilterSubscriptionEvent::Stop); + } + if let Some(event) = self.next_replay_event() { + return Some(event); + } + self.next_pending_live_event() + } + + fn next_replay_event(&mut self) -> Option { + let replay = self.pending_replays.front_mut()?; + let filter_id = replay.filter_id; + if replay.snapshot.is_empty() { + self.pending_replays.pop_front(); + self.replays_in_progress.remove(&filter_id); + return Some(MultiFilterSubscriptionEvent::ReplayDone { filter_id }); + } + + let mut statements = Vec::new(); + let mut chunk_bytes = 0usize; + while let Some(stmt) = replay.snapshot.front() { + if !statements.is_empty() && chunk_bytes + stmt.len() > REPLAY_CHUNK_RAW_BYTES { + break; + } + let bytes = replay.snapshot.pop_front().expect("front checked above; qed"); + chunk_bytes = chunk_bytes.saturating_add(bytes.len()); + statements.push(bytes); + if chunk_bytes >= REPLAY_CHUNK_RAW_BYTES { + break; + } + } + Some(MultiFilterSubscriptionEvent::ReplayStatements { filter_id, statements }) + } + + fn next_pending_live_event(&mut self) -> Option { + let mut idx = 0usize; + while idx < self.pending_live.len() { + let entry = &self.pending_live[idx]; + let 
matched_filters = self.active_matched_filters(&entry.matched_filter_ids); + let still_blocked = + matched_filters.iter().any(|f| self.replays_in_progress.contains(f)); + if still_blocked { + idx += 1; + continue; + } + let popped = self.pending_live.remove(idx).expect("idx in range; qed"); + if let Some(event) = + self.new_statement_event(popped.hash, popped.encoded, &matched_filters) + { + return Some(event); + } + } + None + } + + fn handle_live_event( + &mut self, + event: LiveStatementEvent, + ) -> Option { + let matched_filters = self.active_matched_filters(&event.matched_filter_ids); + if matched_filters.is_empty() { + return None; + } + if matched_filters.iter().any(|f| self.replays_in_progress.contains(f)) { + if self.pending_live.len() >= PENDING_LIVE_HARD_CAP { + log::warn!( + target: LOG_TARGET, + "pending_live cap reached on statement subscription; sending stop", + ); + self.stopped = true; + } else { + self.pending_live.push_back(PendingLiveStatement { + hash: event.hash, + encoded: event.encoded, + matched_filter_ids: event.matched_filter_ids, + }); + } + return None; + } + + self.new_statement_event(event.hash, event.encoded, &matched_filters) + } + + fn new_statement_event( + &mut self, + hash: sp_statement_store::Hash, + encoded: Vec, + filter_ids: &HashSet, + ) -> Option { + if self.new_emitted_hashes.contains(&hash) { + return None; + } + + let replayed_filter_ids = self.replayed_filter_ids_by_hash.get(&hash); + let matched_filter_ids: Vec = filter_ids + .iter() + .filter(|f| replayed_filter_ids.map_or(true, |set| !set.contains(f))) + .copied() + .collect(); + + if matched_filter_ids.is_empty() { + return None; + } + + if self.new_emitted_hashes.len() >= EMITTED_VIA_NEW_HARD_CAP { + log::warn!( + target: LOG_TARGET, + "new_emitted_hashes cap reached on statement subscription; sending stop", + ); + self.stopped = true; + return None; + } + + self.new_emitted_hashes.insert(hash); + Some(MultiFilterSubscriptionEvent::NewStatement(LiveStatementEvent 
{ + hash, + encoded, + matched_filter_ids, + })) } - pub fn matched_filter_ids(&self, statement: &Statement) -> Vec { - self.filters - .read() + fn active_matched_filters(&self, filter_ids: &[FilterId]) -> HashSet { + filter_ids .iter() - .filter(|(_, f)| f.matches(statement)) - .map(|(id, _)| *id) + .filter(|filter_id| self.active_filter_ids.contains(filter_id)) + .copied() .collect() } } -/// Stream of live statement events for a multi-filter subscription +/// Event emitted by a multi-filter subscription +#[derive(Debug, Clone)] +pub enum MultiFilterSubscriptionEvent { + ReplayStatements { + filter_id: FilterId, + statements: Vec>, + }, + ReplayDone { + filter_id: FilterId, + }, + NewStatement(LiveStatementEvent), + Stop, +} + +/// Stream of multi-filter subscription events pub struct LiveEventStream { - pub(crate) matcher_stream: SubscriptionStatementsStream, - pub(crate) filters: Arc>>, - pub(crate) pending: VecDeque>, + rx: async_channel::Receiver, + control_rx: mpsc::UnboundedReceiver<()>, + state: Arc>, + sub_id: SeqID, + matchers: SubscriptionsMatchersHandlers, } impl Stream for LiveEventStream { - type Item = LiveStatementEvent; + type Item = MultiFilterSubscriptionEvent; fn poll_next( self: std::pin::Pin<&mut Self>, @@ -182,52 +407,47 @@ impl Stream for LiveEventStream { ) -> std::task::Poll> { let this = self.get_mut(); loop { - if let Some(bytes) = this.pending.pop_front() { - let Ok(statement) = Statement::decode(&mut &bytes[..]) else { - log::warn!( - target: LOG_TARGET, - "Corrupt statement received on multi-filter subscription" - ); - continue; - }; - let hash = statement.hash(); - let matched_filter_ids: Vec = { - let filters = this.filters.read(); - filters - .iter() - .filter(|(_, f)| f.matches(&statement)) - .map(|(id, _)| *id) - .collect() - }; - return std::task::Poll::Ready(Some(LiveStatementEvent { - hash, - encoded: bytes, - matched_filter_ids, - })); + if let Some(event) = this.state.lock().next_event() { + return Poll::Ready(Some(event)); 
} - - match this.matcher_stream.poll_next_unpin(cx) { - std::task::Poll::Ready(Some(StatementEvent::NewStatements { - statements, .. - })) => { - for encoded in statements { - this.pending.push_back(encoded.0); + match this.control_rx.poll_recv(cx) { + Poll::Ready(Some(())) => continue, + Poll::Ready(None) | Poll::Pending => {}, + } + match this.rx.poll_next_unpin(cx) { + Poll::Ready(Some(event)) => { + if let Some(event) = this.state.lock().handle_live_event(event) { + return Poll::Ready(Some(event)); } + continue; }, - std::task::Poll::Ready(None) => return std::task::Poll::Ready(None), - std::task::Poll::Pending => return std::task::Poll::Pending, + Poll::Ready(None) => return Poll::Ready(None), + Poll::Pending => return Poll::Pending, } } } } +impl Drop for LiveEventStream { + fn drop(&mut self) { + self.matchers + .send_by_seq_id(self.sub_id, MatcherMessage::Unsubscribe(self.sub_id)); + } +} + /// Messages sent to matcher tasks. #[derive(Clone, Debug)] pub enum MatcherMessage { /// A new statement has been submitted. NewStatement(Statement), /// A new subscription has been created. - Subscribe(SubscriptionInfo), + Subscribe { info: IndexedSubscription, tx: async_channel::Sender }, + /// A new multi-filter subscription has been created + SubscribeEmpty { seq_id: SeqID, tx: async_channel::Sender }, + /// Add a filter to an existing multi-filter subscription + AddFilter { sub_id: SeqID, filter_id: FilterId, filter: OptimizedTopicFilter }, + /// Remove a filter from an existing multi-filter subscription + RemoveFilter { sub_id: SeqID, filter_id: FilterId }, /// Unsubscribe the subscription with the given ID. 
Unsubscribe(SeqID), } @@ -268,8 +488,17 @@ impl SubscriptionsHandle { Ok(MatcherMessage::NewStatement(statement)) => { subscriptions.notify_matching_filters(&statement); }, - Ok(MatcherMessage::Subscribe(info)) => { - subscriptions.subscribe(info); + Ok(MatcherMessage::Subscribe { info, tx }) => { + subscriptions.subscribe(info, tx); + }, + Ok(MatcherMessage::SubscribeEmpty { seq_id, tx }) => { + subscriptions.subscribe_empty(seq_id, tx); + }, + Ok(MatcherMessage::AddFilter { sub_id, filter_id, filter }) => { + subscriptions.add_filter(sub_id, filter_id, filter); + }, + Ok(MatcherMessage::RemoveFilter { sub_id, filter_id }) => { + subscriptions.remove_filter(sub_id, filter_id); }, Ok(MatcherMessage::Unsubscribe(seq_id)) => { subscriptions.unsubscribe(seq_id); @@ -306,11 +535,15 @@ impl SubscriptionsHandle { ) -> (async_channel::Sender, SubscriptionStatementsStream) { let next_id = self.next_id(); let (tx, rx) = async_channel::bounded(SUBSCRIPTION_BUFFER_SIZE); - let subscription_info = - SubscriptionInfo { topic_filter: topic_filter.clone(), seq_id: next_id, tx }; + let subscription_info = IndexedSubscription { + topic_filter: topic_filter.clone(), + seq_id: next_id, + filter_key: SubscriptionFilterKey::Fixed, + }; + let subscription_tx = tx.clone(); let result = ( - subscription_info.tx.clone(), + tx, SubscriptionStatementsStream { rx, sub_id: subscription_info.seq_id, @@ -318,16 +551,61 @@ impl SubscriptionsHandle { }, ); - self.matchers - .send_by_seq_id(subscription_info.seq_id, MatcherMessage::Subscribe(subscription_info)); + self.matchers.send_by_seq_id( + subscription_info.seq_id, + MatcherMessage::Subscribe { info: subscription_info, tx: subscription_tx }, + ); result } + pub(crate) fn subscribe_empty( + &self, + state: Arc>, + control_rx: mpsc::UnboundedReceiver<()>, + ) -> (SeqID, LiveEventStream) { + let sub_id = self.next_id(); + let (tx, rx) = async_channel::bounded(SUBSCRIPTION_BUFFER_SIZE); + self.matchers + .send_by_seq_id(sub_id, 
MatcherMessage::SubscribeEmpty { seq_id: sub_id, tx }); + + let stream = + LiveEventStream { rx, control_rx, state, sub_id, matchers: self.matchers.clone() }; + (sub_id, stream) + } + pub(crate) fn notify(&self, statement: Statement) { self.matchers.send_all(MatcherMessage::NewStatement(statement)); } + + pub(crate) fn matchers(&self) -> SubscriptionsMatchersHandlers { + self.matchers.clone() + } +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] +enum SubscriptionFilterKey { + Fixed, + Dynamic(FilterId), +} + +enum SubscriptionRecord { + Statements { + tx: async_channel::Sender, + filter: OptimizedTopicFilter, + }, + Live { + tx: async_channel::Sender, + filters: HashMap, + }, } +enum MatchedSubscription { + Statements, + Live(HashSet), +} + +type IndexedSubscriptionKey = (SeqID, SubscriptionFilterKey); + // Information about all subscriptions. // Each matcher task will have its own instance of this struct. struct SubscriptionsInfo { @@ -340,24 +618,25 @@ struct SubscriptionsInfo { // This structure allows efficient matching: when a statement arrives with N topics, // we only need to check subscriptions that require exactly N or fewer topics. subscriptions_match_all_by_topic: - HashMap; MAX_TOPICS]>, + HashMap; MAX_TOPICS]>, // Subscriptions organized by topic for MatchAny filters. - subscriptions_match_any_by_topic: HashMap>, + subscriptions_match_any_by_topic: + HashMap>, // Subscriptions that listen with Any filter (i.e., no topic filtering). - subscriptions_any: HashMap, - // Mapping from subscription ID to topic filter. - by_sub_id: HashMap, + subscriptions_any: HashMap, + // Mapping from subscription ID to subscription state. + by_sub_id: HashMap, } -// Information about a single subscription. +// Information about one indexed subscription filter. #[derive(Clone, Debug)] -pub(crate) struct SubscriptionInfo { +pub(crate) struct IndexedSubscription { // The filter used for this subscription. 
topic_filter: OptimizedTopicFilter, // The unique ID of this subscription. seq_id: SeqID, - // Channel to send matched statements to the subscriber. - tx: async_channel::Sender, + // The filter key within the subscription. + filter_key: SubscriptionFilterKey, } impl SubscriptionsInfo { @@ -371,18 +650,62 @@ impl SubscriptionsInfo { } // Subscribe a new subscription. - fn subscribe(&mut self, subscription_info: SubscriptionInfo) { + fn subscribe( + &mut self, + subscription_info: IndexedSubscription, + tx: async_channel::Sender, + ) { + self.by_sub_id.insert( + subscription_info.seq_id, + SubscriptionRecord::Statements { tx, filter: subscription_info.topic_filter.clone() }, + ); + self.index_filter(subscription_info); + } + + fn subscribe_empty(&mut self, seq_id: SeqID, tx: async_channel::Sender) { self.by_sub_id - .insert(subscription_info.seq_id, subscription_info.topic_filter.clone()); + .insert(seq_id, SubscriptionRecord::Live { tx, filters: HashMap::new() }); + } + + fn add_filter(&mut self, sub_id: SeqID, filter_id: FilterId, filter: OptimizedTopicFilter) { + let filter_key = SubscriptionFilterKey::Dynamic(filter_id); + let Some(record) = self.by_sub_id.get_mut(&sub_id) else { + return; + }; + let SubscriptionRecord::Live { filters, .. } = record else { + return; + }; + let Entry::Vacant(entry) = filters.entry(filter_id) else { return }; + entry.insert(filter.clone()); + + self.index_filter(IndexedSubscription { topic_filter: filter, seq_id: sub_id, filter_key }); + } + + fn remove_filter(&mut self, sub_id: SeqID, filter_id: FilterId) { + let Some(record) = self.by_sub_id.get_mut(&sub_id) else { + return; + }; + let SubscriptionRecord::Live { filters, .. 
} = record else { + return; + }; + let Some(filter) = filters.remove(&filter_id) else { + return; + }; + + self.remove_indexed_filter(sub_id, SubscriptionFilterKey::Dynamic(filter_id), &filter); + } + + fn index_filter(&mut self, subscription_info: IndexedSubscription) { + let index_key = (subscription_info.seq_id, subscription_info.filter_key); match &subscription_info.topic_filter { OptimizedTopicFilter::Any => { - self.subscriptions_any.insert(subscription_info.seq_id, subscription_info); + self.subscriptions_any.insert(index_key, subscription_info); }, OptimizedTopicFilter::MatchAll(topics) => { for topic in topics { self.subscriptions_match_all_by_topic.entry(*topic).or_default() [topics.len() - 1] - .insert(subscription_info.seq_id, subscription_info.clone()); + .insert(index_key, subscription_info.clone()); } }, OptimizedTopicFilter::MatchAny(topics) => { @@ -390,72 +713,105 @@ impl SubscriptionsInfo { self.subscriptions_match_any_by_topic .entry(*topic) .or_default() - .insert(subscription_info.seq_id, subscription_info.clone()); + .insert(index_key, subscription_info.clone()); } }, }; } - // Notify a single subscriber, marking it for unsubscribing if sending fails. - fn notify_subscriber( - &self, - subscription: &SubscriptionInfo, - bytes_to_send: Bytes, - needs_unsubscribing: &mut HashSet, - ) { - if let Err(err) = subscription.tx.try_send(StatementEvent::NewStatements { - statements: vec![bytes_to_send], - remaining: None, - }) { - log::debug!( - target: LOG_TARGET, - "Failed to send statement to subscriber {:?}: {:?} unsubscribing it", subscription.seq_id, err - ); - // Mark subscription for unsubscribing, to give it a chance to recover the buffers are - // generous enough, if subscription cannot keep up we unsubscribe it. 
- needs_unsubscribing.insert(subscription.seq_id); + fn notify_matching_filters(&mut self, statement: &Statement) { + let mut matches = HashMap::new(); + self.collect_match_all_subscribers(statement, &mut matches); + self.collect_match_any_subscribers(statement, &mut matches); + self.collect_any_subscribers(&mut matches); + + let encoded = statement.encode(); + let bytes_to_send: Bytes = encoded.clone().into(); + let mut needs_unsubscribing = HashSet::new(); + + for (sub_id, matched) in matches { + match (matched, self.by_sub_id.get(&sub_id)) { + ( + MatchedSubscription::Statements, + Some(SubscriptionRecord::Statements { tx, .. }), + ) => { + if let Err(err) = tx.try_send(StatementEvent::NewStatements { + statements: vec![bytes_to_send.clone()], + remaining: None, + }) { + log::debug!( + target: LOG_TARGET, + "Failed to send statement to subscriber {:?}: {:?} unsubscribing it", sub_id, err + ); + needs_unsubscribing.insert(sub_id); + } + }, + ( + MatchedSubscription::Live(filter_ids), + Some(SubscriptionRecord::Live { tx, .. }), + ) if !filter_ids.is_empty() => { + if let Err(err) = tx.try_send(LiveStatementEvent { + hash: statement.hash(), + encoded: encoded.clone(), + matched_filter_ids: filter_ids.into_iter().collect(), + }) { + log::debug!( + target: LOG_TARGET, + "Failed to send statement to subscriber {:?}: {:?} unsubscribing it", sub_id, err + ); + needs_unsubscribing.insert(sub_id); + } + }, + _ => {}, + } } - } - fn notify_matching_filters(&mut self, statement: &Statement) { - self.notify_match_all_subscribers_best(statement); - self.notify_match_any_subscribers(statement); - self.notify_any_subscribers(statement); + for sub_id in needs_unsubscribing { + self.unsubscribe(sub_id); + } } - // Notify all subscribers with MatchAny filters that match the given statement. 
- fn notify_match_any_subscribers(&mut self, statement: &Statement) { - let mut needs_unsubscribing: HashSet = HashSet::new(); - let mut already_notified: HashSet = HashSet::new(); + fn record_match( + matches: &mut HashMap, + subscription: &IndexedSubscription, + ) { + match subscription.filter_key { + SubscriptionFilterKey::Fixed => { + matches.entry(subscription.seq_id).or_insert(MatchedSubscription::Statements); + }, + SubscriptionFilterKey::Dynamic(filter_id) => { + let entry = matches + .entry(subscription.seq_id) + .or_insert_with(|| MatchedSubscription::Live(HashSet::new())); + if let MatchedSubscription::Live(filter_ids) = entry { + filter_ids.insert(filter_id); + } + }, + } + } - let bytes_to_send: Bytes = statement.encode().into(); + // Collect all subscribers with MatchAny filters that match the given statement. + fn collect_match_any_subscribers( + &self, + statement: &Statement, + matches: &mut HashMap, + ) { for statement_topic in statement.topics() { if let Some(subscriptions) = self.subscriptions_match_any_by_topic.get(statement_topic) { - for subscription in subscriptions - .values() - .filter(|subscription| already_notified.insert(subscription.seq_id)) - { - self.notify_subscriber( - subscription, - bytes_to_send.clone(), - &mut needs_unsubscribing, - ); + for subscription in subscriptions.values() { + Self::record_match(matches, subscription); } } } - - // Unsubscribe any subscriptions that failed to receive messages, to give them a chance to - // recover and not miss statements. - for sub_id in needs_unsubscribing { - self.unsubscribe(sub_id); - } } - // Notify all subscribers with MatchAll filters that match the given statement. - fn notify_match_all_subscribers_best(&mut self, statement: &Statement) { - let bytes_to_send: Bytes = statement.encode().into(); - let mut needs_unsubscribing: HashSet = HashSet::new(); + // Collect all subscribers with MatchAll filters that match the given statement. 
+ fn collect_match_all_subscribers( + &self, + statement: &Statement, + matches: &mut HashMap, + ) { let num_topics = statement.topics().len(); // Check all combinations of topics in the statement to find matching subscriptions. @@ -479,34 +835,16 @@ impl SubscriptionsInfo { .values() .filter(|subscription| subscription.topic_filter.matches(statement)) { - self.notify_subscriber( - subscription, - bytes_to_send.clone(), - &mut needs_unsubscribing, - ); + Self::record_match(matches, subscription); } } } - // Unsubscribe any subscriptions that failed to receive messages, to give them a chance to - // recover and not miss statements. - for sub_id in needs_unsubscribing { - self.unsubscribe(sub_id); - } } - // Notify all subscribers that don't filter by topic and want to receive all statements. - fn notify_any_subscribers(&mut self, statement: &Statement) { - let mut needs_unsubscribing: HashSet = HashSet::new(); - - let bytes_to_send: Bytes = statement.encode().into(); + // Collect all subscribers that don't filter by topic and want to receive all statements. + fn collect_any_subscribers(&self, matches: &mut HashMap) { for subscription in self.subscriptions_any.values() { - self.notify_subscriber(subscription, bytes_to_send.clone(), &mut needs_unsubscribing); - } - - // Unsubscribe any subscriptions that failed to receive messages, to give them a chance to - // recover and not miss statements. - for sub_id in needs_unsubscribing { - self.unsubscribe(sub_id); + Self::record_match(matches, subscription); } } @@ -516,9 +854,31 @@ impl SubscriptionsInfo { return; }; - let topics = match &entry { + match entry { + SubscriptionRecord::Statements { filter, .. } => { + self.remove_indexed_filter(id, SubscriptionFilterKey::Fixed, &filter); + }, + SubscriptionRecord::Live { filters, .. 
} => { + for (filter_id, filter) in filters { + self.remove_indexed_filter( + id, + SubscriptionFilterKey::Dynamic(filter_id), + &filter, + ); + } + }, + } + } + + fn remove_indexed_filter( + &mut self, + id: SeqID, + filter_key: SubscriptionFilterKey, + filter: &OptimizedTopicFilter, + ) { + let topics = match filter { OptimizedTopicFilter::Any => { - self.subscriptions_any.remove(&id); + self.subscriptions_any.remove(&(id, filter_key)); return; }, OptimizedTopicFilter::MatchAll(topics) => topics, @@ -530,7 +890,7 @@ impl SubscriptionsInfo { // Check MatchAny map. if let Entry::Occupied(mut entry) = self.subscriptions_match_any_by_topic.entry(*topic) { - entry.get_mut().remove(&id); + entry.get_mut().remove(&(id, filter_key)); if entry.get().is_empty() { entry.remove(); } @@ -539,7 +899,7 @@ impl SubscriptionsInfo { if let Entry::Occupied(mut entry) = self.subscriptions_match_all_by_topic.entry(*topic) { for subscriptions in entry.get_mut().iter_mut() { - if subscriptions.remove(&id).is_some() { + if subscriptions.remove(&(id, filter_key)).is_some() { break; } } @@ -637,6 +997,14 @@ mod tests { }, } } + + fn fixed_subscription(seq_id: u64, topic_filter: OptimizedTopicFilter) -> IndexedSubscription { + IndexedSubscription { + topic_filter, + seq_id: SeqID::from(seq_id), + filter_key: SubscriptionFilterKey::Fixed, + } + } #[test] fn test_subscribe_unsubscribe() { let mut subscriptions = SubscriptionsInfo::new(); @@ -644,18 +1012,17 @@ mod tests { let (tx1, _rx1) = async_channel::bounded::(10); let topic1 = Topic::from([8u8; 32]); let topic2 = Topic::from([9u8; 32]); - let sub_info1 = SubscriptionInfo { - topic_filter: OptimizedTopicFilter::MatchAll( - vec![topic1, topic2].into_iter().collect(), - ), - seq_id: SeqID::from(1), - tx: tx1, - }; - subscriptions.subscribe(sub_info1.clone()); + let sub_info1 = fixed_subscription( + 1, + OptimizedTopicFilter::MatchAll(vec![topic1, topic2].into_iter().collect()), + ); + subscriptions.subscribe(sub_info1.clone(), tx1); 
assert!(subscriptions.subscriptions_match_all_by_topic.contains_key(&topic1)); assert!(subscriptions.subscriptions_match_all_by_topic.contains_key(&topic2)); assert!(subscriptions.by_sub_id.contains_key(&sub_info1.seq_id)); - assert!(!subscriptions.subscriptions_any.contains_key(&sub_info1.seq_id)); + assert!(!subscriptions + .subscriptions_any + .contains_key(&(sub_info1.seq_id, sub_info1.filter_key))); subscriptions.unsubscribe(sub_info1.seq_id); assert!(!subscriptions.subscriptions_match_all_by_topic.contains_key(&topic1)); @@ -666,16 +1033,16 @@ mod tests { fn test_subscribe_any() { let mut subscriptions = SubscriptionsInfo::new(); let (tx1, _rx1) = async_channel::bounded::(10); - let sub_info1 = SubscriptionInfo { - topic_filter: OptimizedTopicFilter::Any, - seq_id: SeqID::from(1), - tx: tx1, - }; - subscriptions.subscribe(sub_info1.clone()); - assert!(subscriptions.subscriptions_any.contains_key(&sub_info1.seq_id)); + let sub_info1 = fixed_subscription(1, OptimizedTopicFilter::Any); + subscriptions.subscribe(sub_info1.clone(), tx1); + assert!(subscriptions + .subscriptions_any + .contains_key(&(sub_info1.seq_id, sub_info1.filter_key))); assert!(subscriptions.by_sub_id.contains_key(&sub_info1.seq_id)); subscriptions.unsubscribe(sub_info1.seq_id); - assert!(!subscriptions.subscriptions_any.contains_key(&sub_info1.seq_id)); + assert!(!subscriptions + .subscriptions_any + .contains_key(&(sub_info1.seq_id, sub_info1.filter_key))); } #[test] @@ -685,18 +1052,17 @@ mod tests { let (tx1, _rx1) = async_channel::bounded::(10); let topic1 = Topic::from([8u8; 32]); let topic2 = Topic::from([9u8; 32]); - let sub_info1 = SubscriptionInfo { - topic_filter: OptimizedTopicFilter::MatchAny( - vec![topic1, topic2].into_iter().collect(), - ), - seq_id: SeqID::from(1), - tx: tx1, - }; - subscriptions.subscribe(sub_info1.clone()); + let sub_info1 = fixed_subscription( + 1, + OptimizedTopicFilter::MatchAny(vec![topic1, topic2].into_iter().collect()), + ); + 
subscriptions.subscribe(sub_info1.clone(), tx1); assert!(subscriptions.subscriptions_match_any_by_topic.contains_key(&topic1)); assert!(subscriptions.subscriptions_match_any_by_topic.contains_key(&topic2)); assert!(subscriptions.by_sub_id.contains_key(&sub_info1.seq_id)); - assert!(!subscriptions.subscriptions_any.contains_key(&sub_info1.seq_id)); + assert!(!subscriptions + .subscriptions_any + .contains_key(&(sub_info1.seq_id, sub_info1.filter_key))); subscriptions.unsubscribe(sub_info1.seq_id); assert!(!subscriptions.subscriptions_match_all_by_topic.contains_key(&topic1)); @@ -704,16 +1070,12 @@ mod tests { } #[test] - fn test_notify_any_subscribers() { + fn test_notify_matching_filters_any() { let mut subscriptions = SubscriptionsInfo::new(); let (tx1, rx1) = async_channel::bounded::(10); - let sub_info1 = SubscriptionInfo { - topic_filter: OptimizedTopicFilter::Any, - seq_id: SeqID::from(1), - tx: tx1, - }; - subscriptions.subscribe(sub_info1.clone()); + let sub_info1 = fixed_subscription(1, OptimizedTopicFilter::Any); + subscriptions.subscribe(sub_info1.clone(), tx1); let statement = signed_statement(1); subscriptions.notify_matching_filters(&statement); @@ -725,20 +1087,17 @@ mod tests { } #[test] - fn test_notify_match_all_subscribers() { + fn test_notify_matching_filters_match_all() { let mut subscriptions = SubscriptionsInfo::new(); let (tx1, rx1) = async_channel::bounded::(10); let topic1 = Topic::from([8u8; 32]); let topic2 = Topic::from([9u8; 32]); - let sub_info1 = SubscriptionInfo { - topic_filter: OptimizedTopicFilter::MatchAll( - vec![topic1, topic2].into_iter().collect(), - ), - seq_id: SeqID::from(1), - tx: tx1, - }; - subscriptions.subscribe(sub_info1.clone()); + let sub_info1 = fixed_subscription( + 1, + OptimizedTopicFilter::MatchAll(vec![topic1, topic2].into_iter().collect()), + ); + subscriptions.subscribe(sub_info1.clone(), tx1); let mut statement = signed_statement(1); statement.set_topic(0, topic2); @@ -757,34 +1116,30 @@ mod tests { } 
#[test] - fn test_notify_match_any_subscribers() { + fn test_notify_matching_filters_match_any() { let mut subscriptions = SubscriptionsInfo::new(); let (tx1, rx1) = async_channel::bounded::(10); let (tx2, rx2) = async_channel::bounded::(10); let topic1 = Topic::from([8u8; 32]); let topic2 = Topic::from([9u8; 32]); - let sub_info1 = SubscriptionInfo { - topic_filter: OptimizedTopicFilter::MatchAny( - vec![topic1, topic2].into_iter().collect(), - ), - seq_id: SeqID::from(1), - tx: tx1, - }; + let sub_info1 = fixed_subscription( + 1, + OptimizedTopicFilter::MatchAny(vec![topic1, topic2].into_iter().collect()), + ); - let sub_info2 = SubscriptionInfo { - topic_filter: OptimizedTopicFilter::MatchAny(vec![topic2].into_iter().collect()), - seq_id: SeqID::from(2), - tx: tx2, - }; + let sub_info2 = fixed_subscription( + 2, + OptimizedTopicFilter::MatchAny(vec![topic2].into_iter().collect()), + ); - subscriptions.subscribe(sub_info1.clone()); - subscriptions.subscribe(sub_info2.clone()); + subscriptions.subscribe(sub_info1.clone(), tx1); + subscriptions.subscribe(sub_info2.clone(), tx2); let mut statement = signed_statement(1); statement.set_topic(0, topic1); statement.set_topic(1, topic2); - subscriptions.notify_match_any_subscribers(&statement); + subscriptions.notify_matching_filters(&statement); let received = unwrap_statement(rx1.try_recv().expect("Should receive statement")); let decoded_statement: Statement = @@ -899,23 +1254,17 @@ mod tests { let topic1 = Topic::from([8u8; 32]); let topic2 = Topic::from([9u8; 32]); - let sub_info1 = SubscriptionInfo { - topic_filter: OptimizedTopicFilter::MatchAll( - vec![topic1, topic2].into_iter().collect(), - ), - seq_id: SeqID::from(1), - tx: tx1, - }; - let sub_info2 = SubscriptionInfo { - topic_filter: OptimizedTopicFilter::MatchAll( - vec![topic1, topic2].into_iter().collect(), - ), - seq_id: SeqID::from(2), - tx: tx2, - }; + let sub_info1 = fixed_subscription( + 1, + OptimizedTopicFilter::MatchAll(vec![topic1, 
topic2].into_iter().collect()), + ); + let sub_info2 = fixed_subscription( + 2, + OptimizedTopicFilter::MatchAll(vec![topic1, topic2].into_iter().collect()), + ); - subscriptions.subscribe(sub_info1.clone()); - subscriptions.subscribe(sub_info2.clone()); + subscriptions.subscribe(sub_info1.clone(), tx1); + subscriptions.subscribe(sub_info2.clone(), tx2); // Both subscriptions should be registered under each topic. assert_eq!( @@ -992,12 +1341,11 @@ mod tests { let (tx1, rx1) = async_channel::bounded::(1); let topic1 = Topic::from([8u8; 32]); - let sub_info1 = SubscriptionInfo { - topic_filter: OptimizedTopicFilter::MatchAny(vec![topic1].into_iter().collect()), - seq_id: SeqID::from(1), - tx: tx1, - }; - subscriptions.subscribe(sub_info1.clone()); + let sub_info1 = fixed_subscription( + 1, + OptimizedTopicFilter::MatchAny(vec![topic1].into_iter().collect()), + ); + subscriptions.subscribe(sub_info1.clone(), tx1); let mut statement = signed_statement(1); statement.set_topic(0, topic1); @@ -1027,21 +1375,18 @@ mod tests { let topic2 = Topic::from([9u8; 32]); // Subscribe to MatchAny with both topics. - let sub_info1 = SubscriptionInfo { - topic_filter: OptimizedTopicFilter::MatchAny( - vec![topic1, topic2].into_iter().collect(), - ), - seq_id: SeqID::from(1), - tx: tx1, - }; - subscriptions.subscribe(sub_info1.clone()); + let sub_info1 = fixed_subscription( + 1, + OptimizedTopicFilter::MatchAny(vec![topic1, topic2].into_iter().collect()), + ); + subscriptions.subscribe(sub_info1.clone(), tx1); // Create a statement that matches BOTH topics. let mut statement = signed_statement(1); statement.set_topic(0, topic1); statement.set_topic(1, topic2); - subscriptions.notify_match_any_subscribers(&statement); + subscriptions.notify_matching_filters(&statement); // Should receive exactly once, not twice. 
let received = unwrap_statement(rx1.try_recv().expect("Should receive statement")); @@ -1062,12 +1407,11 @@ mod tests { let topic2 = Topic::from([9u8; 32]); // Subscribe with MatchAll on only topic1. - let sub_info1 = SubscriptionInfo { - topic_filter: OptimizedTopicFilter::MatchAll(vec![topic1].into_iter().collect()), - seq_id: SeqID::from(1), - tx: tx1, - }; - subscriptions.subscribe(sub_info1.clone()); + let sub_info1 = fixed_subscription( + 1, + OptimizedTopicFilter::MatchAll(vec![topic1].into_iter().collect()), + ); + subscriptions.subscribe(sub_info1.clone(), tx1); // Create a statement that has BOTH topic1 and topic2. let mut statement = signed_statement(1); @@ -1095,14 +1439,11 @@ mod tests { let topic2 = Topic::from([9u8; 32]); let topic3 = Topic::from([10u8; 32]); - let sub_info1 = SubscriptionInfo { - topic_filter: OptimizedTopicFilter::MatchAll( - vec![topic1, topic2].into_iter().collect(), - ), - seq_id: SeqID::from(1), - tx: tx1, - }; - subscriptions.subscribe(sub_info1.clone()); + let sub_info1 = fixed_subscription( + 1, + OptimizedTopicFilter::MatchAll(vec![topic1, topic2].into_iter().collect()), + ); + subscriptions.subscribe(sub_info1.clone(), tx1); // Statement with completely different topics. let mut statement = signed_statement(1); @@ -1116,10 +1457,8 @@ mod tests { #[test] fn test_match_all_with_unsubscribed_topic_first_in_statement() { - // This test exposes a bug where `return` is used instead of `continue` in - // `notify_match_all_subscribers_best`. When a statement has a topic that has no - // subscriptions (not in the map), the function returns early instead of checking - // subsequent topic combinations. + // This test guards against returning early when one statement topic has no subscriptions. + // The matcher must still check later topic combinations that can match. 
let mut subscriptions = SubscriptionsInfo::new(); let (tx1, rx1) = async_channel::bounded::(10); @@ -1129,12 +1468,11 @@ mod tests { let topic2 = Topic::from([2u8; 32]); // Subscribe only to topic2 with MatchAll filter. - let sub_info1 = SubscriptionInfo { - topic_filter: OptimizedTopicFilter::MatchAll(vec![topic2].into_iter().collect()), - seq_id: SeqID::from(1), - tx: tx1, - }; - subscriptions.subscribe(sub_info1); + let sub_info1 = fixed_subscription( + 1, + OptimizedTopicFilter::MatchAll(vec![topic2].into_iter().collect()), + ); + subscriptions.subscribe(sub_info1, tx1); // Create a statement with BOTH topics. topic1 comes first (lower bytes). // When iterating combinations(1), [topic1] is checked before [topic2]. @@ -1144,14 +1482,13 @@ mod tests { statement.set_topic(0, topic1); statement.set_topic(1, topic2); - subscriptions.notify_match_all_subscribers_best(&statement); + subscriptions.notify_matching_filters(&statement); - // With the bug: rx1.try_recv() fails because the function returned early. - // With the fix: rx1.try_recv() succeeds because [topic2] combination is checked. - let received = unwrap_statement(rx1.try_recv().expect( - "Should receive statement - if this fails, the `return` bug in \ - notify_match_all_subscribers_best is present (should be `continue`)", - )); + // The receive succeeds only if the matcher checks the [topic2] combination. + let received = unwrap_statement( + rx1.try_recv() + .expect("Should receive statement from a later matching topic combination"), + ); let decoded_statement: Statement = Statement::decode(&mut &received.0[..]).expect("Should decode statement"); assert_eq!(decoded_statement, statement); @@ -1283,32 +1620,22 @@ mod tests { let topic2 = Topic::from([9u8; 32]); // Subscribe with MatchAll filter. 
- let sub_match_all = SubscriptionInfo { - topic_filter: OptimizedTopicFilter::MatchAll( - vec![topic1, topic2].into_iter().collect(), - ), - seq_id: SeqID::from(1), - tx: tx_match_all, - }; - subscriptions.subscribe(sub_match_all); + let sub_match_all = fixed_subscription( + 1, + OptimizedTopicFilter::MatchAll(vec![topic1, topic2].into_iter().collect()), + ); + subscriptions.subscribe(sub_match_all, tx_match_all); // Subscribe with MatchAny filter. - let sub_match_any = SubscriptionInfo { - topic_filter: OptimizedTopicFilter::MatchAny( - vec![topic1, topic2].into_iter().collect(), - ), - seq_id: SeqID::from(2), - tx: tx_match_any, - }; - subscriptions.subscribe(sub_match_any); + let sub_match_any = fixed_subscription( + 2, + OptimizedTopicFilter::MatchAny(vec![topic1, topic2].into_iter().collect()), + ); + subscriptions.subscribe(sub_match_any, tx_match_any); // Subscribe with Any filter. - let sub_any = SubscriptionInfo { - topic_filter: OptimizedTopicFilter::Any, - seq_id: SeqID::from(3), - tx: tx_any, - }; - subscriptions.subscribe(sub_any); + let sub_any = fixed_subscription(3, OptimizedTopicFilter::Any); + subscriptions.subscribe(sub_any, tx_any); // Create a statement without any topics set. 
let statement = signed_statement(1); From e286b2a18d7c1403a95d68f173e66e0667b29e85 Mon Sep 17 00:00:00 2001 From: DenzelPenzel Date: Fri, 8 May 2026 16:40:43 +0100 Subject: [PATCH 04/16] statement-store: cap stop tests --- .../statement-store/src/subscription.rs | 108 ++++++++++++++++++ 1 file changed, 108 insertions(+) diff --git a/substrate/client/statement-store/src/subscription.rs b/substrate/client/statement-store/src/subscription.rs index b996d06fe5a9d..472ed727c01dc 100644 --- a/substrate/client/statement-store/src/subscription.rs +++ b/substrate/client/statement-store/src/subscription.rs @@ -1005,6 +1005,114 @@ mod tests { filter_key: SubscriptionFilterKey::Fixed, } } + + fn hash_from_u64(value: u64) -> sp_statement_store::Hash { + let mut hash = [0u8; 32]; + hash[..8].copy_from_slice(&value.to_le_bytes()); + hash + } + + fn live_event_for(statement: &Statement, filter_ids: Vec) -> LiveStatementEvent { + LiveStatementEvent { + hash: statement.hash(), + encoded: statement.encode(), + matched_filter_ids: filter_ids, + } + } + + #[test] + fn multi_filter_buffers_live_until_replay_done_and_deduplicates_replayed_filter() { + let mut state = MultiFilterSubscriptionState::new(); + let replay_filter = FilterId::new(1); + let live_filter = FilterId::new(2); + let statement = signed_statement(42); + let encoded = statement.encode(); + + state.record_filter_added(replay_filter, vec![encoded.clone()]); + state.active_filter_ids.insert(live_filter); + + assert!(state + .handle_live_event(live_event_for(&statement, vec![replay_filter, live_filter])) + .is_none()); + assert_eq!(state.pending_live.len(), 1); + + match state.next_event() { + Some(MultiFilterSubscriptionEvent::ReplayStatements { filter_id, statements }) => { + assert_eq!(filter_id, replay_filter); + assert_eq!(statements, vec![encoded]); + }, + other => panic!("expected replay statements, got {other:?}"), + } + assert!(matches!( + state.next_event(), + Some(MultiFilterSubscriptionEvent::ReplayDone { 
filter_id }) if filter_id == replay_filter + )); + + match state.next_event() { + Some(MultiFilterSubscriptionEvent::NewStatement(event)) => { + assert_eq!(event.hash, statement.hash()); + assert_eq!(event.matched_filter_ids, vec![live_filter]); + }, + other => panic!("expected live statement for non-replayed filter, got {other:?}"), + } + assert!(state.next_event().is_none()); + } + + #[test] + fn multi_filter_pending_live_cap_stops_subscription() { + let mut state = MultiFilterSubscriptionState::new(); + let filter_id = FilterId::new(1); + state.active_filter_ids.insert(filter_id); + state.replays_in_progress.insert(filter_id); + + for i in 0..PENDING_LIVE_HARD_CAP { + let event = LiveStatementEvent { + hash: hash_from_u64(i as u64), + encoded: vec![i as u8], + matched_filter_ids: vec![filter_id], + }; + assert!(state.handle_live_event(event).is_none()); + } + assert_eq!(state.pending_live.len(), PENDING_LIVE_HARD_CAP); + + let overflow = LiveStatementEvent { + hash: hash_from_u64(PENDING_LIVE_HARD_CAP as u64), + encoded: vec![0], + matched_filter_ids: vec![filter_id], + }; + assert!(state.handle_live_event(overflow).is_none()); + assert!(matches!(state.next_event(), Some(MultiFilterSubscriptionEvent::Stop))); + assert!(state.next_event().is_none()); + } + + #[test] + fn multi_filter_new_statement_history_cap_stops_subscription() { + let mut state = MultiFilterSubscriptionState::new(); + let filter_id = FilterId::new(1); + state.active_filter_ids.insert(filter_id); + + for i in 0..EMITTED_VIA_NEW_HARD_CAP { + let event = LiveStatementEvent { + hash: hash_from_u64(i as u64), + encoded: vec![i as u8], + matched_filter_ids: vec![filter_id], + }; + assert!(matches!( + state.handle_live_event(event), + Some(MultiFilterSubscriptionEvent::NewStatement(_)) + )); + } + + let overflow = LiveStatementEvent { + hash: hash_from_u64(EMITTED_VIA_NEW_HARD_CAP as u64), + encoded: vec![0], + matched_filter_ids: vec![filter_id], + }; + 
assert!(state.handle_live_event(overflow).is_none()); + assert!(matches!(state.next_event(), Some(MultiFilterSubscriptionEvent::Stop))); + assert!(state.next_event().is_none()); + } + #[test] fn test_subscribe_unsubscribe() { let mut subscriptions = SubscriptionsInfo::new(); From 88fb3a819b1c95d74b6048efe2aeee9d53183bde Mon Sep 17 00:00:00 2001 From: "cmd[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 8 May 2026 16:01:15 +0000 Subject: [PATCH 05/16] Update from github-actions[bot] running command 'fmt' --- substrate/client/statement-store/src/subscription.rs | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/substrate/client/statement-store/src/subscription.rs b/substrate/client/statement-store/src/subscription.rs index 472ed727c01dc..a37a57b77f3e5 100644 --- a/substrate/client/statement-store/src/subscription.rs +++ b/substrate/client/statement-store/src/subscription.rs @@ -378,13 +378,8 @@ impl MultiFilterSubscriptionState { /// Event emitted by a multi-filter subscription #[derive(Debug, Clone)] pub enum MultiFilterSubscriptionEvent { - ReplayStatements { - filter_id: FilterId, - statements: Vec>, - }, - ReplayDone { - filter_id: FilterId, - }, + ReplayStatements { filter_id: FilterId, statements: Vec> }, + ReplayDone { filter_id: FilterId }, NewStatement(LiveStatementEvent), Stop, } From e8de00dd505c3f242ce11dd32d859371cc04311b Mon Sep 17 00:00:00 2001 From: DenzelPenzel Date: Fri, 8 May 2026 17:23:40 +0100 Subject: [PATCH 06/16] statement-store: docs --- substrate/client/statement-store/src/subscription.rs | 9 +++++++++ substrate/primitives/statement-store/src/store_api.rs | 6 ++++++ 2 files changed, 15 insertions(+) diff --git a/substrate/client/statement-store/src/subscription.rs b/substrate/client/statement-store/src/subscription.rs index 472ed727c01dc..e7742835eeb65 100644 --- a/substrate/client/statement-store/src/subscription.rs +++ b/substrate/client/statement-store/src/subscription.rs @@ -162,6 +162,7 
@@ impl SubscriptionHandle { Ok(filter_id) } + /// Removes a filter from this subscription pub fn remove_filter(&self, filter_id: FilterId) -> bool { let _guard = self.add_filter_lock.lock(); if !self.state.lock().record_filter_removed(filter_id) { @@ -175,6 +176,7 @@ impl SubscriptionHandle { true } + /// Returns active filter identifiers for this subscription pub fn filter_ids(&self) -> Vec { self.state.lock().active_filter_ids.iter().copied().collect() } @@ -378,14 +380,21 @@ impl MultiFilterSubscriptionState { /// Event emitted by a multi-filter subscription #[derive(Debug, Clone)] pub enum MultiFilterSubscriptionEvent { + /// Replay statements for a newly attached filter ReplayStatements { + /// Filter that produced this replay batch filter_id: FilterId, + /// SCALE-encoded statements included in this replay batch statements: Vec>, }, + /// Replay completed for a newly attached filter ReplayDone { + /// Filter whose replay completed filter_id: FilterId, }, + /// Live statement event matched one or more active filters NewStatement(LiveStatementEvent), + /// Subscription stopped because local resource limits were reached Stop, } diff --git a/substrate/primitives/statement-store/src/store_api.rs b/substrate/primitives/statement-store/src/store_api.rs index c08a17ac4c1c8..2de8638d538aa 100644 --- a/substrate/primitives/statement-store/src/store_api.rs +++ b/substrate/primitives/statement-store/src/store_api.rs @@ -20,14 +20,17 @@ use crate::{Hash, Statement, Topic, MAX_ANY_TOPICS, MAX_TOPICS}; use sp_core::{bounded_vec::BoundedVec, Bytes, ConstU32}; use std::collections::HashSet; +/// Identifier for a filter attached to a multi-filter subscription #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)] pub struct FilterId(u64); impl FilterId { + /// Creates a filter id from its numeric representation pub fn new(id: u64) -> Self { FilterId(id) } + /// Returns the numeric representation of this filter id pub fn as_u64(&self) -> u64 { self.0 } @@ -42,8 
+45,11 @@ impl std::fmt::Display for FilterId { /// Live statement event emitted by a multi-filter subscription #[derive(Debug, Clone)] pub struct LiveStatementEvent { + /// Hash of the statement pub hash: Hash, + /// SCALE-encoded statement bytes pub encoded: Vec, + /// Filter ids that matched the statement pub matched_filter_ids: Vec, } From c6792de7b4f8314f482d27ec10516a07f61fba87 Mon Sep 17 00:00:00 2001 From: DenzelPenzel Date: Fri, 8 May 2026 17:56:07 +0100 Subject: [PATCH 07/16] statement-store: docs --- .../client/rpc-spec-v2/src/statement/error.rs | 2 ++ .../client/rpc-spec-v2/src/statement/event.rs | 18 +++++++++++++++++- .../client/rpc-spec-v2/src/statement/mod.rs | 3 +++ .../rpc-spec-v2/src/statement/statement.rs | 1 + 4 files changed, 23 insertions(+), 1 deletion(-) diff --git a/substrate/client/rpc-spec-v2/src/statement/error.rs b/substrate/client/rpc-spec-v2/src/statement/error.rs index 9ea7f0753abea..d14e0c7228a21 100644 --- a/substrate/client/rpc-spec-v2/src/statement/error.rs +++ b/substrate/client/rpc-spec-v2/src/statement/error.rs @@ -35,6 +35,7 @@ pub enum Error { InternalError(String), } +/// Error codes defined by the statement RPC specification. pub mod rpc_spec_v2 { /// Connection has too many statement subscriptions pub const REACHED_LIMITS: i32 = -32800; @@ -42,6 +43,7 @@ pub mod rpc_spec_v2 { pub const INVALID_SUBSCRIPTION: i32 = -32801; } +/// Error codes defined by the JSON-RPC specification. 
pub mod json_rpc_spec { /// Request parameters are invalid pub const INVALID_PARAM_ERROR: i32 = -32602; diff --git a/substrate/client/rpc-spec-v2/src/statement/event.rs b/substrate/client/rpc-spec-v2/src/statement/event.rs index 693754db49125..9afa0cd66ea05 100644 --- a/substrate/client/rpc-spec-v2/src/statement/event.rs +++ b/substrate/client/rpc-spec-v2/src/statement/event.rs @@ -26,17 +26,23 @@ use sp_statement_store::{InvalidReason, RejectionReason}; pub enum SubscribeEvent { /// Statements admitted before the filter was attached ReplayStatements { + /// Filter that produced this replay batch #[serde(rename = "filterId")] filter_id: String, + /// SCALE-encoded statements included in this replay batch statements: Vec, }, /// Replay completion marker ReplayDone { + /// Filter whose replay completed #[serde(rename = "filterId")] filter_id: String, }, /// Statements admitted after matching filters were attached - NewStatements { statements: Vec }, + NewStatements { + /// Statement entries included in this notification + statements: Vec, + }, /// Terminal notification Stop, } @@ -44,7 +50,9 @@ pub enum SubscribeEvent { /// Statement item included in a `newStatements` notification #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct NewStatementEntry { + /// SCALE-encoded statement bytes pub statement: Bytes, + /// Filters that matched this statement #[serde(rename = "filterIds")] pub filter_ids: Vec, } @@ -53,22 +61,29 @@ pub struct NewStatementEntry { #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] #[serde(untagged)] pub enum AddFilterResponse { + /// Filter was added and the string contains its filter id Ok(String), + /// Filter could not be added because the subscription reached its limit LimitReached(LimitReachedResult), } +/// Response payload for a limit-reached add-filter result #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct LimitReachedResult { + /// Machine-readable result tag pub result: 
LimitReachedTag, } +/// Result tag returned when the filter limit is reached #[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)] #[serde(rename_all = "camelCase")] pub enum LimitReachedTag { + /// The subscription cannot accept another filter LimitReached, } impl AddFilterResponse { + /// Returns the limit-reached response pub fn limit_reached() -> Self { AddFilterResponse::LimitReached(LimitReachedResult { result: LimitReachedTag::LimitReached, @@ -76,6 +91,7 @@ impl AddFilterResponse { } } +/// Statement submission outcome #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] #[serde(tag = "status", rename_all = "camelCase")] pub enum SubmitOutcome { diff --git a/substrate/client/rpc-spec-v2/src/statement/mod.rs b/substrate/client/rpc-spec-v2/src/statement/mod.rs index f014cd181ae36..6b2185bf24c8b 100644 --- a/substrate/client/rpc-spec-v2/src/statement/mod.rs +++ b/substrate/client/rpc-spec-v2/src/statement/mod.rs @@ -16,8 +16,11 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +/// JSON-RPC method definitions for statement store RPC. pub mod api; +/// Error types for statement store RPC. pub mod error; +/// Event and response types for statement store RPC. 
pub mod event; mod statement; mod subscription; diff --git a/substrate/client/rpc-spec-v2/src/statement/statement.rs b/substrate/client/rpc-spec-v2/src/statement/statement.rs index 030878e72de1f..a7a02a4e1ef9b 100644 --- a/substrate/client/rpc-spec-v2/src/statement/statement.rs +++ b/substrate/client/rpc-spec-v2/src/statement/statement.rs @@ -54,6 +54,7 @@ where B: StatementStore + Send + Sync + 'static, Arc: MultiFilterSubscriptionApi, { + /// Creates a new statement RPC implementation pub fn new(store: Arc, executor: SubscriptionTaskExecutor) -> Self { Self { store, executor, subscriptions: StatementSubscriptions::new() } } From c6836c4800a1090c8ffe636c9b13f218f45aa345 Mon Sep 17 00:00:00 2001 From: DenzelPenzel Date: Mon, 11 May 2026 11:51:14 +0100 Subject: [PATCH 08/16] statement-store: docs --- substrate/client/rpc-spec-v2/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/substrate/client/rpc-spec-v2/src/lib.rs b/substrate/client/rpc-spec-v2/src/lib.rs index 091c7d9943de6..7a296a26c9eed 100644 --- a/substrate/client/rpc-spec-v2/src/lib.rs +++ b/substrate/client/rpc-spec-v2/src/lib.rs @@ -31,6 +31,7 @@ pub mod archive; pub mod bitswap; pub mod chain_head; pub mod chain_spec; +/// Statement store JSON-RPC methods. 
pub mod statement; pub mod transaction; From def5ff48bbc9593706c03ee27f0b06e832daf563 Mon Sep 17 00:00:00 2001 From: "cmd[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 11 May 2026 11:55:24 +0000 Subject: [PATCH 09/16] Update from github-actions[bot] running command 'prdoc --audience node_dev --bump patch' --- prdoc/pr_11989.prdoc | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 prdoc/pr_11989.prdoc diff --git a/prdoc/pr_11989.prdoc b/prdoc/pr_11989.prdoc new file mode 100644 index 0000000000000..4e02ac5af4423 --- /dev/null +++ b/prdoc/pr_11989.prdoc @@ -0,0 +1,22 @@ +title: 'statement-store: new api implementation' +doc: +- audience: Node Dev + description: |- + ## Summary + + Adds the `statement_unstable_*` JSON-RPC API implementation for statement-store and wires it into the parachain node RPC stack. + + ## Changes + + - Add `statement_unstable_submit`, `statement_unstable_subscribe`, `statement_unstable_add_filter`, and `statement_unstable_remove_filter` + - Add multi-filter subscription support with replay and live statement delivery + - Add zombienet test for the unstable RPC multi-filter flow +crates: +- name: polkadot-omni-node-lib + bump: patch +- name: sc-rpc-spec-v2 + bump: patch +- name: sc-statement-store + bump: patch +- name: sp-statement-store + bump: patch From 8c61ec2833420d2924dde4c7af4e5ea7dfc942bb Mon Sep 17 00:00:00 2001 From: DenzelPenzel Date: Tue, 12 May 2026 21:19:49 +0100 Subject: [PATCH 10/16] statement-store: clean up unstable subscription handling --- .../zombie_ci/statement_store/integration.rs | 69 ++- .../tests/zombie_ci/statement_store/mod.rs | 1 + .../statement_store/unstable_bench.rs | 398 ++++++++++++++++++ .../rpc-spec-v2/src/statement/statement.rs | 24 +- .../rpc-spec-v2/src/statement/subscription.rs | 158 +++---- .../client/rpc-spec-v2/src/statement/tests.rs | 17 +- substrate/client/statement-store/src/lib.rs | 69 ++- .../statement-store/src/subscription.rs | 177 
++++++-- 8 files changed, 725 insertions(+), 188 deletions(-) create mode 100644 cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/unstable_bench.rs diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/integration.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/integration.rs index f6b99c0ff6b4a..96e801d9e3f23 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/integration.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/integration.rs @@ -2,11 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 use super::common::{ - assert_no_more_statements, assert_statements_match, base_dir, collator_args, - create_chain_spec_with_allowances, expect_one_statement, expect_statements_unordered, - online_client_from_node, spawn_network, spawn_network_sudo, - spawn_network_with_injected_allowances, submit_statement, subscribe_topic, - subscribe_topic_filter, wait_for_first_block, COLLATOR_INFO_LOG_FILTER, + add_filter_unstable, assert_no_more_statements, assert_statements_match, base_dir, + collator_args, create_chain_spec_with_allowances, expect_one_statement, + expect_statements_unordered, online_client_from_node, remove_filter_unstable, spawn_network, + spawn_network_sudo, spawn_network_with_injected_allowances, submit_statement, + submit_statement_unstable, subscribe_topic, subscribe_topic_filter, subscribe_unstable, + unstable_subscription_id, wait_for_first_block, UnstableAddFilterResponse, + UnstableNewStatement, UnstableStatementEvent, COLLATOR_INFO_LOG_FILTER, COLLATOR_TRACE_LOG_FILTER, }; use codec::Encode; @@ -21,7 +23,6 @@ use sc_statement_store::{ test_utils::{create_allowance_items, create_test_statement, get_keypair}, }; use sp_core::{sr25519, Bytes, Pair}; -use sp_core::Bytes; use sp_runtime::BoundedVec; use sp_statement_store::{ RejectionReason, Statement, StatementAllowance, SubmitResult, Topic, TopicFilter, @@ -49,6 +50,26 @@ async fn expect_unstable_event( 
.map_err(|e| anyhow::anyhow!("Unstable statement subscription error: {}", e)) } +async fn expect_one_unstable_statement( + subscription: &mut zombienet_sdk::subxt::ext::subxt_rpcs::client::RpcSubscription< + UnstableStatementEvent, + >, + timeout_secs: u64, +) -> Result { + loop { + return match expect_unstable_event(subscription, timeout_secs).await? { + UnstableStatementEvent::NewStatements { statements } => { + if statements.is_empty() { + continue; + } + assert_eq!(statements.len(), 1); + Ok(statements.into_iter().next().unwrap()) + }, + event => anyhow::bail!("Expected unstable newStatements event, got {:?}", event), + }; + } +} + async fn collect_unstable_replay( subscription: &mut zombienet_sdk::subxt::ext::subxt_rpcs::client::RpcSubscription< UnstableStatementEvent, @@ -128,6 +149,42 @@ async fn statement_store_basic_propagation() -> Result<(), anyhow::Error> { Ok(()) } +#[tokio::test(flavor = "multi_thread")] +async fn statement_store_unstable_basic_propagation() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + let network = spawn_network_with_injected_allowances(&["charlie", "dave"], 8).await?; + + let charlie = network.get_node("charlie")?; + let dave = network.get_node("dave")?; + + let charlie_rpc = charlie.rpc().await?; + let dave_rpc = dave.rpc().await?; + + let topic: Topic = [0x55u8; 32].into(); + let keypair = get_keypair(0); + let statement = create_test_statement(&keypair, &[topic], None, vec![1, 2, 3], u32::MAX, 0); + let expected: Bytes = statement.encode().into(); + + let mut subscription = subscribe_unstable(&dave_rpc).await?; + let subscription_id = unstable_subscription_id(&subscription)?; + let filter = + filter_id(add_filter_unstable(&dave_rpc, &subscription_id, match_all_filter(topic)).await?); + let replayed = collect_unstable_replay(&mut subscription, &filter).await?; + assert!(replayed.is_empty(), "Fresh unstable filter 
should not replay statements"); + + let result = submit_statement_unstable(&charlie_rpc, &statement).await?; + assert_eq!(result, SubmitResult::New); + + let received = expect_one_unstable_statement(&mut subscription, 20).await?; + assert_eq!(received.statement, expected, "Unstable statement data mismatch"); + assert_eq!(received.filter_ids, vec![filter], "Unexpected unstable filter ids"); + + Ok(()) +} + #[tokio::test(flavor = "multi_thread")] async fn statement_store_unstable_rpc_multi_filter_flow() -> Result<(), anyhow::Error> { let _ = env_logger::try_init_from_env( diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/mod.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/mod.rs index eb2f0eb73a174..5204a99896e96 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/mod.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/mod.rs @@ -4,3 +4,4 @@ mod bench; mod common; mod integration; +mod unstable_bench; diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/unstable_bench.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/unstable_bench.rs new file mode 100644 index 0000000000000..e816da8b03dca --- /dev/null +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/unstable_bench.rs @@ -0,0 +1,398 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Benchmarking statement store performance through the unstable RPC API + +use anyhow::anyhow; +use log::{debug, info}; +use sc_statement_store::test_utils::get_keypair; +use sp_core::blake2_256; +use sp_statement_store::{Statement, Topic, TopicFilter}; +use std::{sync::Arc, time::Duration}; +use tokio::{sync::Barrier, time::timeout}; +use zombienet_sdk::subxt::{backend::rpc::RpcClient, ext::subxt_rpcs::client::RpcSubscription}; + +use super::common::{ + add_filter_unstable, spawn_network_with_injected_allowances, submit_statement_unstable, + subscribe_unstable, unstable_subscription_id, UnstableAddFilterResponse, + UnstableStatementEvent, RPC_POOL_SIZE, +}; + +struct LatencyBenchConfig { + num_rounds: usize, + num_nodes: usize, + num_clients: u32, + max_retries: u32, + interval_ms: u64, + req_timeout_ms: u64, + messages_pattern: &'static [(usize, usize)], +} + +impl LatencyBenchConfig { + fn messages_per_client(&self) -> usize { + self.messages_pattern.iter().map(|(count, _)| count).sum() + } +} + +#[derive(Debug, Clone)] +struct RoundStats { + send_duration: Duration, + receive_duration: Duration, + full_latency: Duration, + sent_count: u32, + received_count: u32, + receive_attempts: u32, +} + +async fn add_match_all_filter_unstable( + rpc: &RpcClient, + subscription_id: &str, + topic: Topic, +) -> Result { + match add_filter_unstable( + rpc, + subscription_id, + TopicFilter::MatchAll(vec![topic].try_into().expect("Single topic")), + ) + .await? 
+ { + UnstableAddFilterResponse::Ok(filter_id) => Ok(filter_id), + UnstableAddFilterResponse::LimitReached { result } => { + Err(anyhow!("Unexpected unstable filter limit response: {result}")) + }, + } +} + +async fn collect_empty_unstable_replay( + subscription: &mut RpcSubscription, + filter_id: &str, +) -> Result<(), anyhow::Error> { + loop { + let event = timeout(Duration::from_secs(20), subscription.next()) + .await + .map_err(|_| anyhow!("Timeout waiting for unstable replayDone"))? + .ok_or_else(|| anyhow!("Unstable statement subscription ended during replay"))? + .map_err(|e| anyhow!("Unstable statement subscription error: {e}"))?; + + match event { + UnstableStatementEvent::ReplayStatements { filter_id: id, statements } + if id == filter_id => + { + if !statements.is_empty() { + return Err(anyhow!( + "Expected empty unstable replay for filter {filter_id}, got {} statement(s)", + statements.len() + )); + } + }, + UnstableStatementEvent::ReplayDone { filter_id: id } if id == filter_id => { + return Ok(()) + }, + event => { + return Err(anyhow!( + "Unexpected unstable event while draining replay for filter {filter_id}: {event:?}" + )); + }, + } + } +} + +async fn next_unstable_statements( + subscription: &mut RpcSubscription, + expected_count: u32, + total_timeout: Duration, +) -> Result { + let deadline = tokio::time::Instant::now() + total_timeout; + let mut received_count = 0; + + while received_count < expected_count { + let remaining = deadline.saturating_duration_since(tokio::time::Instant::now()); + if remaining.is_zero() { + return Err(anyhow!( + "Timeout waiting for unstable statements: received {received_count}/{expected_count}" + )); + } + + let event = timeout(remaining, subscription.next()) + .await + .map_err(|_| { + anyhow!( + "Timeout waiting for unstable statements: received {received_count}/{expected_count}" + ) + })? + .ok_or_else(|| anyhow!("Unstable statement subscription ended unexpectedly"))? 
+ .map_err(|e| anyhow!("Unstable statement subscription error: {e}"))?; + + match event { + UnstableStatementEvent::NewStatements { statements } => { + for entry in &statements { + if entry.filter_ids.is_empty() { + return Err(anyhow!("Unstable newStatements entry has no filter ids")); + } + } + received_count += statements.len() as u32; + }, + UnstableStatementEvent::Stop => { + return Err(anyhow!( + "Unstable statement subscription stopped after {received_count}/{expected_count}" + )); + }, + event => return Err(anyhow!("Unexpected unstable event while receiving: {event:?}")), + } + } + + Ok(received_count) +} + +#[tokio::test(flavor = "multi_thread")] +async fn statement_store_unstable_latency_bench() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + let config = Arc::new(LatencyBenchConfig { + num_nodes: 5, + num_clients: 50000, + interval_ms: 10000, + num_rounds: 1, + messages_pattern: &[(5, 1024 / 2)], + max_retries: 500, + req_timeout_ms: 3000, + }); + + let collator_names: Vec = + (0..config.num_nodes).map(|i| format!("collator{i}")).collect(); + let collator_names: Vec<&str> = collator_names.iter().map(|s| s.as_str()).collect(); + + let network = + spawn_network_with_injected_allowances(&collator_names, config.num_clients).await?; + + info!("Starting unstable Latency benchmark"); + info!(""); + info!("Clients: {}", config.num_clients); + info!("Nodes: {}", config.num_nodes); + info!("Rounds: {}", config.num_rounds); + info!("Interval, ms: {}", config.interval_ms); + info!("Messages, per round: {}", config.messages_per_client() as u32 * config.num_clients); + info!("Message pattern:"); + for &(count, size) in config.messages_pattern { + info!(" - {} messages {} bytes", count, size); + } + info!(""); + + let clients_per_node = config.num_clients as usize / config.num_nodes; + let pool_size_per_node = RPC_POOL_SIZE.min(clients_per_node); + let mut 
rpc_pools: Vec> = Vec::new(); + for &name in &collator_names { + let node = network.get_node(name)?; + let mut pool = Vec::with_capacity(pool_size_per_node); + for _ in 0..pool_size_per_node { + pool.push(node.rpc().await?); + } + rpc_pools.push(pool); + } + info!( + "Created unstable RPC connection pool: {} connections x {} nodes = {} total", + pool_size_per_node, + collator_names.len(), + pool_size_per_node * collator_names.len() + ); + + let barrier = Arc::new(Barrier::new(config.num_clients as usize)); + let sync_start = std::time::Instant::now(); + + let test_run_id = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_micros() as u64; + + let handles: Vec<_> = (0..config.num_clients) + .map(|client_id| { + let config = Arc::clone(&config); + let barrier = Arc::clone(&barrier); + let keyring = get_keypair(client_id); + let node_idx = (client_id as usize) % config.num_nodes; + let conn_idx = (client_id as usize / config.num_nodes) % pool_size_per_node; + let rpc_client = rpc_pools[node_idx][conn_idx].clone(); + let neighbour_id = (client_id + 1) % config.num_clients; + let neighbour_node_idx = (neighbour_id as usize) % config.num_nodes; + if node_idx == neighbour_node_idx && config.num_nodes > 1 { + panic!( + "Client {client_id} and neighbour {neighbour_id} are on the same node {node_idx}!" 
+ ); + } + + tokio::spawn(async move { + barrier.wait().await; + + if client_id == 0 { + let sync_time = sync_start.elapsed(); + debug!( + "All {} unstable tasks synchronized and starting work in {:.3}s", + config.num_clients, + sync_time.as_secs_f64() + ); + } + + let submission_jitter = (client_id % 1000) as u64; + tokio::time::sleep(Duration::from_millis(submission_jitter)).await; + + let mut rounds_stats = Vec::new(); + for round in 0..config.num_rounds { + let round_start = std::time::Instant::now(); + + if client_id == 0 { + info!("Creating unstable subscription and filters for expected messages"); + } + + let mut subscription = subscribe_unstable(&rpc_client).await?; + let subscription_id = unstable_subscription_id(&subscription)?; + let mut filter_ids = Vec::new(); + for msg_idx in 0..config.messages_per_client() as u32 { + let topic_str = format!("{test_run_id}-{client_id}-{round}-{msg_idx}"); + + if client_id == 0 { + info!("Adding unstable filter for {msg_idx} message(s) {topic_str:?}"); + } + + let topic: Topic = blake2_256(topic_str.as_bytes()).into(); + let filter_id = + add_match_all_filter_unstable(&rpc_client, &subscription_id, topic) + .await?; + collect_empty_unstable_replay(&mut subscription, &filter_id).await?; + filter_ids.push((msg_idx, topic_str, filter_id)); + } + + if client_id == 0 { + info!("Created {} unstable filters", filter_ids.len()); + } + + let mut msg_idx: u32 = 0; + + if client_id == 0 { + info!("Start sending messages via unstable submit"); + } + + for &(count, size) in config.messages_pattern { + for _ in 0..count { + let mut statement = Statement::new(); + + let topic_str = format!("{test_run_id}-{client_id}-{round}-{msg_idx}"); + let topic = blake2_256(topic_str.as_bytes()); + let channel = blake2_256(msg_idx.to_le_bytes().as_ref()); + + let timestamp_ms = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_millis() as u32; + + statement.set_channel(channel); + 
statement.set_expiry_from_parts(u32::MAX, timestamp_ms); + statement.set_topic(0, topic.into()); + statement.set_plain_data(vec![0u8; size]); + statement.sign_sr25519_private(&keyring); + + let result = submit_statement_unstable(&rpc_client, &statement).await?; + + msg_idx += 1; + if client_id == 0 { + info!( + "Sent {msg_idx} unstable message(s) {topic_str:?}, {result:?}" + ); + } + } + } + + let sent_count = msg_idx; + let send_duration = round_start.elapsed(); + + let receive_start = std::time::Instant::now(); + let expected_count = config.messages_per_client() as u32; + let total_timeout = + Duration::from_millis(config.req_timeout_ms * config.max_retries as u64); + let received_count = + next_unstable_statements(&mut subscription, expected_count, total_timeout) + .await?; + + let receive_duration = receive_start.elapsed(); + let full_latency = round_start.elapsed(); + if full_latency < Duration::from_millis(config.interval_ms) { + tokio::time::sleep( + Duration::from_millis(config.interval_ms) - full_latency, + ) + .await; + } + + rounds_stats.push(RoundStats { + send_duration, + receive_duration, + full_latency, + sent_count, + received_count, + receive_attempts: filter_ids.len() as u32, + }); + } + + let expected_count = config.messages_per_client() as u32; + for stats in &rounds_stats { + if stats.sent_count != expected_count { + return Err(anyhow!( + "Client {}: Expected {} unstable messages sent, but got {}", + client_id, + expected_count, + stats.sent_count + )); + } + if stats.received_count != expected_count { + return Err(anyhow!( + "Client {}: Expected {} unstable messages received, but got {}", + client_id, + expected_count, + stats.received_count + )); + } + } + + Ok::<_, anyhow::Error>(rounds_stats) + }) + }) + .collect(); + + let mut all_round_stats = Vec::new(); + for handle in handles { + let stats = handle.await??; + all_round_stats.extend(stats); + } + + let calc_stats = |values: Vec| -> (f64, f64, f64) { + let min = 
values.iter().copied().fold(f64::INFINITY, f64::min); + let max = values.iter().copied().fold(f64::NEG_INFINITY, f64::max); + let avg = values.iter().sum::() / values.len() as f64; + (min, avg, max) + }; + + let send_s = + calc_stats(all_round_stats.iter().map(|s| s.send_duration.as_secs_f64()).collect()); + let read_s = + calc_stats(all_round_stats.iter().map(|s| s.receive_duration.as_secs_f64()).collect()); + let latency_s = + calc_stats(all_round_stats.iter().map(|s| s.full_latency.as_secs_f64()).collect()); + let attempts = calc_stats(all_round_stats.iter().map(|s| s.receive_attempts as f64).collect()); + let attempts_per_msg = ( + attempts.0 / config.messages_per_client() as f64, + attempts.1 / config.messages_per_client() as f64, + attempts.2 / config.messages_per_client() as f64, + ); + + info!(""); + info!(" Min Avg Max"); + info!("Send, s {:>8.3} {:>8.3} {:>8.3}", send_s.0, send_s.1, send_s.2); + info!("Receive, s {:>8.3} {:>8.3} {:>8.3}", read_s.0, read_s.1, read_s.2); + info!("Latency, s {:>8.3} {:>8.3} {:>8.3}", latency_s.0, latency_s.1, latency_s.2); + info!( + "Attempts, per msg {:>8.1} {:>8.1} {:>8.1}", + attempts_per_msg.0, attempts_per_msg.1, attempts_per_msg.2 + ); + + Ok(()) +} diff --git a/substrate/client/rpc-spec-v2/src/statement/statement.rs b/substrate/client/rpc-spec-v2/src/statement/statement.rs index a7a02a4e1ef9b..937d0bd850569 100644 --- a/substrate/client/rpc-spec-v2/src/statement/statement.rs +++ b/substrate/client/rpc-spec-v2/src/statement/statement.rs @@ -93,34 +93,20 @@ where fn statement_unstable_subscribe(&self, pending: PendingSubscriptionSink, _ext: &Extensions) { let subscriptions = self.subscriptions.clone(); let store = self.store.clone(); - let executor = self.executor.clone(); + let connection_id = pending.connection_id(); let fut = async move { - let connection_id = pending.connection_id(); - let Some(reserved) = subscriptions.reserve(connection_id) else { - pending.reject(Error::ReachedLimits).await; - return; - }; - let 
Ok(sink) = pending.accept().await.map(Subscription::from) else { return }; let sub_id = read_subscription_id_as_string(&sink); - let (handle, live_stream) = store.create_subscription(); - let Some(entry) = reserved.register(sub_id.clone(), handle) else { + + let Some(entry) = subscriptions.register(connection_id, sub_id.clone(), handle) else { log::debug!(target: LOG_TARGET, "duplicate subscription id {sub_id}; aborting"); return; }; - let task = run_subscription_task(sink, live_stream); - executor.spawn( - "statement-unstable-subscribe", - Some("rpc"), - async move { - task.await; - drop(entry); - } - .boxed(), - ); + run_subscription_task(sink, live_stream).await; + drop(entry); }; self.executor diff --git a/substrate/client/rpc-spec-v2/src/statement/subscription.rs b/substrate/client/rpc-spec-v2/src/statement/subscription.rs index 267b4072fee5a..f1ffca0e144d9 100644 --- a/substrate/client/rpc-spec-v2/src/statement/subscription.rs +++ b/substrate/client/rpc-spec-v2/src/statement/subscription.rs @@ -22,7 +22,7 @@ use crate::statement::{ }; use futures::StreamExt; use jsonrpsee::ConnectionId; -use parking_lot::{Mutex, RwLock}; +use parking_lot::RwLock; use sc_rpc::utils::Subscription; use sc_statement_store::{ AddFilterError, LiveEventStream, MultiFilterSubscriptionEvent, SubscriptionHandle, @@ -30,122 +30,77 @@ use sc_statement_store::{ use sp_statement_store::{FilterId, OptimizedTopicFilter}; use std::{collections::HashMap, sync::Arc}; -use crate::common::connections::{RegisteredConnection, RpcConnections}; - -pub const MAX_SUBSCRIPTIONS_PER_CONNECTION: usize = 16; - pub(crate) enum AddFilterOutcome { Added(FilterId), LimitReached, } -/// Per-subscription state shared between RPC handlers and the subscription task -pub(crate) struct SubscriptionState { - handle: SubscriptionHandle, -} - -impl SubscriptionState { - fn new(handle: SubscriptionHandle) -> Self { - Self { handle } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -struct SubscriptionKey { - 
conn_id: ConnectionId, - sub_id: String, -} - -impl SubscriptionKey { - fn new(conn_id: ConnectionId, sub_id: impl Into) -> Self { - Self { conn_id, sub_id: sub_id.into() } - } -} - -type SubscriptionStateRef = Arc>; -type SubscriptionRegistry = Arc>>; +type SubscriptionStateRef = Arc; +type SubscriptionRegistry = + Arc>>>; /// Long-lived registry owned by the RPC server #[derive(Clone, Default)] pub struct StatementSubscriptions { - rpc_connections: RpcConnections, registry: SubscriptionRegistry, } impl StatementSubscriptions { pub fn new() -> Self { - Self { - registry: Arc::new(RwLock::new(HashMap::new())), - rpc_connections: RpcConnections::new(MAX_SUBSCRIPTIONS_PER_CONNECTION), - } - } - - /// Reserves a slot for a new subscription - pub fn reserve(&self, conn_id: ConnectionId) -> Option { - let reserved_token = self.rpc_connections.reserve_space(conn_id)?; - Some(ReservedSubscription { - conn_id, - token: Some(reserved_token), - registry: self.registry.clone(), - }) - } - - /// Gets a subscription owned by the connection - pub fn get(&self, conn_id: ConnectionId, sub_id: &str) -> Option { - if !self.rpc_connections.contains_identifier(conn_id, sub_id) { - return None; - } - self.registry.read().get(&SubscriptionKey::new(conn_id, sub_id)).cloned() + Self { registry: Arc::new(RwLock::new(HashMap::new())) } } -} -/// Reservation for one pending subscription -pub struct ReservedSubscription { - conn_id: ConnectionId, - token: Option, - registry: SubscriptionRegistry, -} - -impl ReservedSubscription { + /// Registers a subscription owned by the connection pub fn register( - mut self, + &self, + conn_id: ConnectionId, sub_id: String, handle: SubscriptionHandle, ) -> Option { - let token = self.token.take()?; - let registered = token.register(sub_id.clone())?; - let state = Arc::new(Mutex::new(SubscriptionState::new(handle))); - let key = SubscriptionKey::new(self.conn_id, sub_id); - { - let mut registry = self.registry.write(); - if registry.contains_key(&key) { - 
return None; - } - registry.insert(key.clone(), state.clone()); + let state = Arc::new(handle); + let mut registry = self.registry.write(); + let connection = registry.entry(conn_id).or_default(); + if connection.contains_key(&sub_id) { + return None; } - Some(SubscriptionEntry { key, _registered: registered, registry: self.registry.clone() }) + + connection.insert(sub_id.clone(), state); + Some(SubscriptionEntry { conn_id, sub_id, registry: self.registry.clone() }) + } + + /// Gets a subscription owned by the connection + pub fn get(&self, conn_id: ConnectionId, sub_id: &str) -> Option { + self.registry + .read() + .get(&conn_id) + .and_then(|connection| connection.get(sub_id).cloned()) } } /// Registered subscription entry pub struct SubscriptionEntry { - key: SubscriptionKey, - _registered: RegisteredConnection, + conn_id: ConnectionId, + sub_id: String, registry: SubscriptionRegistry, } impl Drop for SubscriptionEntry { fn drop(&mut self) { - self.registry.write().remove(&self.key); + let mut registry = self.registry.write(); + if let Some(connection) = registry.get_mut(&self.conn_id) { + connection.remove(&self.sub_id); + if connection.is_empty() { + registry.remove(&self.conn_id); + } + } } } pub(crate) fn add_filter_sync( - state: &Arc>, + state: &Arc, filter: OptimizedTopicFilter, ) -> Result { - let handle = state.lock().handle.clone(); - match handle.add_filter(filter) { + match state.add_filter(filter) { Ok(filter_id) => Ok(AddFilterOutcome::Added(filter_id)), Err(AddFilterError::LimitReached) => Ok(AddFilterOutcome::LimitReached), Err(AddFilterError::Store(e)) => { @@ -154,11 +109,8 @@ pub(crate) fn add_filter_sync( } } -pub(crate) fn remove_filter_sync( - state: &Arc>, - filter_id: FilterId, -) -> bool { - state.lock().handle.remove_filter(filter_id) +pub(crate) fn remove_filter_sync(state: &Arc, filter_id: FilterId) -> bool { + state.remove_filter(filter_id) } pub(crate) fn filter_id_to_string(id: FilterId) -> String { @@ -230,7 +182,7 @@ mod tests { 
use sc_statement_store::{MultiFilterSubscriptionApi, Store}; use std::sync::Arc; - fn empty_subscription_state() -> Arc> { + fn empty_subscription_state() -> Arc { let dir = tempfile::tempdir().expect("tempdir"); let mut db_path: std::path::PathBuf = dir.path().into(); db_path.push("db"); @@ -379,7 +331,7 @@ mod tests { std::mem::forget(dir); let (handle, _live) = store.create_subscription(); - Arc::new(Mutex::new(SubscriptionState::new(handle))) + Arc::new(handle) } #[test] @@ -389,19 +341,11 @@ mod tests { let conn_b = ConnectionId(2); let sub_id = "same-subscription-id".to_string(); - let handle_a = empty_subscription_state().lock().handle.clone(); - let handle_b = empty_subscription_state().lock().handle.clone(); + let handle_a = (*empty_subscription_state()).clone(); + let handle_b = (*empty_subscription_state()).clone(); - let entry_a = subscriptions - .reserve(conn_a) - .unwrap() - .register(sub_id.clone(), handle_a) - .unwrap(); - let _entry_b = subscriptions - .reserve(conn_b) - .unwrap() - .register(sub_id.clone(), handle_b) - .unwrap(); + let entry_a = subscriptions.register(conn_a, sub_id.clone(), handle_a).unwrap(); + let _entry_b = subscriptions.register(conn_b, sub_id.clone(), handle_b).unwrap(); let state_a = subscriptions.get(conn_a, &sub_id).unwrap(); let state_b = subscriptions.get(conn_b, &sub_id).unwrap(); @@ -411,4 +355,20 @@ mod tests { assert!(subscriptions.get(conn_a, &sub_id).is_none()); assert!(subscriptions.get(conn_b, &sub_id).is_some()); } + + #[test] + fn duplicate_subscription_id_is_rejected_per_connection() { + let subscriptions = StatementSubscriptions::new(); + let conn_a = ConnectionId(1); + let conn_b = ConnectionId(2); + let sub_id = "same-subscription-id".to_string(); + + let handle_a = (*empty_subscription_state()).clone(); + let duplicate_handle = (*empty_subscription_state()).clone(); + let handle_b = (*empty_subscription_state()).clone(); + + let _entry = subscriptions.register(conn_a, sub_id.clone(), handle_a).unwrap(); + 
assert!(subscriptions.register(conn_a, sub_id.clone(), duplicate_handle).is_none()); + assert!(subscriptions.register(conn_b, sub_id, handle_b).is_some()); + } } diff --git a/substrate/client/rpc-spec-v2/src/statement/tests.rs b/substrate/client/rpc-spec-v2/src/statement/tests.rs index 128a9f54aef26..13cbbdd3807e8 100644 --- a/substrate/client/rpc-spec-v2/src/statement/tests.rs +++ b/substrate/client/rpc-spec-v2/src/statement/tests.rs @@ -17,9 +17,8 @@ // along with this program. If not, see . use super::{ - error::rpc_spec_v2::{INVALID_SUBSCRIPTION, REACHED_LIMITS}, + error::rpc_spec_v2::INVALID_SUBSCRIPTION, event::{NewStatementEntry, SubmitOutcome, SubscribeEvent}, - subscription::MAX_SUBSCRIPTIONS_PER_CONNECTION, StatementSpec, StatementSpecApiServer, }; use codec::Encode; @@ -477,21 +476,11 @@ async fn remove_filter_is_silent_for_unknown_subscription_and_filter() { } #[tokio::test] -async fn per_connection_subscription_cap_returns_reached_limits() { +async fn statement_subscribe_does_not_apply_local_subscription_cap() { let rpc = make_server(); let mut subs = Vec::new(); - for _ in 0..MAX_SUBSCRIPTIONS_PER_CONNECTION { + for _ in 0..17 { subs.push(subscribe(&rpc).await); } - - let err = rpc - .subscribe_unbounded("statement_unstable_subscribe", jsonrpsee::rpc_params![]) - .await - .expect_err("expected ReachedLimits at cap+1"); - let object = match err { - MethodsError::JsonRpc(e) => e, - other => panic!("expected ErrorObject, got {other:?}"), - }; - assert_eq!(object.code(), REACHED_LIMITS); } diff --git a/substrate/client/statement-store/src/lib.rs b/substrate/client/statement-store/src/lib.rs index 5948ab8c79646..657066bb79003 100644 --- a/substrate/client/statement-store/src/lib.rs +++ b/substrate/client/statement-store/src/lib.rs @@ -74,7 +74,7 @@ use sp_statement_store::{ pub use sp_statement_store::{Error, StatementStore, MAX_TOPICS}; use std::{ collections::{BTreeMap, BTreeSet, HashMap, HashSet}, - sync::{atomic::AtomicU64, Arc}, + sync::Arc, 
time::{Duration, Instant}, }; pub use subscription::{ @@ -1654,23 +1654,16 @@ impl Store { impl MultiFilterSubscriptionApi for Arc { fn create_subscription(&self) -> (SubscriptionHandle, LiveEventStream) { - let state = Arc::new(parking_lot::Mutex::new( - crate::subscription::MultiFilterSubscriptionState::new(), - )); - let next_filter_id = Arc::new(AtomicU64::new(0)); - let add_filter_lock = Arc::new(parking_lot::Mutex::new(())); - let (wake_tx, wake_rx) = tokio::sync::mpsc::unbounded_channel(); - - let (sub_id, stream) = self.subscription_manager.subscribe_empty(state.clone(), wake_rx); + let inner = + Arc::new(parking_lot::Mutex::new(crate::subscription::SubscriptionHandleState::new())); + let (sub_id, control_tx, stream) = self.subscription_manager.subscribe_empty(); let handle = SubscriptionHandle { sub_id, - state, - next_filter_id, + inner, store: self.clone(), matchers: self.subscription_manager.matchers(), - wake_tx, - add_filter_lock, + control_tx, }; (handle, stream) } @@ -3286,10 +3279,14 @@ mod tests { mod multi_filter { use super::*; use crate::{ + subscription::{ + MatcherMessage, SubscriptionHandleState, SubscriptionsMatchersHandlers, + }, LiveEventStream, MultiFilterSubscriptionApi, MultiFilterSubscriptionEvent, SubscriptionHandle, }; use futures::StreamExt; + use sc_utils::id_sequence::SeqID; use sp_statement_store::{LiveStatementEvent, OptimizedTopicFilter}; use std::{ collections::{HashMap, HashSet}, @@ -3366,6 +3363,52 @@ mod tests { snapshots } + #[tokio::test] + async fn add_filter_registers_matcher_before_releasing_snapshot_lock() { + let (store, _dir) = arc_test_store(); + let (control_tx, mut control_rx) = tokio::sync::mpsc::unbounded_channel(); + let (matcher_tx, matcher_rx) = async_channel::bounded(1); + matcher_tx + .try_send(MatcherMessage::Unsubscribe(SeqID::from(999))) + .expect("matcher channel should have one free slot"); + + let handle = SubscriptionHandle { + sub_id: SeqID::from(0), + inner: 
Arc::new(parking_lot::Mutex::new(SubscriptionHandleState::new())), + store: store.clone(), + matchers: SubscriptionsMatchersHandlers::new(vec![matcher_tx]), + control_tx, + }; + + let add_filter_thread = + std::thread::spawn(move || handle.add_filter(OptimizedTopicFilter::Any)); + + tokio::time::timeout(Duration::from_secs(5), control_rx.recv()) + .await + .expect("control message should be sent") + .expect("control channel should remain open"); + + let write_lock_available = if let Some(guard) = store.index.try_write() { + drop(guard); + true + } else { + false + }; + + matcher_rx.recv().await.expect("filled matcher message should be present"); + let add_result = tokio::task::spawn_blocking(move || { + add_filter_thread.join().expect("add_filter thread should not panic") + }) + .await + .expect("join should complete"); + assert!(add_result.is_ok()); + + assert!( + !write_lock_available, + "matcher registration must happen before releasing the snapshot read lock" + ); + } + #[tokio::test] async fn create_subscription_no_filters_emits_no_events() { let (store, _dir) = arc_test_store(); diff --git a/substrate/client/statement-store/src/subscription.rs b/substrate/client/statement-store/src/subscription.rs index e7742835eeb65..716b55e0ca978 100644 --- a/substrate/client/statement-store/src/subscription.rs +++ b/substrate/client/statement-store/src/subscription.rs @@ -39,7 +39,6 @@ const SUBSCRIPTION_BUFFER_SIZE: usize = 128; /// Maximum number of active filters attached to one statement subscription pub const MAX_FILTERS_PER_SUBSCRIPTION: usize = 128; const REPLAY_CHUNK_RAW_BYTES: usize = 4 * 1024 * 1024; -const PENDING_REPLAYS_HARD_CAP: usize = MAX_FILTERS_PER_SUBSCRIPTION; const PENDING_LIVE_HARD_CAP: usize = 64 * 1024; const EMITTED_VIA_NEW_HARD_CAP: usize = 64 * 1024; @@ -119,12 +118,21 @@ pub trait MultiFilterSubscriptionApi: Send + Sync { #[derive(Clone)] pub struct SubscriptionHandle { pub(crate) sub_id: SeqID, - pub(crate) state: Arc>, - pub(crate) 
next_filter_id: Arc, + pub(crate) inner: Arc>, pub(crate) store: Arc, pub(crate) matchers: SubscriptionsMatchersHandlers, - pub(crate) wake_tx: mpsc::UnboundedSender<()>, - pub(crate) add_filter_lock: Arc>, + pub(crate) control_tx: mpsc::UnboundedSender, +} + +pub(crate) struct SubscriptionHandleState { + active_filter_ids: HashSet, + next_filter_id: u64, +} + +impl SubscriptionHandleState { + pub(crate) fn new() -> Self { + Self { active_filter_ids: HashSet::new(), next_filter_id: 0 } + } } impl SubscriptionHandle { @@ -133,52 +141,49 @@ impl SubscriptionHandle { &self, filter: OptimizedTopicFilter, ) -> std::result::Result { - let _guard = self.add_filter_lock.lock(); - { - let state = self.state.lock(); - if state.active_filter_ids.len() >= MAX_FILTERS_PER_SUBSCRIPTION || - state.pending_replays.len() >= PENDING_REPLAYS_HARD_CAP - { - return Err(AddFilterError::LimitReached); - } + let mut inner = self.inner.lock(); + if inner.active_filter_ids.len() >= MAX_FILTERS_PER_SUBSCRIPTION { + return Err(AddFilterError::LimitReached); } - let filter_id = - FilterId::new(self.next_filter_id.fetch_add(1, std::sync::atomic::Ordering::SeqCst)); + let filter_id = FilterId::new(inner.next_filter_id); + let control_tx = self.control_tx.clone(); let matchers = self.matchers.clone(); let sub_id = self.sub_id; let filter_to_register = filter.clone(); - let state = self.state.clone(); - let wake_tx = self.wake_tx.clone(); - let mut register = move |snapshot| { - state.lock().record_filter_added(filter_id, snapshot); + let mut registered = false; + let mut register = |snapshot| { + inner.next_filter_id = inner.next_filter_id.wrapping_add(1); + inner.active_filter_ids.insert(filter_id); + let _ = control_tx.send(SubscriptionControlMessage::AddFilter { filter_id, snapshot }); matchers.send_by_seq_id( sub_id, MatcherMessage::AddFilter { sub_id, filter_id, filter: filter_to_register.clone() }, ); - let _ = wake_tx.send(()); + registered = true; }; 
self.store.register_filter_with_snapshot(&filter, &mut register)?; + assert!(registered, "register_filter_with_snapshot always calls register; qed"); Ok(filter_id) } /// Removes a filter from this subscription pub fn remove_filter(&self, filter_id: FilterId) -> bool { - let _guard = self.add_filter_lock.lock(); - if !self.state.lock().record_filter_removed(filter_id) { + let mut inner = self.inner.lock(); + if !inner.active_filter_ids.remove(&filter_id) { return false; } + let _ = self.control_tx.send(SubscriptionControlMessage::RemoveFilter { filter_id }); self.matchers.send_by_seq_id( self.sub_id, MatcherMessage::RemoveFilter { sub_id: self.sub_id, filter_id }, ); - let _ = self.wake_tx.send(()); true } /// Returns active filter identifiers for this subscription pub fn filter_ids(&self) -> Vec { - self.state.lock().active_filter_ids.iter().copied().collect() + self.inner.lock().active_filter_ids.iter().copied().collect() } } @@ -193,6 +198,11 @@ struct PendingLiveStatement { matched_filter_ids: Vec, } +pub(crate) enum SubscriptionControlMessage { + AddFilter { filter_id: FilterId, snapshot: Vec> }, + RemoveFilter { filter_id: FilterId }, +} + pub(crate) struct MultiFilterSubscriptionState { active_filter_ids: HashSet, replays_in_progress: HashSet, @@ -242,6 +252,17 @@ impl MultiFilterSubscriptionState { true } + fn handle_control_message(&mut self, message: SubscriptionControlMessage) { + match message { + SubscriptionControlMessage::AddFilter { filter_id, snapshot } => { + self.record_filter_added(filter_id, snapshot); + }, + SubscriptionControlMessage::RemoveFilter { filter_id } => { + let _ = self.record_filter_removed(filter_id); + }, + } + } + fn next_event(&mut self) -> Option { if self.stopped { if self.stop_emitted { @@ -401,12 +422,20 @@ pub enum MultiFilterSubscriptionEvent { /// Stream of multi-filter subscription events pub struct LiveEventStream { rx: async_channel::Receiver, - control_rx: mpsc::UnboundedReceiver<()>, - state: Arc>, + control_rx: 
mpsc::UnboundedReceiver, + state: MultiFilterSubscriptionState, sub_id: SeqID, matchers: SubscriptionsMatchersHandlers, } +impl LiveEventStream { + fn drain_ready_control_messages(&mut self) { + while let Ok(message) = self.control_rx.try_recv() { + self.state.handle_control_message(message); + } + } +} + impl Stream for LiveEventStream { type Item = MultiFilterSubscriptionEvent; @@ -416,19 +445,22 @@ impl Stream for LiveEventStream { ) -> std::task::Poll> { let this = self.get_mut(); loop { - if let Some(event) = this.state.lock().next_event() { + if let Some(event) = this.state.next_event() { return Poll::Ready(Some(event)); } match this.control_rx.poll_recv(cx) { - Poll::Ready(Some(())) => continue, + Poll::Ready(Some(message)) => { + this.state.handle_control_message(message); + continue; + }, Poll::Ready(None) | Poll::Pending => {}, } match this.rx.poll_next_unpin(cx) { Poll::Ready(Some(event)) => { - if let Some(event) = this.state.lock().handle_live_event(event) { + this.drain_ready_control_messages(); + if let Some(event) = this.state.handle_live_event(event) { return Poll::Ready(Some(event)); } - continue; }, Poll::Ready(None) => return Poll::Ready(None), Poll::Pending => return Poll::Pending, @@ -569,17 +601,21 @@ impl SubscriptionsHandle { pub(crate) fn subscribe_empty( &self, - state: Arc>, - control_rx: mpsc::UnboundedReceiver<()>, - ) -> (SeqID, LiveEventStream) { + ) -> (SeqID, mpsc::UnboundedSender, LiveEventStream) { let sub_id = self.next_id(); let (tx, rx) = async_channel::bounded(SUBSCRIPTION_BUFFER_SIZE); + let (control_tx, control_rx) = tokio::sync::mpsc::unbounded_channel(); self.matchers .send_by_seq_id(sub_id, MatcherMessage::SubscribeEmpty { seq_id: sub_id, tx }); - let stream = - LiveEventStream { rx, control_rx, state, sub_id, matchers: self.matchers.clone() }; - (sub_id, stream) + let stream = LiveEventStream { + rx, + control_rx, + state: MultiFilterSubscriptionState::new(), + sub_id, + matchers: self.matchers.clone(), + }; + (sub_id, 
control_tx, stream) } pub(crate) fn notify(&self, statement: Statement) { @@ -929,7 +965,9 @@ pub struct SubscriptionsMatchersHandlers { impl SubscriptionsMatchersHandlers { /// Create new SubscriptionsMatchersHandlers with the given matcher task senders. - fn new(matchers: Vec>) -> SubscriptionsMatchersHandlers { + pub(crate) fn new( + matchers: Vec>, + ) -> SubscriptionsMatchersHandlers { SubscriptionsMatchersHandlers { matchers } } @@ -1094,6 +1132,71 @@ mod tests { assert!(state.next_event().is_none()); } + #[test] + fn multi_filter_control_add_registers_replay_before_live_events() { + let mut state = MultiFilterSubscriptionState::new(); + let filter_id = FilterId::new(7); + let snapshot = vec![vec![1, 2, 3]]; + + state.handle_control_message(SubscriptionControlMessage::AddFilter { + filter_id, + snapshot: snapshot.clone(), + }); + + assert!(state + .handle_live_event(LiveStatementEvent { + hash: sp_statement_store::hash_encoded(&snapshot[0]), + encoded: snapshot[0].clone(), + matched_filter_ids: vec![filter_id], + }) + .is_none()); + + assert!(matches!( + state.next_event(), + Some(MultiFilterSubscriptionEvent::ReplayStatements { + filter_id: id, + statements, + }) if id == filter_id && statements == snapshot + )); + assert!(matches!( + state.next_event(), + Some(MultiFilterSubscriptionEvent::ReplayDone { filter_id: id }) if id == filter_id + )); + assert!(state.next_event().is_none()); + } + + #[tokio::test] + async fn live_stream_processes_filter_control_before_later_live_match() { + let filter_id = FilterId::new(7); + let live_statement = signed_statement(7); + let (tx, rx) = async_channel::bounded(SUBSCRIPTION_BUFFER_SIZE); + let (control_tx, control_rx) = tokio::sync::mpsc::unbounded_channel(); + let (matcher_tx, _matcher_rx) = async_channel::bounded(MATCHERS_TASK_CHANNEL_BUFFER_SIZE); + let mut stream = LiveEventStream { + rx, + control_rx, + state: MultiFilterSubscriptionState::new(), + sub_id: SeqID::from(0), + matchers: 
SubscriptionsMatchersHandlers::new(vec![matcher_tx]), + }; + + control_tx + .send(SubscriptionControlMessage::AddFilter { filter_id, snapshot: Vec::new() }) + .unwrap(); + tx.send(live_event_for(&live_statement, vec![filter_id])).await.unwrap(); + + assert!(matches!( + stream.next().await, + Some(MultiFilterSubscriptionEvent::ReplayDone { filter_id: id }) if id == filter_id + )); + assert!(matches!( + stream.next().await, + Some(MultiFilterSubscriptionEvent::NewStatement(event)) + if event.hash == live_statement.hash() && + event.matched_filter_ids == vec![filter_id] + )); + } + #[test] fn multi_filter_new_statement_history_cap_stops_subscription() { let mut state = MultiFilterSubscriptionState::new(); From 53d65b91b26b6131b6a43d9403385e4371f2768e Mon Sep 17 00:00:00 2001 From: DenzelPenzel Date: Wed, 13 May 2026 09:28:55 +0100 Subject: [PATCH 11/16] statement-store: clean up v2 api --- Cargo.lock | 1 + cumulus/zombienet/zombienet-sdk/Cargo.toml | 1 + .../tests/zombie_ci/statement_store/common.rs | 74 ++++++++++++------- .../client/rpc-spec-v2/src/statement/api.rs | 6 +- .../rpc-spec-v2/src/statement/statement.rs | 6 +- 5 files changed, 56 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2f1c22a7650b6..027184bd03640 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5786,6 +5786,7 @@ dependencies = [ "sc-executor 0.32.0", "sc-executor-common 0.29.0", "sc-network-statement", + "sc-rpc-spec-v2", "sc-statement-store", "scale-info", "serde", diff --git a/cumulus/zombienet/zombienet-sdk/Cargo.toml b/cumulus/zombienet/zombienet-sdk/Cargo.toml index f7bf4660a4e49..c22c2fdf896e2 100644 --- a/cumulus/zombienet/zombienet-sdk/Cargo.toml +++ b/cumulus/zombienet/zombienet-sdk/Cargo.toml @@ -23,6 +23,7 @@ cumulus-zombienet-sdk-helpers = { workspace = true } sp-rpc = { workspace = true, default-features = true } sp-statement-store = { workspace = true, default-features = true, features = ["serde"] } sc-network-statement = { workspace = true, default-features = 
true } +sc-rpc-spec-v2 = { workspace = true, default-features = true } sc-statement-store = { workspace = true, default-features = true, features = ["test-helpers"] } sp-keyring = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/common.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/common.rs index 8150a742a162e..348089e5e3bb4 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/common.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/common.rs @@ -17,7 +17,7 @@ use sp_statement_store::{ }; use std::{ path::{Path, PathBuf}, - time::Duration, + time::{Duration, Instant}, }; use zombienet_sdk::{ subxt::{ @@ -34,30 +34,9 @@ pub(super) const COLLATOR_INFO_LOG_FILTER: &str = "info,statement-store=info,sta pub(super) const COLLATOR_TRACE_LOG_FILTER: &str = "info,statement-store=trace,statement-gossip=trace"; -#[derive(Debug, Clone, Deserialize, PartialEq, Eq)] -#[serde(tag = "event", rename_all = "camelCase")] -pub(super) enum UnstableStatementEvent { - ReplayStatements { - #[serde(rename = "filterId")] - filter_id: String, - statements: Vec, - }, - ReplayDone { - #[serde(rename = "filterId")] - filter_id: String, - }, - NewStatements { - statements: Vec, - }, - Stop, -} - -#[derive(Debug, Clone, Deserialize, PartialEq, Eq)] -pub(super) struct UnstableNewStatement { - pub statement: Bytes, - #[serde(rename = "filterIds")] - pub filter_ids: Vec, -} +pub(super) use sc_rpc_spec_v2::statement::{ + event::NewStatementEntry as UnstableNewStatement, SubscribeEvent as UnstableStatementEvent, +}; #[derive(Debug, Clone, Deserialize, PartialEq, Eq)] #[serde(untagged)] @@ -254,6 +233,13 @@ pub(super) fn create_chain_spec_with_allowances( participant_count: u32, base_dir: &Path, ) -> Result { + let started_at = Instant::now(); + info!( + "Creating statement allowance chain spec: 
participants={}, base_dir={}", + participant_count, + base_dir.display(), + ); + let chain_spec_template = include_str!("people-westend-local-spec.json"); let mut chain_spec: serde_json::Value = serde_json::from_str(chain_spec_template) .map_err(|e| anyhow!("Failed to parse chain spec JSON: {}", e))?; @@ -268,6 +254,15 @@ pub(super) fn create_chain_spec_with_allowances( let allowance_hex = format!("0x{}", HexDisplay::from(&allowance.encode())); info!("Injecting statement allowance: {:}", allowance_hex); for idx in 0..participant_count { + if idx > 0 && idx % 5_000 == 0 { + info!( + "Injected statement allowances: {}/{} elapsed_s={}", + idx, + participant_count, + started_at.elapsed().as_secs(), + ); + } + let keypair = get_keypair(idx); let account_id = keypair.public(); @@ -276,13 +271,25 @@ pub(super) fn create_chain_spec_with_allowances( genesis.insert(storage_key_hex, serde_json::Value::String(allowance_hex.clone())); } + info!( + "Finished injecting statement allowances: {}/{} elapsed_s={}", + participant_count, + participant_count, + started_at.elapsed().as_secs(), + ); let chain_spec_path = base_dir.join("people-westend-custom.json"); + info!("Serializing chain spec: {}", chain_spec_path.display()); let chain_spec_json = serde_json::to_string_pretty(&chain_spec) .map_err(|e| anyhow!("Failed to serialize chain spec: {}", e))?; std::fs::write(&chain_spec_path, chain_spec_json) .map_err(|e| anyhow!("Failed to write chain spec to file: {}", e))?; + info!( + "Wrote chain spec: {} elapsed_s={}", + chain_spec_path.display(), + started_at.elapsed().as_secs(), + ); Ok(chain_spec_path) } @@ -325,6 +332,12 @@ async fn launch_network( ) -> Result, anyhow::Error> { let images = zombienet_sdk::environment::get_images_from_env(); let base_dir = base_dir()?; + info!( + "Preparing zombienet network config: collators={}, chain_spec={}, base_dir={}", + collators.len(), + chain_spec_path.display(), + base_dir.display(), + ); let config = NetworkConfigBuilder::new() 
.with_relaychain(|r| { @@ -356,8 +369,12 @@ async fn launch_network( .build() .map_err(format_build_errors)?; + info!("Initialising zombienet network"); let network = crate::utils::initialize_network(config).await?; - assert!(network.wait_until_is_up(60).await.is_ok()); + info!("Waiting for zombienet network to be up"); + let wait_result = network.wait_until_is_up(60).await; + info!("Zombienet network wait finished: ok={}", wait_result.is_ok()); + assert!(wait_result.is_ok()); Ok(network) } @@ -382,6 +399,11 @@ pub(super) async fn spawn_network_with_injected_allowances( ) -> Result, anyhow::Error> { assert!(!collators.is_empty()); let base_dir = base_dir()?; + info!( + "Spawning injected allowance network: collators={}, participants={}", + collators.len(), + participant_count, + ); let chain_spec_path = create_chain_spec_with_allowances(participant_count, &base_dir)?; let args = collator_args(participant_count, COLLATOR_TRACE_LOG_FILTER); launch_network(collators, &chain_spec_path, args).await diff --git a/substrate/client/rpc-spec-v2/src/statement/api.rs b/substrate/client/rpc-spec-v2/src/statement/api.rs index 72e86821d2099..e87fe6b6fa496 100644 --- a/substrate/client/rpc-spec-v2/src/statement/api.rs +++ b/substrate/client/rpc-spec-v2/src/statement/api.rs @@ -39,7 +39,7 @@ pub trait StatementSpecApi { /// Attaches a filter to an existing subscription #[method(name = "statement_unstable_add_filter", with_extensions)] - async fn statement_unstable_add_filter( + fn statement_unstable_add_filter( &self, subscription: String, topic_filter: TopicFilter, @@ -47,7 +47,7 @@ pub trait StatementSpecApi { /// Detaches a filter from a subscription #[method(name = "statement_unstable_remove_filter", with_extensions)] - async fn statement_unstable_remove_filter( + fn statement_unstable_remove_filter( &self, subscription: String, filter_id: String, @@ -55,5 +55,5 @@ pub trait StatementSpecApi { /// Submits a SCALE-encoded statement to the store #[method(name = 
"statement_unstable_submit")] - async fn statement_unstable_submit(&self, encoded: Bytes) -> Result; + fn statement_unstable_submit(&self, encoded: Bytes) -> Result; } diff --git a/substrate/client/rpc-spec-v2/src/statement/statement.rs b/substrate/client/rpc-spec-v2/src/statement/statement.rs index 937d0bd850569..1fb7d5912eb9c 100644 --- a/substrate/client/rpc-spec-v2/src/statement/statement.rs +++ b/substrate/client/rpc-spec-v2/src/statement/statement.rs @@ -113,7 +113,7 @@ where .spawn("statement-unstable-subscribe-init", Some("rpc"), fut.boxed()); } - async fn statement_unstable_add_filter( + fn statement_unstable_add_filter( &self, ext: &Extensions, subscription: String, @@ -134,7 +134,7 @@ where } } - async fn statement_unstable_remove_filter( + fn statement_unstable_remove_filter( &self, ext: &Extensions, subscription: String, @@ -147,7 +147,7 @@ where Ok(()) } - async fn statement_unstable_submit(&self, encoded: Bytes) -> Result { + fn statement_unstable_submit(&self, encoded: Bytes) -> Result { let statement = Statement::decode(&mut &encoded[..]) .map_err(|e| Error::InvalidParam(format!("Error decoding statement: {e}")))?; if self.store.has_statement(&statement.hash()) { From 03deb072e42eab0d6f860aaa9a77f6e38c78c1a8 Mon Sep 17 00:00:00 2001 From: DenzelPenzel Date: Wed, 13 May 2026 22:12:07 +0100 Subject: [PATCH 12/16] statement-store: get subs with timeout --- .../client/rpc-spec-v2/src/statement/api.rs | 4 +- .../rpc-spec-v2/src/statement/statement.rs | 27 +++++---- .../rpc-spec-v2/src/statement/subscription.rs | 58 ++++++++++++++++++- 3 files changed, 73 insertions(+), 16 deletions(-) diff --git a/substrate/client/rpc-spec-v2/src/statement/api.rs b/substrate/client/rpc-spec-v2/src/statement/api.rs index e87fe6b6fa496..d091c8be412f2 100644 --- a/substrate/client/rpc-spec-v2/src/statement/api.rs +++ b/substrate/client/rpc-spec-v2/src/statement/api.rs @@ -35,11 +35,11 @@ pub trait StatementSpecApi { item = SubscribeEvent, with_extensions, )] - fn 
statement_unstable_subscribe(&self); + async fn statement_unstable_subscribe(&self); /// Attaches a filter to an existing subscription #[method(name = "statement_unstable_add_filter", with_extensions)] - fn statement_unstable_add_filter( + async fn statement_unstable_add_filter( &self, subscription: String, topic_filter: TopicFilter, diff --git a/substrate/client/rpc-spec-v2/src/statement/statement.rs b/substrate/client/rpc-spec-v2/src/statement/statement.rs index 1fb7d5912eb9c..243e6c8a53e89 100644 --- a/substrate/client/rpc-spec-v2/src/statement/statement.rs +++ b/substrate/client/rpc-spec-v2/src/statement/statement.rs @@ -90,21 +90,26 @@ where B: StatementStore + Send + Sync + 'static, Arc: MultiFilterSubscriptionApi, { - fn statement_unstable_subscribe(&self, pending: PendingSubscriptionSink, _ext: &Extensions) { + async fn statement_unstable_subscribe( + &self, + pending: PendingSubscriptionSink, + _ext: &Extensions, + ) { let subscriptions = self.subscriptions.clone(); let store = self.store.clone(); let connection_id = pending.connection_id(); - let fut = async move { - let Ok(sink) = pending.accept().await.map(Subscription::from) else { return }; - let sub_id = read_subscription_id_as_string(&sink); - let (handle, live_stream) = store.create_subscription(); + let (handle, live_stream) = store.create_subscription(); + let Ok(sink) = pending.accept().await.map(Subscription::from) else { return }; + let sub_id = read_subscription_id_as_string(&sink); - let Some(entry) = subscriptions.register(connection_id, sub_id.clone(), handle) else { - log::debug!(target: LOG_TARGET, "duplicate subscription id {sub_id}; aborting"); - return; - }; + let Some(entry) = subscriptions.register(connection_id, sub_id.clone(), handle) else { + log::debug!(target: LOG_TARGET, "duplicate subscription id {sub_id}; aborting"); + let _ = sink.send(&crate::statement::event::SubscribeEvent::Stop).await; + return; + }; + let fut = async move { run_subscription_task(sink, 
live_stream).await; drop(entry); }; @@ -113,7 +118,7 @@ where .spawn("statement-unstable-subscribe-init", Some("rpc"), fut.boxed()); } - fn statement_unstable_add_filter( + async fn statement_unstable_add_filter( &self, ext: &Extensions, subscription: String, @@ -122,7 +127,7 @@ where let conn_id = connection_id(ext); let topic_filter = validate_topic_filter(topic_filter)?; - let Some(state) = self.subscriptions.get(conn_id, &subscription) else { + let Some(state) = self.subscriptions.get_with_timeout(conn_id, &subscription).await else { return Err(Error::InvalidSubscription); }; diff --git a/substrate/client/rpc-spec-v2/src/statement/subscription.rs b/substrate/client/rpc-spec-v2/src/statement/subscription.rs index f1ffca0e144d9..0382ec98ac8c9 100644 --- a/substrate/client/rpc-spec-v2/src/statement/subscription.rs +++ b/substrate/client/rpc-spec-v2/src/statement/subscription.rs @@ -28,7 +28,10 @@ use sc_statement_store::{ AddFilterError, LiveEventStream, MultiFilterSubscriptionEvent, SubscriptionHandle, }; use sp_statement_store::{FilterId, OptimizedTopicFilter}; -use std::{collections::HashMap, sync::Arc}; +use std::{collections::HashMap, sync::Arc, time::Duration}; +use tokio::sync::Notify; + +const SUBSCRIPTION_REGISTRATION_TIMEOUT: Duration = Duration::from_millis(250); pub(crate) enum AddFilterOutcome { Added(FilterId), @@ -43,11 +46,15 @@ type SubscriptionRegistry = #[derive(Clone, Default)] pub struct StatementSubscriptions { registry: SubscriptionRegistry, + registration_notify: Arc, } impl StatementSubscriptions { pub fn new() -> Self { - Self { registry: Arc::new(RwLock::new(HashMap::new())) } + Self { + registry: Arc::new(RwLock::new(HashMap::new())), + registration_notify: Arc::new(Notify::new()), + } } /// Registers a subscription owned by the connection @@ -65,6 +72,8 @@ impl StatementSubscriptions { } connection.insert(sub_id.clone(), state); + drop(registry); + self.registration_notify.notify_waiters(); Some(SubscriptionEntry { conn_id, sub_id, 
registry: self.registry.clone() }) } @@ -75,6 +84,27 @@ impl StatementSubscriptions { .get(&conn_id) .and_then(|connection| connection.get(sub_id).cloned()) } + + pub async fn get_with_timeout( + &self, + conn_id: ConnectionId, + sub_id: &str, + ) -> Option { + match tokio::time::timeout(SUBSCRIPTION_REGISTRATION_TIMEOUT, async { + loop { + let notified = self.registration_notify.notified(); + if let Some(state) = self.get(conn_id, sub_id) { + return state; + } + notified.await; + } + }) + .await + { + Ok(state) => Some(state), + Err(_) => self.get(conn_id, sub_id), + } + } } /// Registered subscription entry @@ -180,7 +210,7 @@ async fn send_event(sink: &Subscription, event: &SubscribeEvent) -> bool { mod tests { use super::*; use sc_statement_store::{MultiFilterSubscriptionApi, Store}; - use std::sync::Arc; + use std::{sync::Arc, time::Duration}; fn empty_subscription_state() -> Arc { let dir = tempfile::tempdir().expect("tempdir"); @@ -371,4 +401,26 @@ mod tests { assert!(subscriptions.register(conn_a, sub_id.clone(), duplicate_handle).is_none()); assert!(subscriptions.register(conn_b, sub_id, handle_b).is_some()); } + + #[tokio::test] + async fn subscription_lookup_waits_for_delayed_registration() { + let subscriptions = StatementSubscriptions::new(); + let conn_id = ConnectionId(1); + let sub_id = "eventually-registered".to_string(); + let handle = (*empty_subscription_state()).clone(); + let (release_entry_tx, release_entry_rx) = tokio::sync::oneshot::channel(); + + let subscriptions_for_task = subscriptions.clone(); + let sub_id_for_task = sub_id.clone(); + tokio::spawn(async move { + tokio::time::sleep(Duration::from_millis(10)).await; + let _entry = subscriptions_for_task + .register(conn_id, sub_id_for_task, handle) + .expect("delayed registration should succeed"); + let _ = release_entry_rx.await; + }); + + assert!(subscriptions.get_with_timeout(conn_id, &sub_id).await.is_some()); + let _ = release_entry_tx.send(()); + } } From 
787ba0de8b57d403a21c85eb8593e06c6665966e Mon Sep 17 00:00:00 2001 From: DenzelPenzel Date: Thu, 14 May 2026 17:41:14 +0100 Subject: [PATCH 13/16] statement-store: reuse submit outcome across RPC APIs --- .../tests/zombie_ci/statement_store/common.rs | 18 ++++-------- .../zombie_ci/statement_store/integration.rs | 18 ++++++------ .../statement_store/unstable_bench.rs | 4 +-- substrate/client/rpc-spec-v2/Cargo.toml | 2 +- .../client/rpc-spec-v2/src/statement/api.rs | 4 +-- .../client/rpc-spec-v2/src/statement/event.rs | 15 ---------- .../client/rpc-spec-v2/src/statement/mod.rs | 3 +- .../rpc-spec-v2/src/statement/statement.rs | 20 +++++++------ .../client/rpc-spec-v2/src/statement/tests.rs | 4 +-- .../primitives/statement-store/src/lib.rs | 4 +-- .../statement-store/src/store_api.rs | 28 +++++++++++++++++++ 11 files changed, 65 insertions(+), 55 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/common.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/common.rs index 348089e5e3bb4..baceae3e9e1e4 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/common.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/common.rs @@ -10,10 +10,10 @@ use anyhow::anyhow; use codec::Encode; use log::info; use sc_statement_store::test_utils::get_keypair; -use serde::Deserialize; use sp_core::{hexdisplay::HexDisplay, Bytes, Pair}; use sp_statement_store::{ - statement_allowance_key, StatementAllowance, StatementEvent, SubmitResult, Topic, TopicFilter, + statement_allowance_key, StatementAllowance, StatementEvent, SubmitOutcome, SubmitResult, + Topic, TopicFilter, }; use std::{ path::{Path, PathBuf}, @@ -35,16 +35,10 @@ pub(super) const COLLATOR_TRACE_LOG_FILTER: &str = "info,statement-store=trace,statement-gossip=trace"; pub(super) use sc_rpc_spec_v2::statement::{ - event::NewStatementEntry as UnstableNewStatement, SubscribeEvent as UnstableStatementEvent, + event::NewStatementEntry as 
UnstableNewStatement, + AddFilterResponse as UnstableAddFilterResponse, SubscribeEvent as UnstableStatementEvent, }; -#[derive(Debug, Clone, Deserialize, PartialEq, Eq)] -#[serde(untagged)] -pub(super) enum UnstableAddFilterResponse { - Ok(String), - LimitReached { result: String }, -} - pub(super) async fn submit_statement( rpc: &RpcClient, statement: &sp_statement_store::Statement, @@ -57,9 +51,9 @@ pub(super) async fn submit_statement( pub(super) async fn submit_statement_unstable( rpc: &RpcClient, statement: &sp_statement_store::Statement, -) -> Result { +) -> Result { let encoded: Bytes = statement.encode().into(); - let result: SubmitResult = + let result: SubmitOutcome = rpc.request("statement_unstable_submit", rpc_params![encoded]).await?; Ok(result) } diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/integration.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/integration.rs index 96e801d9e3f23..ca926a7ff24ea 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/integration.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/integration.rs @@ -25,7 +25,7 @@ use sc_statement_store::{ use sp_core::{sr25519, Bytes, Pair}; use sp_runtime::BoundedVec; use sp_statement_store::{ - RejectionReason, Statement, StatementAllowance, SubmitResult, Topic, TopicFilter, + RejectionReason, Statement, StatementAllowance, SubmitOutcome, SubmitResult, Topic, TopicFilter, }; use statement_store_subxt::transactions::Signer; use std::{ @@ -110,8 +110,8 @@ fn match_all_filter(topic: Topic) -> TopicFilter { fn filter_id(response: UnstableAddFilterResponse) -> String { match response { UnstableAddFilterResponse::Ok(id) => id, - UnstableAddFilterResponse::LimitReached { result } => { - panic!("Expected filter id, got {result}") + UnstableAddFilterResponse::LimitReached(result) => { + panic!("Expected filter id, got {result:?}") }, } } @@ -176,7 +176,7 @@ async fn 
statement_store_unstable_basic_propagation() -> Result<(), anyhow::Erro assert!(replayed.is_empty(), "Fresh unstable filter should not replay statements"); let result = submit_statement_unstable(&charlie_rpc, &statement).await?; - assert_eq!(result, SubmitResult::New); + assert_eq!(result, SubmitOutcome::New); let received = expect_one_unstable_statement(&mut subscription, 20).await?; assert_eq!(received.statement, expected, "Unstable statement data mismatch"); @@ -201,8 +201,8 @@ async fn statement_store_unstable_rpc_multi_filter_flow() -> Result<(), anyhow:: let pre_existing = create_test_statement(&get_keypair(0), &[topic_a], None, vec![1], u32::MAX, 0); - assert_eq!(submit_statement_unstable(&alice_rpc, &pre_existing).await?, SubmitResult::New); - assert_eq!(submit_statement_unstable(&alice_rpc, &pre_existing).await?, SubmitResult::Known); + assert_eq!(submit_statement_unstable(&alice_rpc, &pre_existing).await?, SubmitOutcome::New); + assert_eq!(submit_statement_unstable(&alice_rpc, &pre_existing).await?, SubmitOutcome::Known); let mut subscription = subscribe_unstable(&alice_rpc).await?; let subscription_id = unstable_subscription_id(&subscription)?; @@ -222,7 +222,7 @@ async fn statement_store_unstable_rpc_multi_filter_flow() -> Result<(), anyhow:: let live_ab = create_test_statement(&get_keypair(1), &[topic_a, topic_b], None, vec![2], u32::MAX, 0); - assert_eq!(submit_statement_unstable(&alice_rpc, &live_ab).await?, SubmitResult::New); + assert_eq!(submit_statement_unstable(&alice_rpc, &live_ab).await?, SubmitOutcome::New); match expect_unstable_event(&mut subscription, 20).await? { UnstableStatementEvent::NewStatements { statements } => { @@ -240,7 +240,7 @@ async fn statement_store_unstable_rpc_multi_filter_flow() -> Result<(), anyhow:: assert_eq!( submit_statement_unstable(&alice_rpc, &live_b_after_remove).await?, - SubmitResult::New + SubmitOutcome::New ); match expect_unstable_event(&mut subscription, 20).await? 
{ @@ -260,7 +260,7 @@ async fn statement_store_unstable_rpc_multi_filter_flow() -> Result<(), anyhow:: assert_eq!( submit_statement_unstable(&alice_rpc, &ignored_after_remove).await?, - SubmitResult::New + SubmitOutcome::New ); assert_no_unstable_event(&mut subscription, 3).await?; diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/unstable_bench.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/unstable_bench.rs index e816da8b03dca..845cdbdb8e8f4 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/unstable_bench.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/unstable_bench.rs @@ -57,8 +57,8 @@ async fn add_match_all_filter_unstable( .await? { UnstableAddFilterResponse::Ok(filter_id) => Ok(filter_id), - UnstableAddFilterResponse::LimitReached { result } => { - Err(anyhow!("Unexpected unstable filter limit response: {result}")) + UnstableAddFilterResponse::LimitReached(result) => { + Err(anyhow!("Unexpected unstable filter limit response: {result:?}")) }, } } diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index 6dfc38cba97f8..98eac00944093 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -43,7 +43,7 @@ sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-rpc = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -sp-statement-store = { workspace = true, default-features = true } +sp-statement-store = { workspace = true, default-features = true, features = ["serde"] } sp-version = { workspace = true, default-features = true } thiserror = { workspace = true } tokio = { features = ["sync"], workspace = true, default-features = true } diff --git a/substrate/client/rpc-spec-v2/src/statement/api.rs b/substrate/client/rpc-spec-v2/src/statement/api.rs index 
d091c8be412f2..c86fffdcc14cd 100644 --- a/substrate/client/rpc-spec-v2/src/statement/api.rs +++ b/substrate/client/rpc-spec-v2/src/statement/api.rs @@ -20,11 +20,11 @@ use crate::statement::{ error::Error, - event::{AddFilterResponse, SubmitOutcome, SubscribeEvent}, + event::{AddFilterResponse, SubscribeEvent}, }; use jsonrpsee::proc_macros::rpc; use sp_core::Bytes; -use sp_statement_store::TopicFilter; +use sp_statement_store::{SubmitOutcome, TopicFilter}; #[rpc(client, server)] pub trait StatementSpecApi { diff --git a/substrate/client/rpc-spec-v2/src/statement/event.rs b/substrate/client/rpc-spec-v2/src/statement/event.rs index 9afa0cd66ea05..4b5de26ebe3e3 100644 --- a/substrate/client/rpc-spec-v2/src/statement/event.rs +++ b/substrate/client/rpc-spec-v2/src/statement/event.rs @@ -18,7 +18,6 @@ use serde::{Deserialize, Serialize}; use sp_core::Bytes; -use sp_statement_store::{InvalidReason, RejectionReason}; /// Subscription notification event #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] @@ -90,17 +89,3 @@ impl AddFilterResponse { }) } } - -/// Statement submission outcome -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] -#[serde(tag = "status", rename_all = "camelCase")] -pub enum SubmitOutcome { - /// The statement was accepted and was not already present in the store - New, - /// The statement is already present in the store - Known, - /// The statement was valid but the store rejected it - Rejected(RejectionReason), - /// The statement failed validation - Invalid(InvalidReason), -} diff --git a/substrate/client/rpc-spec-v2/src/statement/mod.rs b/substrate/client/rpc-spec-v2/src/statement/mod.rs index 6b2185bf24c8b..31a8e24057a12 100644 --- a/substrate/client/rpc-spec-v2/src/statement/mod.rs +++ b/substrate/client/rpc-spec-v2/src/statement/mod.rs @@ -30,7 +30,8 @@ mod tests; pub use api::StatementSpecApiServer; pub use error::Error; -pub use event::{AddFilterResponse, SubmitOutcome, SubscribeEvent}; +pub use 
event::{AddFilterResponse, SubscribeEvent}; +pub use sp_statement_store::SubmitOutcome; pub use statement::StatementSpec; pub(crate) const LOG_TARGET: &str = "rpc-spec-v2::statement"; diff --git a/substrate/client/rpc-spec-v2/src/statement/statement.rs b/substrate/client/rpc-spec-v2/src/statement/statement.rs index 243e6c8a53e89..db4dc3492c238 100644 --- a/substrate/client/rpc-spec-v2/src/statement/statement.rs +++ b/substrate/client/rpc-spec-v2/src/statement/statement.rs @@ -20,7 +20,7 @@ use crate::{ statement::{ api::StatementSpecApiServer, error::Error, - event::{AddFilterResponse, SubmitOutcome}, + event::AddFilterResponse, subscription::{ add_filter_sync, parse_filter_id, remove_filter_sync, run_subscription_task, AddFilterOutcome, StatementSubscriptions, @@ -38,7 +38,8 @@ use sc_rpc::utils::Subscription; use sc_statement_store::MultiFilterSubscriptionApi; use sp_core::Bytes; use sp_statement_store::{ - OptimizedTopicFilter, Statement, StatementSource, StatementStore, SubmitResult, TopicFilter, + OptimizedTopicFilter, Statement, StatementSource, StatementStore, SubmitOutcome, SubmitResult, + TopicFilter, }; use std::sync::Arc; @@ -158,15 +159,16 @@ where if self.store.has_statement(&statement.hash()) { return Ok(SubmitOutcome::Known); } - match self.store.submit(statement, StatementSource::Local) { - SubmitResult::New => Ok(SubmitOutcome::New), - SubmitResult::Known => Ok(SubmitOutcome::Known), - SubmitResult::Rejected(reason) => Ok(SubmitOutcome::Rejected(reason)), - SubmitResult::Invalid(reason) => Ok(SubmitOutcome::Invalid(reason)), - SubmitResult::KnownExpired => { + let submit_result = self.store.submit(statement, StatementSource::Local); + match SubmitOutcome::from_submit_result(submit_result) { + Ok(outcome) => Ok(outcome), + Err(SubmitResult::KnownExpired) => { Err(Error::InternalError("store returned KnownExpired for local submit".into())) }, - SubmitResult::InternalError(e) => Err(Error::InternalError(e.to_string())), + 
Err(SubmitResult::InternalError(e)) => Err(Error::InternalError(e.to_string())), + Err(other) => Err(Error::InternalError(format!( + "store returned unsupported submit result: {other:?}", + ))), } } } diff --git a/substrate/client/rpc-spec-v2/src/statement/tests.rs b/substrate/client/rpc-spec-v2/src/statement/tests.rs index 13cbbdd3807e8..36bb57c66eab8 100644 --- a/substrate/client/rpc-spec-v2/src/statement/tests.rs +++ b/substrate/client/rpc-spec-v2/src/statement/tests.rs @@ -18,8 +18,8 @@ use super::{ error::rpc_spec_v2::INVALID_SUBSCRIPTION, - event::{NewStatementEntry, SubmitOutcome, SubscribeEvent}, - StatementSpec, StatementSpecApiServer, + event::{NewStatementEntry, SubscribeEvent}, + StatementSpec, StatementSpecApiServer, SubmitOutcome, }; use codec::Encode; use jsonrpsee::{core::server::Subscription as RpcSubscription, MethodsError, RpcModule}; diff --git a/substrate/primitives/statement-store/src/lib.rs b/substrate/primitives/statement-store/src/lib.rs index 2e06dbd114cee..859d9a456bda6 100644 --- a/substrate/primitives/statement-store/src/lib.rs +++ b/substrate/primitives/statement-store/src/lib.rs @@ -207,8 +207,8 @@ pub fn get_allowance(account_id: impl AsRef<[u8]>) -> StatementAllowance { #[cfg(feature = "std")] pub use store_api::{ Error, FilterDecision, FilterId, InvalidReason, LiveStatementEvent, OptimizedTopicFilter, - RejectionReason, Result, StatementEvent, StatementSource, StatementStore, SubmitResult, - TopicFilter, + RejectionReason, Result, StatementEvent, StatementSource, StatementStore, SubmitOutcome, + SubmitResult, TopicFilter, }; #[cfg(feature = "std")] diff --git a/substrate/primitives/statement-store/src/store_api.rs b/substrate/primitives/statement-store/src/store_api.rs index 2de8638d538aa..b7d9eb4b95707 100644 --- a/substrate/primitives/statement-store/src/store_api.rs +++ b/substrate/primitives/statement-store/src/store_api.rs @@ -234,6 +234,34 @@ pub enum SubmitResult { InternalError(Error), } +/// Statement submission outcome 
exposed by RPC APIs +#[derive(Debug, Clone, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "serde", serde(tag = "status", rename_all = "camelCase"))] +pub enum SubmitOutcome { + /// Statement was accepted as new + New, + /// Statement was already known + Known, + /// Statement was rejected because the store is full or priority is too low + Rejected(RejectionReason), + /// Statement failed validation + Invalid(InvalidReason), +} + +impl SubmitOutcome { + /// Converts a store submission result into the RPC-visible outcome + pub fn from_submit_result(result: SubmitResult) -> std::result::Result { + match result { + SubmitResult::New => Ok(SubmitOutcome::New), + SubmitResult::Known => Ok(SubmitOutcome::Known), + SubmitResult::Rejected(reason) => Ok(SubmitOutcome::Rejected(reason)), + SubmitResult::Invalid(reason) => Ok(SubmitOutcome::Invalid(reason)), + other => Err(other), + } + } +} + /// An item returned by the statement subscription stream. 
#[derive(Debug, Clone, Eq, PartialEq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] From 0e6b147e318c7633775cbfc85b13e6c4f828ffd9 Mon Sep 17 00:00:00 2001 From: DenzelPenzel Date: Fri, 15 May 2026 14:47:31 +0100 Subject: [PATCH 14/16] statement-store: make add filter async --- substrate/client/statement-store/src/lib.rs | 90 ++-- .../statement-store/src/subscription.rs | 421 +++++++++--------- 2 files changed, 251 insertions(+), 260 deletions(-) diff --git a/substrate/client/statement-store/src/lib.rs b/substrate/client/statement-store/src/lib.rs index 657066bb79003..65774aef229d5 100644 --- a/substrate/client/statement-store/src/lib.rs +++ b/substrate/client/statement-store/src/lib.rs @@ -74,9 +74,10 @@ use sp_statement_store::{ pub use sp_statement_store::{Error, StatementStore, MAX_TOPICS}; use std::{ collections::{BTreeMap, BTreeSet, HashMap, HashSet}, - sync::Arc, + sync::{Arc, Weak}, time::{Duration, Instant}, }; +use subscription::ReplaySnapshotProvider; pub use subscription::{ AddFilterError, LiveEventStream, MultiFilterSubscriptionApi, MultiFilterSubscriptionEvent, StatementStoreSubscriptionApi, SubscriptionHandle, MAX_FILTERS_PER_SUBSCRIPTION, @@ -354,6 +355,19 @@ pub struct Store { metrics: PrometheusMetrics, } +impl ReplaySnapshotProvider for Weak { + fn register_filter_with_snapshot( + &self, + filter: &OptimizedTopicFilter, + register: &mut dyn FnMut(Vec>), + ) -> Result<()> { + let Some(store) = self.upgrade() else { + return Err(Error::InvalidConfig("statement store is closed".into())); + }; + store.register_filter_with_snapshot(filter, register) + } +} + enum IndexQuery { Unknown, Exists, @@ -1656,15 +1670,11 @@ impl MultiFilterSubscriptionApi for Arc { fn create_subscription(&self) -> (SubscriptionHandle, LiveEventStream) { let inner = Arc::new(parking_lot::Mutex::new(crate::subscription::SubscriptionHandleState::new())); - let (sub_id, control_tx, stream) = self.subscription_manager.subscribe_empty(); - - let 
handle = SubscriptionHandle { - sub_id, - inner, - store: self.clone(), - matchers: self.subscription_manager.matchers(), - control_tx, - }; + let snapshot_provider: Arc = Arc::new(Arc::downgrade(self)); + let (sub_id, stream) = self.subscription_manager.subscribe_empty(snapshot_provider); + + let handle = + SubscriptionHandle { sub_id, inner, matchers: self.subscription_manager.matchers() }; (handle, stream) } } @@ -3279,14 +3289,10 @@ mod tests { mod multi_filter { use super::*; use crate::{ - subscription::{ - MatcherMessage, SubscriptionHandleState, SubscriptionsMatchersHandlers, - }, LiveEventStream, MultiFilterSubscriptionApi, MultiFilterSubscriptionEvent, SubscriptionHandle, }; use futures::StreamExt; - use sc_utils::id_sequence::SeqID; use sp_statement_store::{LiveStatementEvent, OptimizedTopicFilter}; use std::{ collections::{HashMap, HashSet}, @@ -3364,49 +3370,24 @@ mod tests { } #[tokio::test] - async fn add_filter_registers_matcher_before_releasing_snapshot_lock() { + async fn add_filter_returns_before_snapshot_collection_finishes() { let (store, _dir) = arc_test_store(); - let (control_tx, mut control_rx) = tokio::sync::mpsc::unbounded_channel(); - let (matcher_tx, matcher_rx) = async_channel::bounded(1); - matcher_tx - .try_send(MatcherMessage::Unsubscribe(SeqID::from(999))) - .expect("matcher channel should have one free slot"); - - let handle = SubscriptionHandle { - sub_id: SeqID::from(0), - inner: Arc::new(parking_lot::Mutex::new(SubscriptionHandleState::new())), - store: store.clone(), - matchers: SubscriptionsMatchersHandlers::new(vec![matcher_tx]), - control_tx, - }; - - let add_filter_thread = - std::thread::spawn(move || handle.add_filter(OptimizedTopicFilter::Any)); - - tokio::time::timeout(Duration::from_secs(5), control_rx.recv()) - .await - .expect("control message should be sent") - .expect("control channel should remain open"); + let (handle, mut stream) = store.create_subscription(); + let index_write_guard = store.index.write(); + 
let (filter_id_tx, filter_id_rx) = std::sync::mpsc::channel(); - let write_lock_available = if let Some(guard) = store.index.try_write() { - drop(guard); - true - } else { - false - }; + std::thread::spawn(move || { + let filter_id = handle.add_filter(OptimizedTopicFilter::Any).unwrap(); + filter_id_tx.send(filter_id).unwrap(); + }); - matcher_rx.recv().await.expect("filled matcher message should be present"); - let add_result = tokio::task::spawn_blocking(move || { - add_filter_thread.join().expect("add_filter thread should not panic") - }) - .await - .expect("join should complete"); - assert!(add_result.is_ok()); + let filter_id = filter_id_rx + .recv_timeout(Duration::from_secs(1)) + .expect("add_filter should not wait for snapshot collection"); + drop(index_write_guard); - assert!( - !write_lock_available, - "matcher registration must happen before releasing the snapshot read lock" - ); + let snapshots = drain_replays(&mut stream, [filter_id], Duration::from_secs(1)).await; + assert!(snapshots[&filter_id].is_empty()); } #[tokio::test] @@ -3540,6 +3521,9 @@ mod tests { let id_ab = handle .add_filter(OptimizedTopicFilter::MatchAll(HashSet::from([topic_a, topic_b]))) .unwrap(); + let snapshots = + drain_replays(&mut stream, [id_a, id_b, id_ab], Duration::from_secs(1)).await; + assert!(snapshots.values().all(|statements| statements.is_empty())); let s1 = signed_statement_with_topics(1, &[topic_a], None); store.submit(s1, StatementSource::Local); diff --git a/substrate/client/statement-store/src/subscription.rs b/substrate/client/statement-store/src/subscription.rs index 716b55e0ca978..59b6902e1a1c9 100644 --- a/substrate/client/statement-store/src/subscription.rs +++ b/substrate/client/statement-store/src/subscription.rs @@ -42,11 +42,11 @@ const REPLAY_CHUNK_RAW_BYTES: usize = 4 * 1024 * 1024; const PENDING_LIVE_HARD_CAP: usize = 64 * 1024; const EMITTED_VIA_NEW_HARD_CAP: usize = 64 * 1024; -use futures::{Stream, StreamExt}; +use futures::{future::BoxFuture, 
Stream, StreamExt}; use itertools::Itertools; use parking_lot::Mutex; -use crate::{Store, LOG_TARGET}; +use crate::LOG_TARGET; use sc_utils::id_sequence::SeqID; use sp_core::{traits::SpawnNamed, Bytes, Encode}; pub use sp_statement_store::StatementStore; @@ -59,7 +59,10 @@ use std::{ sync::{atomic::AtomicU64, Arc}, task::Poll, }; -use tokio::sync::mpsc; +use tokio::sync::oneshot; + +type PendingNextEvent = BoxFuture<'static, Option>; +type PendingEventReply = oneshot::Sender>; /// Error returned when attaching a filter to a multi-filter subscription fails #[derive(Debug, Clone, PartialEq, Eq)] @@ -114,14 +117,21 @@ pub trait MultiFilterSubscriptionApi: Send + Sync { fn create_subscription(&self) -> (SubscriptionHandle, LiveEventStream); } +/// Provides replay snapshots while preserving the snapshot/register critical section +pub(crate) trait ReplaySnapshotProvider: Send + Sync { + fn register_filter_with_snapshot( + &self, + filter: &OptimizedTopicFilter, + register: &mut dyn FnMut(Vec>), + ) -> Result<()>; +} + /// A handle that attaches, removes, and inspects filters for one multi-filter subscription #[derive(Clone)] pub struct SubscriptionHandle { pub(crate) sub_id: SeqID, pub(crate) inner: Arc>, - pub(crate) store: Arc, pub(crate) matchers: SubscriptionsMatchersHandlers, - pub(crate) control_tx: mpsc::UnboundedSender, } pub(crate) struct SubscriptionHandleState { @@ -147,23 +157,16 @@ impl SubscriptionHandle { } let filter_id = FilterId::new(inner.next_filter_id); - let control_tx = self.control_tx.clone(); - let matchers = self.matchers.clone(); let sub_id = self.sub_id; - let filter_to_register = filter.clone(); - let mut registered = false; - let mut register = |snapshot| { - inner.next_filter_id = inner.next_filter_id.wrapping_add(1); - inner.active_filter_ids.insert(filter_id); - let _ = control_tx.send(SubscriptionControlMessage::AddFilter { filter_id, snapshot }); - matchers.send_by_seq_id( - sub_id, - MatcherMessage::AddFilter { sub_id, filter_id, 
filter: filter_to_register.clone() }, - ); - registered = true; - }; - self.store.register_filter_with_snapshot(&filter, &mut register)?; - assert!(registered, "register_filter_with_snapshot always calls register; qed"); + self.matchers + .try_send_by_seq_id(sub_id, MatcherMessage::AddFilter { sub_id, filter_id, filter }) + .map_err(|_| { + AddFilterError::Store(sp_statement_store::Error::InvalidConfig( + "statement subscription matcher stopped".into(), + )) + })?; + inner.next_filter_id = inner.next_filter_id.wrapping_add(1); + inner.active_filter_ids.insert(filter_id); Ok(filter_id) } @@ -173,7 +176,6 @@ impl SubscriptionHandle { if !inner.active_filter_ids.remove(&filter_id) { return false; } - let _ = self.control_tx.send(SubscriptionControlMessage::RemoveFilter { filter_id }); self.matchers.send_by_seq_id( self.sub_id, MatcherMessage::RemoveFilter { sub_id: self.sub_id, filter_id }, @@ -198,11 +200,6 @@ struct PendingLiveStatement { matched_filter_ids: Vec, } -pub(crate) enum SubscriptionControlMessage { - AddFilter { filter_id: FilterId, snapshot: Vec> }, - RemoveFilter { filter_id: FilterId }, -} - pub(crate) struct MultiFilterSubscriptionState { active_filter_ids: HashSet, replays_in_progress: HashSet, @@ -239,42 +236,29 @@ impl MultiFilterSubscriptionState { .push_back(PendingReplay { filter_id, snapshot: snapshot.into() }); } - fn record_filter_removed(&mut self, filter_id: FilterId) -> bool { - if !self.active_filter_ids.remove(&filter_id) { - return false; - } + fn record_filter_removed(&mut self, filter_id: FilterId) { + self.active_filter_ids.remove(&filter_id); self.replays_in_progress.remove(&filter_id); self.pending_replays.retain(|replay| replay.filter_id != filter_id); self.replayed_filter_ids_by_hash.retain(|_hash, set| { set.remove(&filter_id); !set.is_empty() }); - true - } - - fn handle_control_message(&mut self, message: SubscriptionControlMessage) { - match message { - SubscriptionControlMessage::AddFilter { filter_id, snapshot } => { - 
self.record_filter_added(filter_id, snapshot); - }, - SubscriptionControlMessage::RemoveFilter { filter_id } => { - let _ = self.record_filter_removed(filter_id); - }, - } } fn next_event(&mut self) -> Option { - if self.stopped { - if self.stop_emitted { - return None; + if !self.stopped { + if let Some(event) = self.next_replay_event().or_else(|| self.next_pending_live_event()) + { + return Some(event); } + } + + if self.stopped && !self.stop_emitted { self.stop_emitted = true; return Some(MultiFilterSubscriptionEvent::Stop); } - if let Some(event) = self.next_replay_event() { - return Some(event); - } - self.next_pending_live_event() + None } fn next_replay_event(&mut self) -> Option { @@ -323,32 +307,20 @@ impl MultiFilterSubscriptionState { None } - fn handle_live_event( - &mut self, - event: LiveStatementEvent, - ) -> Option { - let matched_filters = self.active_matched_filters(&event.matched_filter_ids); - if matched_filters.is_empty() { - return None; - } - if matched_filters.iter().any(|f| self.replays_in_progress.contains(f)) { - if self.pending_live.len() >= PENDING_LIVE_HARD_CAP { - log::warn!( - target: LOG_TARGET, - "pending_live cap reached on statement subscription; sending stop", - ); - self.stopped = true; - } else { - self.pending_live.push_back(PendingLiveStatement { - hash: event.hash, - encoded: event.encoded, - matched_filter_ids: event.matched_filter_ids, - }); - } - return None; + fn handle_live_event(&mut self, event: LiveStatementEvent) { + if self.pending_live.len() >= PENDING_LIVE_HARD_CAP { + log::warn!( + target: LOG_TARGET, + "pending_live cap reached on statement subscription; sending stop", + ); + self.stopped = true; + return; } - - self.new_statement_event(event.hash, event.encoded, &matched_filters) + self.pending_live.push_back(PendingLiveStatement { + hash: event.hash, + encoded: event.encoded, + matched_filter_ids: event.matched_filter_ids, + }); } fn new_statement_event( @@ -421,19 +393,9 @@ pub enum 
MultiFilterSubscriptionEvent { /// Stream of multi-filter subscription events pub struct LiveEventStream { - rx: async_channel::Receiver, - control_rx: mpsc::UnboundedReceiver, - state: MultiFilterSubscriptionState, sub_id: SeqID, matchers: SubscriptionsMatchersHandlers, -} - -impl LiveEventStream { - fn drain_ready_control_messages(&mut self) { - while let Ok(message) = self.control_rx.try_recv() { - self.state.handle_control_message(message); - } - } + pending_next_event: Option, } impl Stream for LiveEventStream { @@ -445,26 +407,25 @@ impl Stream for LiveEventStream { ) -> std::task::Poll> { let this = self.get_mut(); loop { - if let Some(event) = this.state.next_event() { - return Poll::Ready(Some(event)); - } - match this.control_rx.poll_recv(cx) { - Poll::Ready(Some(message)) => { - this.state.handle_control_message(message); - continue; - }, - Poll::Ready(None) | Poll::Pending => {}, - } - match this.rx.poll_next_unpin(cx) { - Poll::Ready(Some(event)) => { - this.drain_ready_control_messages(); - if let Some(event) = this.state.handle_live_event(event) { - return Poll::Ready(Some(event)); - } - }, - Poll::Ready(None) => return Poll::Ready(None), - Poll::Pending => return Poll::Pending, + if let Some(pending_next_event) = this.pending_next_event.as_mut() { + return match pending_next_event.as_mut().poll(cx) { + Poll::Ready(event) => { + this.pending_next_event = None; + Poll::Ready(event) + }, + Poll::Pending => Poll::Pending, + }; } + + let (tx, rx) = oneshot::channel(); + let message = MatcherMessage::RequestNext { sub_id: this.sub_id, reply: tx }; + let matcher_tx = this.matchers.sender_by_seq_id(this.sub_id); + this.pending_next_event = Some(Box::pin(async move { + if matcher_tx.send(message).await.is_err() { + return None; + } + rx.await.unwrap_or(None) + })); } } } @@ -477,18 +438,19 @@ impl Drop for LiveEventStream { } /// Messages sent to matcher tasks. -#[derive(Clone, Debug)] pub enum MatcherMessage { /// A new statement has been submitted. 
NewStatement(Statement), /// A new subscription has been created. Subscribe { info: IndexedSubscription, tx: async_channel::Sender }, /// A new multi-filter subscription has been created - SubscribeEmpty { seq_id: SeqID, tx: async_channel::Sender }, + SubscribeEmpty { seq_id: SeqID, snapshot_provider: Arc }, /// Add a filter to an existing multi-filter subscription AddFilter { sub_id: SeqID, filter_id: FilterId, filter: OptimizedTopicFilter }, /// Remove a filter from an existing multi-filter subscription RemoveFilter { sub_id: SeqID, filter_id: FilterId }, + /// Request the next event for a live subscription + RequestNext { sub_id: SeqID, reply: PendingEventReply }, /// Unsubscribe the subscription with the given ID. Unsubscribe(SeqID), } @@ -532,8 +494,8 @@ impl SubscriptionsHandle { Ok(MatcherMessage::Subscribe { info, tx }) => { subscriptions.subscribe(info, tx); }, - Ok(MatcherMessage::SubscribeEmpty { seq_id, tx }) => { - subscriptions.subscribe_empty(seq_id, tx); + Ok(MatcherMessage::SubscribeEmpty { seq_id, snapshot_provider }) => { + subscriptions.subscribe_empty(seq_id, snapshot_provider); }, Ok(MatcherMessage::AddFilter { sub_id, filter_id, filter }) => { subscriptions.add_filter(sub_id, filter_id, filter); @@ -541,6 +503,9 @@ impl SubscriptionsHandle { Ok(MatcherMessage::RemoveFilter { sub_id, filter_id }) => { subscriptions.remove_filter(sub_id, filter_id); }, + Ok(MatcherMessage::RequestNext { sub_id, reply }) => { + subscriptions.request_next(sub_id, reply); + }, Ok(MatcherMessage::Unsubscribe(seq_id)) => { subscriptions.unsubscribe(seq_id); }, @@ -601,25 +566,21 @@ impl SubscriptionsHandle { pub(crate) fn subscribe_empty( &self, - ) -> (SeqID, mpsc::UnboundedSender, LiveEventStream) { + snapshot_provider: Arc, + ) -> (SeqID, LiveEventStream) { let sub_id = self.next_id(); - let (tx, rx) = async_channel::bounded(SUBSCRIPTION_BUFFER_SIZE); - let (control_tx, control_rx) = tokio::sync::mpsc::unbounded_channel(); - self.matchers - 
.send_by_seq_id(sub_id, MatcherMessage::SubscribeEmpty { seq_id: sub_id, tx }); - - let stream = LiveEventStream { - rx, - control_rx, - state: MultiFilterSubscriptionState::new(), + self.matchers.send_by_seq_id( sub_id, - matchers: self.matchers.clone(), - }; - (sub_id, control_tx, stream) + MatcherMessage::SubscribeEmpty { seq_id: sub_id, snapshot_provider }, + ); + + let stream = + LiveEventStream { sub_id, matchers: self.matchers.clone(), pending_next_event: None }; + (sub_id, stream) } pub(crate) fn notify(&self, statement: Statement) { - self.matchers.send_all(MatcherMessage::NewStatement(statement)); + self.matchers.send_all(statement); } pub(crate) fn matchers(&self) -> SubscriptionsMatchersHandlers { @@ -639,8 +600,10 @@ enum SubscriptionRecord { filter: OptimizedTopicFilter, }, Live { - tx: async_channel::Sender, filters: HashMap, + state: MultiFilterSubscriptionState, + pending_request: Option, + snapshot_provider: Arc, }, } @@ -707,39 +670,110 @@ impl SubscriptionsInfo { self.index_filter(subscription_info); } - fn subscribe_empty(&mut self, seq_id: SeqID, tx: async_channel::Sender) { - self.by_sub_id - .insert(seq_id, SubscriptionRecord::Live { tx, filters: HashMap::new() }); + fn subscribe_empty( + &mut self, + seq_id: SeqID, + snapshot_provider: Arc, + ) { + self.by_sub_id.insert( + seq_id, + SubscriptionRecord::Live { + filters: HashMap::new(), + state: MultiFilterSubscriptionState::new(), + pending_request: None, + snapshot_provider, + }, + ); } fn add_filter(&mut self, sub_id: SeqID, filter_id: FilterId, filter: OptimizedTopicFilter) { let filter_key = SubscriptionFilterKey::Dynamic(filter_id); + let Some(SubscriptionRecord::Live { snapshot_provider, .. 
}) = self.by_sub_id.get(&sub_id) + else { + return; + }; + let provider = snapshot_provider.clone(); + let result = provider.register_filter_with_snapshot(&filter, &mut |snapshot| { + let Some(record) = self.by_sub_id.get_mut(&sub_id) else { + return; + }; + let SubscriptionRecord::Live { filters, state, .. } = record else { + return; + }; + let Entry::Vacant(entry) = filters.entry(filter_id) else { + return; + }; + entry.insert(filter.clone()); + state.record_filter_added(filter_id, snapshot); + self.index_filter(IndexedSubscription { + topic_filter: filter.clone(), + seq_id: sub_id, + filter_key, + }); + }); + + if let Err(error) = result { + log::error!( + target: LOG_TARGET, + "Failed to collect replay snapshot for statement subscription {:?}: {:?}", + sub_id, + error, + ); + if let Some(SubscriptionRecord::Live { state, .. }) = self.by_sub_id.get_mut(&sub_id) { + state.stopped = true; + } + } + + self.wake_pending_request(sub_id); + } + + fn request_next(&mut self, sub_id: SeqID, reply: PendingEventReply) { let Some(record) = self.by_sub_id.get_mut(&sub_id) else { + let _ = reply.send(None); return; }; - let SubscriptionRecord::Live { filters, .. } = record else { + let SubscriptionRecord::Live { state, pending_request, .. } = record else { + let _ = reply.send(None); return; }; - let Entry::Vacant(entry) = filters.entry(filter_id) else { return }; - entry.insert(filter.clone()); - - self.index_filter(IndexedSubscription { topic_filter: filter, seq_id: sub_id, filter_key }); + if let Some(event) = state.next_event() { + let _ = reply.send(Some(event)); + } else if state.stopped && state.stop_emitted { + let _ = reply.send(None); + } else if pending_request.is_none() { + *pending_request = Some(reply); + } else { + let _ = reply.send(None); + } } fn remove_filter(&mut self, sub_id: SeqID, filter_id: FilterId) { let Some(record) = self.by_sub_id.get_mut(&sub_id) else { return; }; - let SubscriptionRecord::Live { filters, .. 
} = record else { + let SubscriptionRecord::Live { filters, state, .. } = record else { return; }; let Some(filter) = filters.remove(&filter_id) else { return; }; - + state.record_filter_removed(filter_id); self.remove_indexed_filter(sub_id, SubscriptionFilterKey::Dynamic(filter_id), &filter); } + fn wake_pending_request(&mut self, sub_id: SeqID) { + let Some(record) = self.by_sub_id.get_mut(&sub_id) else { + return; + }; + let SubscriptionRecord::Live { pending_request, .. } = record else { + return; + }; + let Some(reply) = pending_request.take() else { + return; + }; + self.request_next(sub_id, reply); + } + fn index_filter(&mut self, subscription_info: IndexedSubscription) { let index_key = (subscription_info.seq_id, subscription_info.filter_key); match &subscription_info.topic_filter { @@ -775,11 +809,13 @@ impl SubscriptionsInfo { let mut needs_unsubscribing = HashSet::new(); for (sub_id, matched) in matches { - match (matched, self.by_sub_id.get(&sub_id)) { - ( - MatchedSubscription::Statements, - Some(SubscriptionRecord::Statements { tx, .. }), - ) => { + match matched { + MatchedSubscription::Statements => { + let Some(SubscriptionRecord::Statements { tx, .. }) = + self.by_sub_id.get(&sub_id) + else { + continue; + }; if let Err(err) = tx.try_send(StatementEvent::NewStatements { statements: vec![bytes_to_send.clone()], remaining: None, @@ -791,21 +827,18 @@ impl SubscriptionsInfo { needs_unsubscribing.insert(sub_id); } }, - ( - MatchedSubscription::Live(filter_ids), - Some(SubscriptionRecord::Live { tx, .. }), - ) if !filter_ids.is_empty() => { - if let Err(err) = tx.try_send(LiveStatementEvent { + MatchedSubscription::Live(filter_ids) if !filter_ids.is_empty() => { + let Some(SubscriptionRecord::Live { state, .. 
}) = + self.by_sub_id.get_mut(&sub_id) + else { + continue; + }; + state.handle_live_event(LiveStatementEvent { hash: statement.hash(), encoded: encoded.clone(), matched_filter_ids: filter_ids.into_iter().collect(), - }) { - log::debug!( - target: LOG_TARGET, - "Failed to send statement to subscriber {:?}: {:?} unsubscribing it", sub_id, err - ); - needs_unsubscribing.insert(sub_id); - } + }); + self.wake_pending_request(sub_id); }, _ => {}, } @@ -973,11 +1006,9 @@ impl SubscriptionsMatchersHandlers { // Send a message to the matcher task responsible for the given subscription ID. fn send_by_seq_id(&self, id: SeqID, message: MatcherMessage) { - let index: u64 = id.into(); // If matchers channels are full we backpressure the sender, in this case it will be the // processing of new statements. - if let Err(err) = self.matchers[index as usize % self.matchers.len()].send_blocking(message) - { + if let Err(err) = self.try_send_by_seq_id(id, message) { log::error!( target: LOG_TARGET, "Failed to send statement to matcher task: {:?}", err @@ -985,10 +1016,24 @@ impl SubscriptionsMatchersHandlers { } } + fn try_send_by_seq_id( + &self, + id: SeqID, + message: MatcherMessage, + ) -> std::result::Result<(), async_channel::SendError> { + self.sender_by_seq_id(id).send_blocking(message) + } + + fn sender_by_seq_id(&self, id: SeqID) -> async_channel::Sender { + let index: u64 = id.into(); + self.matchers[index as usize % self.matchers.len()].clone() + } + // Send a message to all matcher tasks. 
- fn send_all(&self, message: MatcherMessage) { + fn send_all(&self, statement: Statement) { for sender in &self.matchers { - if let Err(err) = sender.send_blocking(message.clone()) { + if let Err(err) = sender.send_blocking(MatcherMessage::NewStatement(statement.clone())) + { log::error!( target: LOG_TARGET, "Failed to send message to matcher task: {:?}", err @@ -1078,9 +1123,7 @@ mod tests { state.record_filter_added(replay_filter, vec![encoded.clone()]); state.active_filter_ids.insert(live_filter); - assert!(state - .handle_live_event(live_event_for(&statement, vec![replay_filter, live_filter])) - .is_none()); + state.handle_live_event(live_event_for(&statement, vec![replay_filter, live_filter])); assert_eq!(state.pending_live.len(), 1); match state.next_event() { @@ -1118,7 +1161,7 @@ mod tests { encoded: vec![i as u8], matched_filter_ids: vec![filter_id], }; - assert!(state.handle_live_event(event).is_none()); + state.handle_live_event(event); } assert_eq!(state.pending_live.len(), PENDING_LIVE_HARD_CAP); @@ -1127,29 +1170,24 @@ mod tests { encoded: vec![0], matched_filter_ids: vec![filter_id], }; - assert!(state.handle_live_event(overflow).is_none()); + state.handle_live_event(overflow); assert!(matches!(state.next_event(), Some(MultiFilterSubscriptionEvent::Stop))); assert!(state.next_event().is_none()); } #[test] - fn multi_filter_control_add_registers_replay_before_live_events() { + fn multi_filter_add_registers_replay_before_live_events() { let mut state = MultiFilterSubscriptionState::new(); let filter_id = FilterId::new(7); let snapshot = vec![vec![1, 2, 3]]; - state.handle_control_message(SubscriptionControlMessage::AddFilter { - filter_id, - snapshot: snapshot.clone(), - }); + state.record_filter_added(filter_id, snapshot.clone()); - assert!(state - .handle_live_event(LiveStatementEvent { - hash: sp_statement_store::hash_encoded(&snapshot[0]), - encoded: snapshot[0].clone(), - matched_filter_ids: vec![filter_id], - }) - .is_none()); + 
state.handle_live_event(LiveStatementEvent { + hash: sp_statement_store::hash_encoded(&snapshot[0]), + encoded: snapshot[0].clone(), + matched_filter_ids: vec![filter_id], + }); assert!(matches!( state.next_event(), @@ -1165,38 +1203,6 @@ mod tests { assert!(state.next_event().is_none()); } - #[tokio::test] - async fn live_stream_processes_filter_control_before_later_live_match() { - let filter_id = FilterId::new(7); - let live_statement = signed_statement(7); - let (tx, rx) = async_channel::bounded(SUBSCRIPTION_BUFFER_SIZE); - let (control_tx, control_rx) = tokio::sync::mpsc::unbounded_channel(); - let (matcher_tx, _matcher_rx) = async_channel::bounded(MATCHERS_TASK_CHANNEL_BUFFER_SIZE); - let mut stream = LiveEventStream { - rx, - control_rx, - state: MultiFilterSubscriptionState::new(), - sub_id: SeqID::from(0), - matchers: SubscriptionsMatchersHandlers::new(vec![matcher_tx]), - }; - - control_tx - .send(SubscriptionControlMessage::AddFilter { filter_id, snapshot: Vec::new() }) - .unwrap(); - tx.send(live_event_for(&live_statement, vec![filter_id])).await.unwrap(); - - assert!(matches!( - stream.next().await, - Some(MultiFilterSubscriptionEvent::ReplayDone { filter_id: id }) if id == filter_id - )); - assert!(matches!( - stream.next().await, - Some(MultiFilterSubscriptionEvent::NewStatement(event)) - if event.hash == live_statement.hash() && - event.matched_filter_ids == vec![filter_id] - )); - } - #[test] fn multi_filter_new_statement_history_cap_stops_subscription() { let mut state = MultiFilterSubscriptionState::new(); @@ -1209,8 +1215,9 @@ mod tests { encoded: vec![i as u8], matched_filter_ids: vec![filter_id], }; + state.handle_live_event(event); assert!(matches!( - state.handle_live_event(event), + state.next_event(), Some(MultiFilterSubscriptionEvent::NewStatement(_)) )); } @@ -1220,7 +1227,7 @@ mod tests { encoded: vec![0], matched_filter_ids: vec![filter_id], }; - assert!(state.handle_live_event(overflow).is_none()); + state.handle_live_event(overflow); 
assert!(matches!(state.next_event(), Some(MultiFilterSubscriptionEvent::Stop))); assert!(state.next_event().is_none()); } From 0d042555d8baa7344f70eb674851b3917b32cf07 Mon Sep 17 00:00:00 2001 From: DenzelPenzel Date: Fri, 15 May 2026 15:24:00 +0100 Subject: [PATCH 15/16] statement-store: trim unstable subscription cleanup --- .../tests/zombie_ci/statement_store/common.rs | 47 +---------- .../zombie_ci/statement_store/integration.rs | 8 +- .../rpc-spec-v2/src/statement/statement.rs | 20 ++--- .../rpc-spec-v2/src/statement/subscription.rs | 79 +++++-------------- substrate/client/statement-store/src/lib.rs | 1 - .../statement-store/src/subscription.rs | 32 +++----- 6 files changed, 44 insertions(+), 143 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/common.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/common.rs index baceae3e9e1e4..325e22cc15f98 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/common.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/common.rs @@ -17,7 +17,7 @@ use sp_statement_store::{ }; use std::{ path::{Path, PathBuf}, - time::{Duration, Instant}, + time::Duration, }; use zombienet_sdk::{ subxt::{ @@ -227,13 +227,6 @@ pub(super) fn create_chain_spec_with_allowances( participant_count: u32, base_dir: &Path, ) -> Result { - let started_at = Instant::now(); - info!( - "Creating statement allowance chain spec: participants={}, base_dir={}", - participant_count, - base_dir.display(), - ); - let chain_spec_template = include_str!("people-westend-local-spec.json"); let mut chain_spec: serde_json::Value = serde_json::from_str(chain_spec_template) .map_err(|e| anyhow!("Failed to parse chain spec JSON: {}", e))?; @@ -248,15 +241,6 @@ pub(super) fn create_chain_spec_with_allowances( let allowance_hex = format!("0x{}", HexDisplay::from(&allowance.encode())); info!("Injecting statement allowance: {:}", allowance_hex); for idx in 
0..participant_count { - if idx > 0 && idx % 5_000 == 0 { - info!( - "Injected statement allowances: {}/{} elapsed_s={}", - idx, - participant_count, - started_at.elapsed().as_secs(), - ); - } - let keypair = get_keypair(idx); let account_id = keypair.public(); @@ -265,25 +249,13 @@ pub(super) fn create_chain_spec_with_allowances( genesis.insert(storage_key_hex, serde_json::Value::String(allowance_hex.clone())); } - info!( - "Finished injecting statement allowances: {}/{} elapsed_s={}", - participant_count, - participant_count, - started_at.elapsed().as_secs(), - ); let chain_spec_path = base_dir.join("people-westend-custom.json"); - info!("Serializing chain spec: {}", chain_spec_path.display()); let chain_spec_json = serde_json::to_string_pretty(&chain_spec) .map_err(|e| anyhow!("Failed to serialize chain spec: {}", e))?; std::fs::write(&chain_spec_path, chain_spec_json) .map_err(|e| anyhow!("Failed to write chain spec to file: {}", e))?; - info!( - "Wrote chain spec: {} elapsed_s={}", - chain_spec_path.display(), - started_at.elapsed().as_secs(), - ); Ok(chain_spec_path) } @@ -326,12 +298,6 @@ async fn launch_network( ) -> Result, anyhow::Error> { let images = zombienet_sdk::environment::get_images_from_env(); let base_dir = base_dir()?; - info!( - "Preparing zombienet network config: collators={}, chain_spec={}, base_dir={}", - collators.len(), - chain_spec_path.display(), - base_dir.display(), - ); let config = NetworkConfigBuilder::new() .with_relaychain(|r| { @@ -363,12 +329,8 @@ async fn launch_network( .build() .map_err(format_build_errors)?; - info!("Initialising zombienet network"); let network = crate::utils::initialize_network(config).await?; - info!("Waiting for zombienet network to be up"); - let wait_result = network.wait_until_is_up(60).await; - info!("Zombienet network wait finished: ok={}", wait_result.is_ok()); - assert!(wait_result.is_ok()); + assert!(network.wait_until_is_up(60).await.is_ok()); Ok(network) } @@ -393,11 +355,6 @@ pub(super) 
async fn spawn_network_with_injected_allowances( ) -> Result, anyhow::Error> { assert!(!collators.is_empty()); let base_dir = base_dir()?; - info!( - "Spawning injected allowance network: collators={}, participants={}", - collators.len(), - participant_count, - ); let chain_spec_path = create_chain_spec_with_allowances(participant_count, &base_dir)?; let args = collator_args(participant_count, COLLATOR_TRACE_LOG_FILTER); launch_network(collators, &chain_spec_path, args).await diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/integration.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/integration.rs index ca926a7ff24ea..4f098dafbd37d 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/integration.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/statement_store/integration.rs @@ -57,16 +57,16 @@ async fn expect_one_unstable_statement( timeout_secs: u64, ) -> Result { loop { - return match expect_unstable_event(subscription, timeout_secs).await? { - UnstableStatementEvent::NewStatements { statements } => { + match expect_unstable_event(subscription, timeout_secs).await? 
{ + UnstableStatementEvent::NewStatements { mut statements } => { if statements.is_empty() { continue; } assert_eq!(statements.len(), 1); - Ok(statements.into_iter().next().unwrap()) + return Ok(statements.remove(0)); }, event => anyhow::bail!("Expected unstable newStatements event, got {:?}", event), - }; + } } } diff --git a/substrate/client/rpc-spec-v2/src/statement/statement.rs b/substrate/client/rpc-spec-v2/src/statement/statement.rs index db4dc3492c238..03d600aed6800 100644 --- a/substrate/client/rpc-spec-v2/src/statement/statement.rs +++ b/substrate/client/rpc-spec-v2/src/statement/statement.rs @@ -22,8 +22,7 @@ use crate::{ error::Error, event::AddFilterResponse, subscription::{ - add_filter_sync, parse_filter_id, remove_filter_sync, run_subscription_task, - AddFilterOutcome, StatementSubscriptions, + filter_id_to_string, parse_filter_id, run_subscription_task, StatementSubscriptions, }, LOG_TARGET, }, @@ -35,7 +34,7 @@ use jsonrpsee::{ core::async_trait, types::SubscriptionId, ConnectionId, Extensions, PendingSubscriptionSink, }; use sc_rpc::utils::Subscription; -use sc_statement_store::MultiFilterSubscriptionApi; +use sc_statement_store::{AddFilterError, MultiFilterSubscriptionApi}; use sp_core::Bytes; use sp_statement_store::{ OptimizedTopicFilter, Statement, StatementSource, StatementStore, SubmitOutcome, SubmitResult, @@ -64,7 +63,7 @@ where fn read_subscription_id_as_string(sink: &Subscription) -> String { match sink.subscription_id() { SubscriptionId::Num(n) => n.to_string(), - SubscriptionId::Str(s) => s.into_owned().into(), + SubscriptionId::Str(s) => s.into_owned(), } } @@ -132,11 +131,12 @@ where return Err(Error::InvalidSubscription); }; - match add_filter_sync(&state, topic_filter)? 
{ - AddFilterOutcome::Added(filter_id) => Ok(AddFilterResponse::Ok( - crate::statement::subscription::filter_id_to_string(filter_id), - )), - AddFilterOutcome::LimitReached => Ok(AddFilterResponse::limit_reached()), + match state.add_filter(topic_filter) { + Ok(filter_id) => Ok(AddFilterResponse::Ok(filter_id_to_string(filter_id))), + Err(AddFilterError::LimitReached) => Ok(AddFilterResponse::limit_reached()), + Err(AddFilterError::Stopped) => { + Err(Error::InternalError("statement subscription matcher stopped".into())) + }, } } @@ -149,7 +149,7 @@ where let conn_id = connection_id(ext); let Some(state) = self.subscriptions.get(conn_id, &subscription) else { return Ok(()) }; let Some(parsed) = parse_filter_id(&filter_id) else { return Ok(()) }; - let _ = remove_filter_sync(&state, parsed); + let _ = state.remove_filter(parsed); Ok(()) } diff --git a/substrate/client/rpc-spec-v2/src/statement/subscription.rs b/substrate/client/rpc-spec-v2/src/statement/subscription.rs index 0382ec98ac8c9..c5c71afc1ac01 100644 --- a/substrate/client/rpc-spec-v2/src/statement/subscription.rs +++ b/substrate/client/rpc-spec-v2/src/statement/subscription.rs @@ -16,28 +16,18 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use crate::statement::{ - error::Error, - event::{NewStatementEntry, SubscribeEvent}, -}; +use crate::statement::event::{NewStatementEntry, SubscribeEvent}; use futures::StreamExt; use jsonrpsee::ConnectionId; use parking_lot::RwLock; use sc_rpc::utils::Subscription; -use sc_statement_store::{ - AddFilterError, LiveEventStream, MultiFilterSubscriptionEvent, SubscriptionHandle, -}; -use sp_statement_store::{FilterId, OptimizedTopicFilter}; +use sc_statement_store::{LiveEventStream, MultiFilterSubscriptionEvent, SubscriptionHandle}; +use sp_statement_store::FilterId; use std::{collections::HashMap, sync::Arc, time::Duration}; use tokio::sync::Notify; const SUBSCRIPTION_REGISTRATION_TIMEOUT: Duration = Duration::from_millis(250); -pub(crate) enum AddFilterOutcome { - Added(FilterId), - LimitReached, -} - type SubscriptionStateRef = Arc; type SubscriptionRegistry = Arc>>>; @@ -126,23 +116,6 @@ impl Drop for SubscriptionEntry { } } -pub(crate) fn add_filter_sync( - state: &Arc, - filter: OptimizedTopicFilter, -) -> Result { - match state.add_filter(filter) { - Ok(filter_id) => Ok(AddFilterOutcome::Added(filter_id)), - Err(AddFilterError::LimitReached) => Ok(AddFilterOutcome::LimitReached), - Err(AddFilterError::Store(e)) => { - Err(Error::InternalError(format!("add_filter failed: {e}"))) - }, - } -} - -pub(crate) fn remove_filter_sync(state: &Arc, filter_id: FilterId) -> bool { - state.remove_filter(filter_id) -} - pub(crate) fn filter_id_to_string(id: FilterId) -> String { id.as_u64().to_string() } @@ -162,50 +135,36 @@ async fn send_subscription_event(sink: &Subscription, event: MultiFilterSubscrip match event { MultiFilterSubscriptionEvent::ReplayStatements { filter_id, statements } => { let statements = statements.into_iter().map(sp_core::Bytes).collect(); - send_event( - sink, - &SubscribeEvent::ReplayStatements { - filter_id: filter_id_to_string(filter_id), - statements, - }, - ) + sink.send(&SubscribeEvent::ReplayStatements { + filter_id: 
filter_id_to_string(filter_id), + statements, + }) .await + .is_ok() }, - MultiFilterSubscriptionEvent::ReplayDone { filter_id } => { - send_event( - sink, - &SubscribeEvent::ReplayDone { filter_id: filter_id_to_string(filter_id) }, - ) + MultiFilterSubscriptionEvent::ReplayDone { filter_id } => sink + .send(&SubscribeEvent::ReplayDone { filter_id: filter_id_to_string(filter_id) }) .await - }, + .is_ok(), MultiFilterSubscriptionEvent::NewStatement(event) => { let filter_ids = event.matched_filter_ids.into_iter().map(filter_id_to_string).collect(); - send_event( - sink, - &SubscribeEvent::NewStatements { - statements: vec![NewStatementEntry { - statement: sp_core::Bytes(event.encoded), - filter_ids, - }], - }, - ) + sink.send(&SubscribeEvent::NewStatements { + statements: vec![NewStatementEntry { + statement: sp_core::Bytes(event.encoded), + filter_ids, + }], + }) .await + .is_ok() }, MultiFilterSubscriptionEvent::Stop => { - let _ = send_event(sink, &SubscribeEvent::Stop).await; + let _ = sink.send(&SubscribeEvent::Stop).await; false }, } } -async fn send_event(sink: &Subscription, event: &SubscribeEvent) -> bool { - match sink.send(event).await { - Ok(()) => true, - Err(_) => false, - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/substrate/client/statement-store/src/lib.rs b/substrate/client/statement-store/src/lib.rs index 65774aef229d5..76a38371fd3d6 100644 --- a/substrate/client/statement-store/src/lib.rs +++ b/substrate/client/statement-store/src/lib.rs @@ -1661,7 +1661,6 @@ impl Store { |statement| Some(statement.encode()), )?; register(existing_statements); - drop(index); Ok(()) } } diff --git a/substrate/client/statement-store/src/subscription.rs b/substrate/client/statement-store/src/subscription.rs index 59b6902e1a1c9..15b93e28e1063 100644 --- a/substrate/client/statement-store/src/subscription.rs +++ b/substrate/client/statement-store/src/subscription.rs @@ -69,14 +69,8 @@ type PendingEventReply = oneshot::Sender>; pub enum AddFilterError { 
/// The subscription already has the maximum number of active filters LimitReached, - /// The store failed while collecting the replay snapshot - Store(sp_statement_store::Error), -} - -impl From for AddFilterError { - fn from(error: sp_statement_store::Error) -> Self { - AddFilterError::Store(error) - } + /// The matcher stopped before the filter request could be queued + Stopped, } impl std::fmt::Display for AddFilterError { @@ -85,19 +79,12 @@ impl std::fmt::Display for AddFilterError { AddFilterError::LimitReached => { write!(f, "maximum number of filters for this subscription has been reached") }, - AddFilterError::Store(error) => error.fmt(f), + AddFilterError::Stopped => write!(f, "statement subscription matcher stopped"), } } } -impl std::error::Error for AddFilterError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - AddFilterError::LimitReached => None, - AddFilterError::Store(error) => Some(error), - } - } -} +impl std::error::Error for AddFilterError {} /// Trait for initiating statement store subscriptions from the RPC module. 
pub trait StatementStoreSubscriptionApi: Send + Sync { @@ -160,11 +147,7 @@ impl SubscriptionHandle { let sub_id = self.sub_id; self.matchers .try_send_by_seq_id(sub_id, MatcherMessage::AddFilter { sub_id, filter_id, filter }) - .map_err(|_| { - AddFilterError::Store(sp_statement_store::Error::InvalidConfig( - "statement subscription matcher stopped".into(), - )) - })?; + .map_err(|_| AddFilterError::Stopped)?; inner.next_filter_id = inner.next_filter_id.wrapping_add(1); inner.active_filter_ids.insert(filter_id); Ok(filter_id) @@ -424,7 +407,10 @@ impl Stream for LiveEventStream { if matcher_tx.send(message).await.is_err() { return None; } - rx.await.unwrap_or(None) + match rx.await { + Ok(event) => event, + Err(_) => None, + } })); } } From bcd3d6b4c4672432fa54716fcd84e78074294b34 Mon Sep 17 00:00:00 2001 From: DenzelPenzel Date: Fri, 15 May 2026 17:43:48 +0100 Subject: [PATCH 16/16] statement-store: inline subscription task loop --- .../rpc-spec-v2/src/statement/statement.rs | 12 ++++++++---- .../rpc-spec-v2/src/statement/subscription.rs | 16 +++++----------- 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/substrate/client/rpc-spec-v2/src/statement/statement.rs b/substrate/client/rpc-spec-v2/src/statement/statement.rs index 03d600aed6800..8b9b6e3e0b1d0 100644 --- a/substrate/client/rpc-spec-v2/src/statement/statement.rs +++ b/substrate/client/rpc-spec-v2/src/statement/statement.rs @@ -22,14 +22,14 @@ use crate::{ error::Error, event::AddFilterResponse, subscription::{ - filter_id_to_string, parse_filter_id, run_subscription_task, StatementSubscriptions, + filter_id_to_string, parse_filter_id, send_subscription_event, StatementSubscriptions, }, LOG_TARGET, }, SubscriptionTaskExecutor, }; use codec::Decode; -use futures::FutureExt; +use futures::{FutureExt, StreamExt}; use jsonrpsee::{ core::async_trait, types::SubscriptionId, ConnectionId, Extensions, PendingSubscriptionSink, }; @@ -99,7 +99,7 @@ where let store = self.store.clone(); let 
connection_id = pending.connection_id(); - let (handle, live_stream) = store.create_subscription(); + let (handle, mut live_stream) = store.create_subscription(); let Ok(sink) = pending.accept().await.map(Subscription::from) else { return }; let sub_id = read_subscription_id_as_string(&sink); @@ -110,7 +110,11 @@ where }; let fut = async move { - run_subscription_task(sink, live_stream).await; + while let Some(event) = live_stream.next().await { + if !send_subscription_event(&sink, event).await { + break; + } + } drop(entry); }; diff --git a/substrate/client/rpc-spec-v2/src/statement/subscription.rs b/substrate/client/rpc-spec-v2/src/statement/subscription.rs index c5c71afc1ac01..57bb6ce56f512 100644 --- a/substrate/client/rpc-spec-v2/src/statement/subscription.rs +++ b/substrate/client/rpc-spec-v2/src/statement/subscription.rs @@ -17,11 +17,10 @@ // along with this program. If not, see . use crate::statement::event::{NewStatementEntry, SubscribeEvent}; -use futures::StreamExt; use jsonrpsee::ConnectionId; use parking_lot::RwLock; use sc_rpc::utils::Subscription; -use sc_statement_store::{LiveEventStream, MultiFilterSubscriptionEvent, SubscriptionHandle}; +use sc_statement_store::{MultiFilterSubscriptionEvent, SubscriptionHandle}; use sp_statement_store::FilterId; use std::{collections::HashMap, sync::Arc, time::Duration}; use tokio::sync::Notify; @@ -123,15 +122,10 @@ pub(crate) fn parse_filter_id(s: &str) -> Option { s.parse::().ok().map(FilterId::new) } -pub async fn run_subscription_task(sink: Subscription, mut live_stream: LiveEventStream) { - while let Some(event) = live_stream.next().await { - if !send_subscription_event(&sink, event).await { - return; - } - } -} - -async fn send_subscription_event(sink: &Subscription, event: MultiFilterSubscriptionEvent) -> bool { +pub(super) async fn send_subscription_event( + sink: &Subscription, + event: MultiFilterSubscriptionEvent, +) -> bool { match event { MultiFilterSubscriptionEvent::ReplayStatements { filter_id, 
statements } => { let statements = statements.into_iter().map(sp_core::Bytes).collect();