diff --git a/consensus/core/src/config/params.rs b/consensus/core/src/config/params.rs index c2b6599c13..0bd06703b3 100644 --- a/consensus/core/src/config/params.rs +++ b/consensus/core/src/config/params.rs @@ -6,7 +6,7 @@ pub use super::{ use crate::{ BlockLevel, KType, constants::STORAGE_MASS_PARAMETER, - mass::{BlockLaneLimits, BlockMassLimits}, + mass::{BlockLaneLimits, BlockMassLimits, MassCofactors}, network::{NetworkId, NetworkType}, }; use kaspa_addresses::Prefix; @@ -17,6 +17,15 @@ use std::{ ops::{Deref, DerefMut}, }; +const MEMPOOL_BLOCK_MASS_ACTIVATION_DELAY_SECONDS: u64 = 24 * 60 * 60; +const PRIOR_MAX_SIGNATURE_SCRIPT_LEN: usize = 10_000; +// Increased for stark proofs. This value is effectively covered by the post-Toccata +// transient block mass limit: 1_000_000 transient mass / 4 grams-per-byte = 250_000 +// bytes for the entire block, so a larger signature script cannot be accepted anyway. +// TODO(post-toccata): check whether this early signature-script length guard can be +// removed entirely, or whether it remains useful as cheap early protection. +const NEW_MAX_SIGNATURE_SCRIPT_LEN: usize = 250_000; + #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)] pub struct ForkActivation(u64); @@ -47,6 +56,13 @@ impl ForkActivation { current_daa_score >= self.0 } + pub fn delayed_by(self, delay_daa_score: u64) -> Self { + match self.0 { + Self::ALWAYS | Self::NEVER => self, + daa_score => Self(daa_score.saturating_add(delay_daa_score)), + } + } + /// Checks if the fork was "recently" activated, i.e., in the time frame of the provided range. /// This function returns false for forks that were always active, since they were never activated. 
pub fn is_within_range_from_activation(self, current_daa_score: u64, range: u64) -> bool { @@ -85,6 +101,10 @@ impl<T: Copy> ForkedParam<T> { if self.activation.is_active(daa_score) { self.post } else { self.pre } } + pub fn with_delayed_activation(&self, delay_daa_score: u64) -> Self { + Self::new(self.pre, self.post, self.activation.delayed_by(delay_daa_score)) + } + /// Returns the value before activation (=pre unless activation = always) pub fn before(&self) -> T { match self.activation.0 { @@ -107,6 +127,12 @@ } } +impl<T: Copy> From<T> for ForkedParam<T> { + fn from(value: T) -> Self { + Self::new_const(value) + } +} + impl<T: Copy + Ord> ForkedParam<T> { /// Returns the min of `pre` and `post` values. Useful for non-consensus initializations /// which require knowledge of the value bounds. @@ -196,12 +222,14 @@ pub struct OverrideParams { pub max_tx_inputs: Option<usize>, pub max_tx_outputs: Option<usize>, - pub max_signature_script_len: Option<usize>, + pub prior_max_signature_script_len: Option<usize>, + pub new_max_signature_script_len: Option<usize>, pub max_script_public_key_len: Option<usize>, pub mass_per_tx_byte: Option<u64>, pub mass_per_script_pub_key_byte: Option<u64>, pub mass_per_sig_op: Option<u64>, - pub block_mass_limits: Option<BlockMassLimits>, + pub prior_block_mass_limits: Option<BlockMassLimits>, + pub new_transient_mass_limit: Option<u64>, pub block_lane_limits: Option<BlockLaneLimits>, /// The parameter for scaling inverse KAS value to mass units (KIP-0009) @@ -239,12 +267,14 @@ impl From<Params> for OverrideParams { max_coinbase_payload_len: Some(p.max_coinbase_payload_len), max_tx_inputs: Some(p.max_tx_inputs), max_tx_outputs: Some(p.max_tx_outputs), - max_signature_script_len: Some(p.max_signature_script_len), + prior_max_signature_script_len: Some(p.prior_max_signature_script_len), + new_max_signature_script_len: Some(p.new_max_signature_script_len), max_script_public_key_len: Some(p.max_script_public_key_len), mass_per_tx_byte: Some(p.mass_per_tx_byte), mass_per_script_pub_key_byte: Some(p.mass_per_script_pub_key_byte), mass_per_sig_op: Some(p.mass_per_sig_op), -
block_mass_limits: Some(p.block_mass_limits), + prior_block_mass_limits: Some(p.prior_block_mass_limits), + new_transient_mass_limit: Some(p.new_transient_mass_limit), block_lane_limits: Some(p.block_lane_limits), storage_mass_parameter: Some(p.storage_mass_parameter), deflationary_phase_daa_score: Some(p.deflationary_phase_daa_score), @@ -291,13 +321,15 @@ pub struct Params { pub max_tx_inputs: usize, pub max_tx_outputs: usize, - pub max_signature_script_len: usize, + pub prior_max_signature_script_len: usize, + pub new_max_signature_script_len: usize, pub max_script_public_key_len: usize, pub mass_per_tx_byte: u64, pub mass_per_script_pub_key_byte: u64, pub mass_per_sig_op: u64, - pub block_mass_limits: BlockMassLimits, + pub prior_block_mass_limits: BlockMassLimits, + pub new_transient_mass_limit: u64, pub block_lane_limits: BlockLaneLimits, /// The parameter for scaling inverse KAS value to mass units (KIP-0009) @@ -365,6 +397,68 @@ impl Params { ) } + /// Returns the forked per-dimension block mass limits. + #[inline] + #[must_use] + pub fn block_mass_limits(&self) -> ForkedParam<BlockMassLimits> { + let mut new_block_mass_limits = self.prior_block_mass_limits; + new_block_mass_limits.transient = self.new_transient_mass_limit; + ForkedParam::new(self.prior_block_mass_limits, new_block_mass_limits, self.covenants_activation) + } + + /// Returns the forked cofactors for normalizing block mass dimensions. + #[inline] + #[must_use] + pub fn block_mass_cofactors(&self) -> ForkedParam<MassCofactors> { + self.block_mass_limits().map(|limits| limits.cofactors()) + } + + /// Returns the block mass limits used for mempool policy. + /// + /// Mempool policy lags the consensus transient mass relaxation, so transactions + /// near activation are normalized by the stricter pre-activation limits.
+ #[inline] + #[must_use] + pub fn mempool_block_mass_limits(&self) -> ForkedParam<BlockMassLimits> { + let block_mass_limits = self.block_mass_limits(); + let prior_limits = block_mass_limits.before(); + let new_limits = block_mass_limits.after(); + assert_eq!( + new_limits.compute, prior_limits.compute, + "delaying mempool mass activation assumes the compute mass limit does not change" + ); + assert_eq!( + new_limits.storage, prior_limits.storage, + "delaying mempool mass activation assumes the storage mass limit does not change" + ); + assert!( + new_limits.transient >= prior_limits.transient, + "delaying mempool mass activation is only safe when the post-activation transient limit is not stricter" + ); + + block_mass_limits.with_delayed_activation(MEMPOOL_BLOCK_MASS_ACTIVATION_DELAY_SECONDS.saturating_mul(self.bps())) + } + + /// Returns the mempool policy cofactors for normalizing block mass dimensions. + #[inline] + #[must_use] + pub fn mempool_block_mass_cofactors(&self) -> ForkedParam<MassCofactors> { + let cofactors = self.mempool_block_mass_limits().map(|limits| limits.cofactors()); + assert_eq!( + cofactors.before().reference, + cofactors.after().reference, + "mempool mass normalization assumes the reference mass is stable across activation" + ); + cofactors + } + + /// Returns the forked maximum signature script length.
+ #[inline] + #[must_use] + pub fn max_signature_script_len(&self) -> ForkedParam<usize> { + ForkedParam::new(self.prior_max_signature_script_len, self.new_max_signature_script_len, self.covenants_activation) + } + pub fn ghostdag_k(&self) -> KType { self.blockrate.ghostdag_k } @@ -462,12 +556,14 @@ impl Params { max_tx_inputs: overrides.max_tx_inputs.unwrap_or(self.max_tx_inputs), max_tx_outputs: overrides.max_tx_outputs.unwrap_or(self.max_tx_outputs), - max_signature_script_len: overrides.max_signature_script_len.unwrap_or(self.max_signature_script_len), + prior_max_signature_script_len: overrides.prior_max_signature_script_len.unwrap_or(self.prior_max_signature_script_len), + new_max_signature_script_len: overrides.new_max_signature_script_len.unwrap_or(self.new_max_signature_script_len), max_script_public_key_len: overrides.max_script_public_key_len.unwrap_or(self.max_script_public_key_len), mass_per_tx_byte: overrides.mass_per_tx_byte.unwrap_or(self.mass_per_tx_byte), mass_per_script_pub_key_byte: overrides.mass_per_script_pub_key_byte.unwrap_or(self.mass_per_script_pub_key_byte), mass_per_sig_op: overrides.mass_per_sig_op.unwrap_or(self.mass_per_sig_op), - block_mass_limits: overrides.block_mass_limits.unwrap_or(self.block_mass_limits), + prior_block_mass_limits: overrides.prior_block_mass_limits.unwrap_or(self.prior_block_mass_limits), + new_transient_mass_limit: overrides.new_transient_mass_limit.unwrap_or(self.new_transient_mass_limit), block_lane_limits: overrides.block_lane_limits.unwrap_or(self.block_lane_limits), storage_mass_parameter: overrides.storage_mass_parameter.unwrap_or(self.storage_mass_parameter), @@ -573,7 +669,8 @@ pub const MAINNET_PARAMS: Params = Params { max_tx_inputs: 1000, max_tx_outputs: 1000, // Transient mass enforces a limit of 125Kb, however script engine max scripts size is 10Kb so there's no point in surpassing that.
- max_signature_script_len: 10_000, + prior_max_signature_script_len: PRIOR_MAX_SIGNATURE_SCRIPT_LEN, + new_max_signature_script_len: NEW_MAX_SIGNATURE_SCRIPT_LEN, // Compute mass enforces a limit of ~45.5Kb, however script engine max scripts size is 10Kb so there's no point in surpassing that. // Note that storage mass will kick in and gradually penalize also for lower lengths (generalized KIP-0009, plurality will be high). max_script_public_key_len: 10_000, @@ -581,7 +678,8 @@ pub const MAINNET_PARAMS: Params = Params { mass_per_tx_byte: 1, mass_per_script_pub_key_byte: 10, mass_per_sig_op: 1000, - block_mass_limits: BlockMassLimits::with_shared_limit(500_000), + prior_block_mass_limits: BlockMassLimits::with_shared_limit(500_000), + new_transient_mass_limit: 1_000_000, block_lane_limits: BlockLaneLimits { lanes_per_block: DEFAULT_LANES_PER_BLOCK_LIMIT, gas_per_lane: DEFAULT_GAS_PER_LANE_LIMIT }, storage_mass_parameter: STORAGE_MASS_PARAMETER, @@ -631,7 +729,8 @@ pub const TESTNET_PARAMS: Params = Params { max_tx_inputs: 1000, max_tx_outputs: 1000, // Transient mass enforces a limit of 125Kb, however script engine max scripts size is 10Kb so there's no point in surpassing that. - max_signature_script_len: 10_000, + prior_max_signature_script_len: PRIOR_MAX_SIGNATURE_SCRIPT_LEN, + new_max_signature_script_len: NEW_MAX_SIGNATURE_SCRIPT_LEN, // Compute mass enforces a limit of ~45.5Kb, however script engine max scripts size is 10Kb so there's no point in surpassing that. // Note that storage mass will kick in and gradually penalize also for lower lengths (generalized KIP-0009, plurality will be high). 
max_script_public_key_len: 10_000, @@ -639,7 +738,8 @@ pub const TESTNET_PARAMS: Params = Params { mass_per_tx_byte: 1, mass_per_script_pub_key_byte: 10, mass_per_sig_op: 1000, - block_mass_limits: BlockMassLimits::with_shared_limit(500_000), + prior_block_mass_limits: BlockMassLimits::with_shared_limit(500_000), + new_transient_mass_limit: 1_000_000, block_lane_limits: BlockLaneLimits { lanes_per_block: DEFAULT_LANES_PER_BLOCK_LIMIT, gas_per_lane: DEFAULT_GAS_PER_LANE_LIMIT }, storage_mass_parameter: STORAGE_MASS_PARAMETER, @@ -680,11 +780,12 @@ pub const TESTNET12_PARAMS: Params = Params { net: NetworkId::with_suffix(NetworkType::Testnet, 12), genesis: TESTNET12_GENESIS, - // Increased for stark proofs - max_signature_script_len: 300_000, + prior_max_signature_script_len: NEW_MAX_SIGNATURE_SCRIPT_LEN, + new_max_signature_script_len: NEW_MAX_SIGNATURE_SCRIPT_LEN, // Transient mass is increased for stark proofs - block_mass_limits: BlockMassLimits { compute: 500_000, storage: 500_000, transient: 1_000_000 }, + prior_block_mass_limits: BlockMassLimits { compute: 500_000, storage: 500_000, transient: 1_000_000 }, + new_transient_mass_limit: 1_000_000, deflationary_phase_daa_score: TenBps::deflationary_phase_daa_score(), pre_deflationary_phase_base_subsidy: TenBps::pre_deflationary_phase_base_subsidy(), @@ -713,15 +814,16 @@ pub const SIMNET_PARAMS: Params = Params { max_tx_inputs: 1000, max_tx_outputs: 1000, - // Increased for stark proofs - max_signature_script_len: 300_000, + prior_max_signature_script_len: NEW_MAX_SIGNATURE_SCRIPT_LEN, + new_max_signature_script_len: NEW_MAX_SIGNATURE_SCRIPT_LEN, max_script_public_key_len: 10_000, mass_per_tx_byte: 1, mass_per_script_pub_key_byte: 10, mass_per_sig_op: 1000, // Transient mass is increased for stark proofs - block_mass_limits: BlockMassLimits { compute: 500_000, storage: 500_000, transient: 1_000_000 }, + prior_block_mass_limits: BlockMassLimits::with_shared_limit(500_000), + new_transient_mass_limit: 1_000_000, 
block_lane_limits: BlockLaneLimits { lanes_per_block: DEFAULT_LANES_PER_BLOCK_LIMIT, gas_per_lane: DEFAULT_GAS_PER_LANE_LIMIT }, storage_mass_parameter: STORAGE_MASS_PARAMETER, @@ -754,8 +856,8 @@ pub const DEVNET_PARAMS: Params = Params { max_tx_inputs: 1000, max_tx_outputs: 1000, - // Increased for stark proofs - max_signature_script_len: 300_000, + prior_max_signature_script_len: NEW_MAX_SIGNATURE_SCRIPT_LEN, + new_max_signature_script_len: NEW_MAX_SIGNATURE_SCRIPT_LEN, max_script_public_key_len: 10_000, mass_per_tx_byte: 1, @@ -763,7 +865,8 @@ pub const DEVNET_PARAMS: Params = Params { mass_per_sig_op: 1000, // Transient mass is increased for stark proofs - block_mass_limits: BlockMassLimits { compute: 500_000, storage: 500_000, transient: 1_000_000 }, + prior_block_mass_limits: BlockMassLimits::with_shared_limit(500_000), + new_transient_mass_limit: 1_000_000, block_lane_limits: BlockLaneLimits { lanes_per_block: DEFAULT_LANES_PER_BLOCK_LIMIT, gas_per_lane: DEFAULT_GAS_PER_LANE_LIMIT }, storage_mass_parameter: STORAGE_MASS_PARAMETER, diff --git a/consensus/core/src/mass/mod.rs b/consensus/core/src/mass/mod.rs index 26c3e653e5..29eb773e43 100644 --- a/consensus/core/src/mass/mod.rs +++ b/consensus/core/src/mass/mod.rs @@ -178,9 +178,14 @@ impl NonContextualMasses { pub fn normalized_max(&self, cofactors: &MassCofactors) -> u64 { // Compute mass is already in the reference scale (compute limit). let c = self.compute_mass; - let t = (self.transient_mass as f64 * cofactors.transient).ceil() as u64; + let t = self.normalized_transient(cofactors); c.max(t) } + + /// Returns transient mass normalized to the compute-mass scale. 
+ pub fn normalized_transient(&self, cofactors: &MassCofactors) -> u64 { + (self.transient_mass as f64 * cofactors.transient).ceil() as u64 + } } impl std::fmt::Display for NonContextualMasses { @@ -526,7 +531,7 @@ mod tests { for net in NetworkType::iter() { let params: Params = net.into(); let max_spk_len = (params.max_script_public_key_len as u64) - .min(params.block_mass_limits.compute.div_ceil(params.mass_per_script_pub_key_byte)); + .min(params.block_mass_limits().after().compute.div_ceil(params.mass_per_script_pub_key_byte)); let max_plurality = (UTXO_CONST_STORAGE + max_spk_len).div_ceil(UTXO_UNIT_SIZE); // see utxo_plurality let product = params.storage_mass_parameter.checked_mul(max_plurality).and_then(|x| x.checked_mul(max_plurality)); // verify C·P^2 can never overflow diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs index 6c53f3a0ee..573c57320c 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -136,7 +136,7 @@ impl ConsensusServices { let transaction_validator = TransactionValidator::new( params.max_tx_inputs, params.max_tx_outputs, - params.max_signature_script_len, + params.max_signature_script_len(), params.max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity(), diff --git a/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs b/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs index 12aee1bca2..f61cb5786d 100644 --- a/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs +++ b/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs @@ -65,6 +65,7 @@ impl BlockBodyProcessor { } fn check_block_mass(self: &Arc<Self>, block: &Block) -> BlockProcessResult<()> { + let block_mass_limits = self.block_mass_limits.get(block.header.daa_score); let mut total_compute_mass: u64 = 0; let mut total_transient_mass: u64 = 0; let mut total_storage_mass: u64 = 0; @@ -84,14 +85,14 @@
impl BlockBodyProcessor { total_storage_mass = total_storage_mass.saturating_add(storage_mass_commitment); // Verify each dimension against its own limit - if total_compute_mass > self.block_mass_limits.compute { - return Err(RuleError::ExceedsComputeMassLimit(total_compute_mass, self.block_mass_limits.compute)); + if total_compute_mass > block_mass_limits.compute { + return Err(RuleError::ExceedsComputeMassLimit(total_compute_mass, block_mass_limits.compute)); } - if total_transient_mass > self.block_mass_limits.transient { - return Err(RuleError::ExceedsTransientMassLimit(total_transient_mass, self.block_mass_limits.transient)); + if total_transient_mass > block_mass_limits.transient { + return Err(RuleError::ExceedsTransientMassLimit(total_transient_mass, block_mass_limits.transient)); } - if total_storage_mass > self.block_mass_limits.storage { - return Err(RuleError::ExceedsStorageMassLimit(total_storage_mass, self.block_mass_limits.storage)); + if total_storage_mass > block_mass_limits.storage { + return Err(RuleError::ExceedsStorageMassLimit(total_storage_mass, block_mass_limits.storage)); } // Pre-Toccata valid blocks contain only native non-coinbase txs with zero gas, diff --git a/consensus/src/pipeline/body_processor/processor.rs b/consensus/src/pipeline/body_processor/processor.rs index 2903a3db85..3a88fbf40a 100644 --- a/consensus/src/pipeline/body_processor/processor.rs +++ b/consensus/src/pipeline/body_processor/processor.rs @@ -27,7 +27,10 @@ use kaspa_consensus_core::{ KType, block::Block, blockstatus::BlockStatus::{self, StatusHeaderOnly, StatusInvalid}, - config::{genesis::GenesisBlock, params::Params}, + config::{ + genesis::GenesisBlock, + params::{ForkedParam, Params}, + }, mass::{BlockLaneLimits, BlockMassLimits, Mass, MassCalculator}, tx::Transaction, }; @@ -55,7 +58,7 @@ pub struct BlockBodyProcessor { db: Arc<DB>, // Config - pub(super) block_mass_limits: BlockMassLimits, + pub(super) block_mass_limits: ForkedParam<BlockMassLimits>, pub(super)
block_lane_limits: BlockLaneLimits, pub(super) genesis: GenesisBlock, pub(super) _ghostdag_k: KType, @@ -108,7 +111,7 @@ impl BlockBodyProcessor { thread_pool, db, - block_mass_limits: params.block_mass_limits, + block_mass_limits: params.block_mass_limits(), block_lane_limits: params.block_lane_limits, genesis: params.genesis.clone(), _ghostdag_k: params.ghostdag_k(), diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index 92e82dd0fd..336b009a83 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -119,7 +119,7 @@ pub struct VirtualStateProcessor { pub(super) max_block_parents: u8, pub(super) mergeset_size_limit: u64, pub(super) finality_depth: u64, - pub(super) mass_cofactors: kaspa_consensus_core::mass::MassCofactors, + pub(super) mempool_mass_cofactors: kaspa_consensus_core::config::params::ForkedParam<kaspa_consensus_core::mass::MassCofactors>, // Stores pub(super) statuses_store: Arc<RwLock<DbStatusesStore>>, @@ -206,7 +206,7 @@ impl VirtualStateProcessor { genesis: params.genesis.clone(), max_block_parents: params.max_block_parents(), mergeset_size_limit: params.mergeset_size_limit(), - mass_cofactors: params.block_mass_limits.cofactors(), + mempool_mass_cofactors: params.mempool_block_mass_cofactors(), db, statuses_store: storage.statuses_store.clone(), diff --git a/consensus/src/pipeline/virtual_processor/utxo_validation.rs b/consensus/src/pipeline/virtual_processor/utxo_validation.rs index f9d3d8905a..a9eaede371 100644 --- a/consensus/src/pipeline/virtual_processor/utxo_validation.rs +++ b/consensus/src/pipeline/virtual_processor/utxo_validation.rs @@ -472,7 +472,7 @@ impl VirtualStateProcessor { // At this point we know all UTXO entries are populated, so we can safely pass the tx as verifiable let mass_and_feerate_threshold = args.feerate_threshold.map(|threshold| { let mass = kaspa_consensus_core::mass::Mass::new(mutable_tx.calculated_non_contextual_masses.unwrap(),
contextual_mass); - (mass.normalized_max(&self.mass_cofactors), threshold) + (mass.normalized_max(&self.mempool_mass_cofactors.get(pov_daa_score)), threshold) }); let seq_commit_accessor = if self.covenants_activation.is_active(pov_daa_score) { diff --git a/consensus/src/processes/transaction_validator/mod.rs b/consensus/src/processes/transaction_validator/mod.rs index f804799186..2a090df84a 100644 --- a/consensus/src/processes/transaction_validator/mod.rs +++ b/consensus/src/processes/transaction_validator/mod.rs @@ -9,13 +9,17 @@ use kaspa_txscript::{ caches::{Cache, TxScriptCacheCounters}, }; -use kaspa_consensus_core::{KType, config::params::ForkActivation, mass::MassCalculator}; +use kaspa_consensus_core::{ + KType, + config::params::{ForkActivation, ForkedParam}, + mass::MassCalculator, +}; #[derive(Clone)] pub struct TransactionValidator { max_tx_inputs: usize, max_tx_outputs: usize, - max_signature_script_len: usize, + max_signature_script_len: ForkedParam<usize>, max_script_public_key_len: usize, coinbase_payload_script_public_key_max_len: u8, coinbase_maturity: u64, @@ -32,7 +36,7 @@ impl TransactionValidator { pub fn new( max_tx_inputs: usize, max_tx_outputs: usize, - max_signature_script_len: usize, + max_signature_script_len: impl Into<ForkedParam<usize>>, max_script_public_key_len: usize, coinbase_payload_script_public_key_max_len: u8, coinbase_maturity: u64, @@ -45,7 +49,7 @@ impl TransactionValidator { Self { max_tx_inputs, max_tx_outputs, - max_signature_script_len, + max_signature_script_len: max_signature_script_len.into(), max_script_public_key_len, coinbase_payload_script_public_key_max_len, coinbase_maturity, @@ -60,7 +64,7 @@ impl TransactionValidator { pub fn new_for_tests( max_tx_inputs: usize, max_tx_outputs: usize, - max_signature_script_len: usize, + max_signature_script_len: impl Into<ForkedParam<usize>>, max_script_public_key_len: usize, coinbase_payload_script_public_key_max_len: u8, coinbase_maturity: u64, @@ -70,7 +74,7 @@ impl TransactionValidator { Self { max_tx_inputs,
max_tx_outputs, - max_signature_script_len, + max_signature_script_len: max_signature_script_len.into(), max_script_public_key_len, coinbase_payload_script_public_key_max_len, coinbase_maturity, diff --git a/consensus/src/processes/transaction_validator/tx_validation_in_header_context.rs b/consensus/src/processes/transaction_validator/tx_validation_in_header_context.rs index 7496e3d0a3..200a58158d 100644 --- a/consensus/src/processes/transaction_validator/tx_validation_in_header_context.rs +++ b/consensus/src/processes/transaction_validator/tx_validation_in_header_context.rs @@ -49,7 +49,8 @@ impl TransactionValidator { block_daa_score: u64, ) -> TxResult<()> { self.check_tx_is_finalized(tx, lock_time_arg)?; - self.check_transaction_version(tx, block_daa_score) + self.check_transaction_version(tx, block_daa_score)?; + self.check_transaction_signature_scripts_in_header_context(tx, block_daa_score) } pub(crate) fn get_lock_time_type(tx: &Transaction) -> LockTimeType { @@ -106,4 +107,14 @@ impl TransactionValidator { Ok(()) } + + // TODO(post-toccata): Remove this and restore the context-free check_transaction_signature_scripts. 
+ fn check_transaction_signature_scripts_in_header_context(&self, tx: &Transaction, block_daa_score: u64) -> TxResult<()> { + let max_signature_script_len = self.max_signature_script_len.get(block_daa_score); + if let Some(i) = tx.inputs.iter().position(|input| input.signature_script.len() > max_signature_script_len) { + return Err(TxRuleError::TooBigSignatureScript(i, max_signature_script_len)); + } + + Ok(()) + } } diff --git a/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs b/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs index e5451b83c8..22a7924db7 100644 --- a/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs +++ b/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs @@ -89,8 +89,11 @@ impl TransactionValidator { // The main purpose of this check is to avoid overflows when calculating transaction mass later. fn check_transaction_signature_scripts(&self, tx: &Transaction) -> TxResult<()> { - if let Some(i) = tx.inputs.iter().position(|input| input.signature_script.len() > self.max_signature_script_len) { - return Err(TxRuleError::TooBigSignatureScript(i, self.max_signature_script_len)); + // TODO(post-toccata): restore this to the const post-activation limit and remove + // check_transaction_signature_scripts_in_header_context. 
+ let max_signature_script_len = self.max_signature_script_len.upper_bound(); + if let Some(i) = tx.inputs.iter().position(|input| input.signature_script.len() > max_signature_script_len) { + return Err(TxRuleError::TooBigSignatureScript(i, max_signature_script_len)); } Ok(()) @@ -226,7 +229,7 @@ mod tests { use kaspa_core::assert_match; use crate::{ - params::MAINNET_PARAMS, + params::{ForkActivation, MAINNET_PARAMS}, processes::transaction_validator::{TransactionValidator, errors::TxRuleError, tx_validation_in_header_context::LockTimeArg}, }; @@ -238,7 +241,7 @@ mod tests { let tv = TransactionValidator::new_for_tests( params.max_tx_inputs, params.max_tx_outputs, - params.max_signature_script_len, + params.max_signature_script_len(), params.max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity(), @@ -347,9 +350,30 @@ mod tests { assert_match!(tv.validate_tx_in_isolation(&tx), Err(TxRuleError::TooManyInputs(_, _))); let mut tx = valid_tx.clone(); - tx.inputs[0].signature_script = vec![0; params.max_signature_script_len + 1]; + tx.inputs[0].signature_script = vec![0; params.max_signature_script_len().upper_bound() + 1]; assert_match!(tv.validate_tx_in_isolation(&tx), Err(TxRuleError::TooBigSignatureScript(_, _))); + let mut forked_params = params.clone(); + forked_params.covenants_activation = ForkActivation::new(100); + let forked_tv = TransactionValidator::new_for_tests( + forked_params.max_tx_inputs, + forked_params.max_tx_outputs, + forked_params.max_signature_script_len(), + forked_params.max_script_public_key_len, + forked_params.coinbase_payload_script_public_key_max_len, + forked_params.coinbase_maturity(), + forked_params.ghostdag_k(), + Default::default(), + ); + let mut tx = valid_tx.clone(); + tx.inputs[0].signature_script = vec![0; forked_params.prior_max_signature_script_len + 1]; + assert_match!(forked_tv.validate_tx_in_isolation(&tx), Ok(())); + assert_match!( + 
forked_tv.validate_tx_in_header_context(&tx, LockTimeArg::Finalized, 99), + Err(TxRuleError::TooBigSignatureScript(_, _)) + ); + assert_match!(forked_tv.validate_tx_in_header_context(&tx, LockTimeArg::Finalized, 100), Ok(())); + let mut tx = valid_tx.clone(); tx.outputs = (0..params.max_tx_outputs + 1).map(|_| valid_tx.outputs[0].clone()).collect(); assert_match!(tv.validate_tx_in_isolation(&tx), Err(TxRuleError::TooManyOutputs(_, _))); diff --git a/consensus/src/processes/transaction_validator/tx_validation_in_utxo_context.rs b/consensus/src/processes/transaction_validator/tx_validation_in_utxo_context.rs index 7424d432d1..e38fe860d4 100644 --- a/consensus/src/processes/transaction_validator/tx_validation_in_utxo_context.rs +++ b/consensus/src/processes/transaction_validator/tx_validation_in_utxo_context.rs @@ -351,7 +351,7 @@ mod tests { let tv = TransactionValidator::new( params.max_tx_inputs, params.max_tx_outputs, - params.max_signature_script_len, + params.max_signature_script_len(), params.max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity(), @@ -509,7 +509,7 @@ mod tests { let tv = TransactionValidator::new_for_tests( params.max_tx_inputs, params.max_tx_outputs, - params.max_signature_script_len, + params.max_signature_script_len(), params.max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity(), @@ -584,7 +584,7 @@ mod tests { let tv = TransactionValidator::new_for_tests( params.max_tx_inputs, params.max_tx_outputs, - params.max_signature_script_len, + params.max_signature_script_len(), params.max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity(), @@ -664,7 +664,7 @@ mod tests { let tv = TransactionValidator::new_for_tests( params.max_tx_inputs, params.max_tx_outputs, - params.max_signature_script_len, + params.max_signature_script_len(), params.max_script_public_key_len, 
params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity(), @@ -745,7 +745,7 @@ mod tests { let tv = TransactionValidator::new_for_tests( params.max_tx_inputs, params.max_tx_outputs, - params.max_signature_script_len, + params.max_signature_script_len(), params.max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity(), @@ -828,7 +828,7 @@ mod tests { let tv = TransactionValidator::new_for_tests( params.max_tx_inputs, params.max_tx_outputs, - params.max_signature_script_len, + params.max_signature_script_len(), params.max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity(), @@ -911,7 +911,7 @@ mod tests { let tv = TransactionValidator::new_for_tests( params.max_tx_inputs, params.max_tx_outputs, - params.max_signature_script_len, + params.max_signature_script_len(), params.max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity(), @@ -993,7 +993,7 @@ mod tests { let tv = TransactionValidator::new_for_tests( params.max_tx_inputs, params.max_tx_outputs, - params.max_signature_script_len, + params.max_signature_script_len(), params.max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity(), @@ -1062,7 +1062,7 @@ mod tests { let tv = TransactionValidator::new_for_tests( params.max_tx_inputs, params.max_tx_outputs, - params.max_signature_script_len, + params.max_signature_script_len(), params.max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity(), diff --git a/crypto/txscript/src/zk_precompiles/tags.rs b/crypto/txscript/src/zk_precompiles/tags.rs index 803bb3211b..124ea2acfb 100644 --- a/crypto/txscript/src/zk_precompiles/tags.rs +++ b/crypto/txscript/src/zk_precompiles/tags.rs @@ -73,7 +73,7 @@ mod tests { } fn cost_block_capacity(tag: ZkTag) -> u64 { - let compute_mass_limit = 
MAINNET_PARAMS.block_mass_limits.compute; + let compute_mass_limit = MAINNET_PARAMS.block_mass_limits().after().compute; let cost_in_compute_mass = u64::from(tag.cost()) / SCRIPT_UNITS_PER_GRAM; compute_mass_limit / cost_in_compute_mass } diff --git a/docs/override-params.md b/docs/override-params.md index 799ca1a06a..cf9a009cc9 100644 --- a/docs/override-params.md +++ b/docs/override-params.md @@ -34,7 +34,12 @@ If the file cannot be read or parsed, `kaspad` prints the error and exits. "mass_per_tx_byte": 1, "mass_per_script_pub_key_byte": 10, "mass_per_sig_op": 1000, - "max_block_mass": 500000, + "prior_block_mass_limits": { + "storage": 500000, + "compute": 500000, + "transient": 500000 + }, + "new_transient_mass_limit": 1000000, "storage_mass_parameter": 10000, "deflationary_phase_daa_score": 15519600, "pre_deflationary_phase_base_subsidy": 50000000000, @@ -73,12 +78,14 @@ because they have logical relations and should be modified as a unit. | max_coinbase_payload_len | Maximum coinbase payload length | | max_tx_inputs | Max transaction inputs | | max_tx_outputs | Max transaction outputs | -| max_signature_script_len | Max signature script length | +| prior_max_signature_script_len | Pre-Toccata max signature script length | +| new_max_signature_script_len | Post-Toccata max signature script length | | max_script_public_key_len | Max script public key length | | mass_per_tx_byte | Mass per transaction byte | | mass_per_script_pub_key_byte | Mass per script public key byte | | mass_per_sig_op | Mass per signature operation | -| max_block_mass | Maximum block mass | +| prior_block_mass_limits | Pre-Toccata block mass limits | +| new_transient_mass_limit | Post-Toccata transient mass limit | | storage_mass_parameter | Storage mass parameter | | deflationary_phase_daa_score | Deflationary phase DAA score | | pre_deflationary_phase_base_subsidy | Pre-deflationary phase base subsidy | @@ -132,4 +139,4 @@ mv /path/to/simpa/database 
~/.rusty-kaspa/kaspa-simnet/datadir/consensus/consens And finally launch kaspad with: ```bash kaspad --simnet --override-params-file overrides.json -``` \ No newline at end of file +``` diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index bdfcb1f430..2817be52c8 100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -374,7 +374,8 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm // This worst case usage only considers block space. It does not account for usage of // other stores (reachability, block status, mempool, etc.) let worst_case_usage = ((total_blocks + finality_depth) - * (config.block_mass_limits.transient / TRANSIENT_BYTE_TO_MASS_FACTOR)) as f64 + * (config.block_mass_limits().after().transient / TRANSIENT_BYTE_TO_MASS_FACTOR)) + as f64 / ONE_GIGABYTE; info!( @@ -628,7 +629,7 @@ Do you confirm? (y/n)"; let mining_manager = MiningManagerProxy::new(Arc::new(MiningManager::new_with_extended_config( config.target_time_per_block(), false, - config.block_mass_limits, + config.mempool_block_mass_limits(), config.block_lane_limits, config.ram_scale, config.block_template_cache_lifetime, diff --git a/mining/errors/src/mempool.rs b/mining/errors/src/mempool.rs index b5a0d2140b..f7239c9928 100644 --- a/mining/errors/src/mempool.rs +++ b/mining/errors/src/mempool.rs @@ -41,6 +41,18 @@ pub enum RuleError { #[error("transaction {0} is not standard: {1}")] RejectNonStandard(TransactionId, String), + #[error("transaction compute mass of {1} is larger than max allowed size of {2}")] + RejectComputeMass(TransactionId, u64, u64), + + #[error("transaction transient (storage) mass of {1} is larger than max allowed size of {2}")] + RejectTransientMass(TransactionId, u64, u64), + + #[error("transaction storage mass of {1} is larger than max allowed size of {2}")] + RejectStorageMass(TransactionId, u64, u64), + + #[error("transaction gas of {1} is larger than max allowed per-lane gas of {2}")] + RejectGas(TransactionId, u64, 
u64), + #[error("one of the transaction inputs spends an immature UTXO: {0}")] RejectImmatureSpend(TxRuleError), @@ -124,14 +136,16 @@ pub enum NonStandardError { #[error("transaction output #{1}: non-standard script form")] RejectOutputScriptClass(TransactionId, usize), - #[error("transaction output #{1}: payment of {2} is dust")] - RejectDust(TransactionId, usize, u64), - #[error("transaction input {1}: non-standard script form")] RejectInputScriptClass(TransactionId, usize), - #[error("transaction has {1} fees which is under the required amount of {2}")] - RejectInsufficientFee(TransactionId, u64, u64), + #[error("transaction has {1} fees which is under the required amount of {2} for compute mass {3}")] + RejectInsufficientComputeFee(TransactionId, u64, u64, u64), + + #[error( + "transaction has {1} fees which is under the required amount of {2} for normalized transient mass {3} (proportional to transaction byte size)" + )] + RejectInsufficientTransientFee(TransactionId, u64, u64, u64), #[error("transaction input #{1} has {2} signature operations which is more than the allowed max amount of {3}")] RejectSignatureCount(TransactionId, usize, u64, u16), @@ -148,9 +162,9 @@ impl NonStandardError { NonStandardError::RejectSignatureScriptSize(id, _, _, _) => id, NonStandardError::RejectScriptPublicKeyVersion(id, _) => id, NonStandardError::RejectOutputScriptClass(id, _) => id, - NonStandardError::RejectDust(id, _, _) => id, NonStandardError::RejectInputScriptClass(id, _) => id, - NonStandardError::RejectInsufficientFee(id, _, _) => id, + NonStandardError::RejectInsufficientComputeFee(id, _, _, _) => id, + NonStandardError::RejectInsufficientTransientFee(id, _, _, _) => id, NonStandardError::RejectSignatureCount(id, _, _, _) => id, } } diff --git a/mining/src/lib.rs b/mining/src/lib.rs index ab09260cf3..dda9e23406 100644 --- a/mining/src/lib.rs +++ b/mining/src/lib.rs @@ -14,6 +14,8 @@ mod manager_tests; pub mod mempool; pub mod model; pub mod monitor; +#[cfg(test)] 
+mod toccata_transient_mass_activation_tests; // Exposed for benchmarks pub use block_template::policy::Policy; diff --git a/mining/src/manager.rs b/mining/src/manager.rs index d757f1e1c4..55100f2de9 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -28,9 +28,10 @@ use kaspa_consensus_core::{ }, block::{BlockTemplate, TemplateBuildMode, TemplateTransactionSelector}, coinbase::MinerData, + config::params::ForkedParam, errors::{block::RuleError as BlockRuleError, tx::TxRuleError}, mass::{BlockLaneLimits, BlockMassLimits}, - tx::{MutableTransaction, Transaction, TransactionId, TransactionOutput}, + tx::{MutableTransaction, Transaction, TransactionId}, }; use kaspa_consensusmanager::{ConsensusProxy, spawn_blocking}; use kaspa_core::{debug, error, info, time::Stopwatch, warn}; @@ -50,26 +51,28 @@ impl MiningManager { pub fn new( target_time_per_block: u64, relay_non_std_transactions: bool, - block_mass_limits: BlockMassLimits, + mempool_block_mass_limits: impl Into>, block_lane_limits: BlockLaneLimits, cache_lifetime: Option, counters: Arc, ) -> Self { - let config = Config::build_default(target_time_per_block, relay_non_std_transactions, block_mass_limits, block_lane_limits); + let config = + Config::build_default(target_time_per_block, relay_non_std_transactions, mempool_block_mass_limits, block_lane_limits); Self::with_config(config, cache_lifetime, counters) } pub fn new_with_extended_config( target_time_per_block: u64, relay_non_std_transactions: bool, - block_mass_limits: BlockMassLimits, + mempool_block_mass_limits: impl Into>, block_lane_limits: BlockLaneLimits, ram_scale: f64, cache_lifetime: Option, counters: Arc, ) -> Self { - let config = Config::build_default(target_time_per_block, relay_non_std_transactions, block_mass_limits, block_lane_limits) - .apply_ram_scale(ram_scale); + let config = + Config::build_default(target_time_per_block, relay_non_std_transactions, mempool_block_mass_limits, block_lane_limits) + .apply_ram_scale(ram_scale); 
Self::with_config(config, cache_lifetime, counters) } @@ -207,7 +210,8 @@ impl MiningManager { /// Returns realtime feerate estimations based on internal mempool state pub(crate) fn get_realtime_feerate_estimations(&self) -> FeerateEstimations { - let args = FeerateEstimatorArgs::new(self.config.network_blocks_per_second, self.config.block_mass_cofactors.reference); + let args = + FeerateEstimatorArgs::new(self.config.network_blocks_per_second, self.config.mempool_mass_cofactors.after().reference); let estimator = self.mempool.read().build_feerate_estimator(args); estimator.calc_estimations(self.config.minimum_feerate()) } @@ -218,7 +222,8 @@ impl MiningManager { consensus: &dyn ConsensusApi, prefix: kaspa_addresses::Prefix, ) -> MiningManagerResult { - let args = FeerateEstimatorArgs::new(self.config.network_blocks_per_second, self.config.block_mass_cofactors.reference); + let args = + FeerateEstimatorArgs::new(self.config.network_blocks_per_second, self.config.mempool_mass_cofactors.after().reference); let network_mass_per_second = args.network_mass_per_second(); let mempool_read = self.mempool.read(); let estimator = mempool_read.build_feerate_estimator(args); @@ -350,7 +355,8 @@ impl MiningManager { // We process the transactions by chunks of max block mass to prevent locking the virtual processor for too long. 
let mut lower_bound: usize = 0; let mut validation_results = Vec::with_capacity(transactions.len()); - while let Some(upper_bound) = self.next_transaction_chunk_upper_bound(&transactions, lower_bound) { + let virtual_daa_score = consensus.get_virtual_daa_score(); + while let Some(upper_bound) = self.next_transaction_chunk_upper_bound(&transactions, lower_bound, virtual_daa_score) { assert!(lower_bound < upper_bound, "the chunk is never empty"); validation_results.extend(validate_mempool_transactions_in_parallel( consensus, @@ -462,7 +468,8 @@ impl MiningManager { // We process the transactions by chunks of max block mass to prevent locking the virtual processor for too long. let mut lower_bound: usize = 0; let mut validation_results = Vec::with_capacity(transactions.len()); - while let Some(upper_bound) = self.next_transaction_chunk_upper_bound(&transactions, lower_bound) { + let virtual_daa_score = consensus.get_virtual_daa_score(); + while let Some(upper_bound) = self.next_transaction_chunk_upper_bound(&transactions, lower_bound, virtual_daa_score) { assert!(lower_bound < upper_bound, "the chunk is never empty"); validation_results.extend(validate_mempool_transactions_in_parallel( consensus, @@ -511,16 +518,22 @@ impl MiningManager { insert_results } - fn next_transaction_chunk_upper_bound(&self, transactions: &[MutableTransaction], lower_bound: usize) -> Option { + fn next_transaction_chunk_upper_bound( + &self, + transactions: &[MutableTransaction], + lower_bound: usize, + virtual_daa_score: u64, + ) -> Option { if lower_bound >= transactions.len() { return None; } + let cofactors = self.config.mempool_mass_cofactors.get(virtual_daa_score); let mut mass = 0; transactions[lower_bound..] 
.iter() .position(|tx| { - mass += tx.calculated_non_contextual_masses.unwrap().normalized_max(&self.config.block_mass_cofactors); - mass >= self.config.block_mass_cofactors.reference + mass += tx.calculated_non_contextual_masses.unwrap().normalized_max(&cofactors); + mass >= cofactors.reference }) // Make sure the upper bound is greater than the lower bound, allowing to handle a very unlikely, // (if not impossible) case where the mass of a single transaction is greater than the maximum @@ -702,7 +715,8 @@ impl MiningManager { // We process the transactions by chunks of max block mass to prevent locking the virtual processor for too long. let mut lower_bound: usize = 0; let mut validation_results = Vec::with_capacity(transactions.len()); - while let Some(upper_bound) = self.next_transaction_chunk_upper_bound(&transactions, lower_bound) { + let virtual_daa_score = consensus.get_virtual_daa_score(); + while let Some(upper_bound) = self.next_transaction_chunk_upper_bound(&transactions, lower_bound, virtual_daa_score) { assert!(lower_bound < upper_bound, "the chunk is never empty"); let _swo = Stopwatch::<60>::with_threshold("revalidate validate_mempool_transactions_in_parallel op"); validation_results @@ -812,17 +826,6 @@ impl MiningManager { } } - /// is_transaction_output_dust returns whether or not the passed transaction output - /// amount is considered dust or not based on the configured minimum transaction - /// relay fee. - /// - /// Dust is defined in terms of the minimum transaction relay fee. In particular, - /// if the cost to the network to spend coins is more than 1/3 of the minimum - /// transaction relay fee, it is considered dust. 
- pub fn is_transaction_output_dust(&self, transaction_output: &TransactionOutput) -> bool { - self.mempool.read().is_transaction_output_dust(transaction_output) - } - pub fn has_accepted_transaction(&self, transaction_id: &TransactionId) -> bool { self.mempool.read().has_accepted_transaction(transaction_id) } diff --git a/mining/src/mempool/check_transaction_limits.rs b/mining/src/mempool/check_transaction_limits.rs new file mode 100644 index 0000000000..6d55a0cd4b --- /dev/null +++ b/mining/src/mempool/check_transaction_limits.rs @@ -0,0 +1,196 @@ +use crate::mempool::{ + Mempool, + errors::{RuleError, RuleResult}, +}; +use kaspa_consensus_core::{mass::NonContextualMasses, tx::MutableTransaction}; + +impl Mempool { + /// Validates non-contextual transaction dimensions against the consensus block limits. + /// + /// This is intentionally separate from standardness: even when non-standard transactions are accepted, + /// the mempool must not admit a transaction which selectors can never include in a block. The transaction + /// is expected to have its non-contextual masses populated before this call. These checks run before + /// consensus in-context validation so transactions above compute/transient limits do not reach script execution. 
+ pub(crate) fn validate_transaction_limits_in_isolation( + &self, + transaction: &MutableTransaction, + virtual_daa_score: u64, + ) -> RuleResult<()> { + if transaction.tx.gas > self.config.block_lane_limits.gas_per_lane { + return Err(RuleError::RejectGas(transaction.id(), transaction.tx.gas, self.config.block_lane_limits.gas_per_lane)); + } + + let limits = self.config.mempool_block_mass_limits.get(virtual_daa_score); + let NonContextualMasses { compute_mass, transient_mass } = transaction.calculated_non_contextual_masses.unwrap(); + if compute_mass > limits.compute { + return Err(RuleError::RejectComputeMass(transaction.id(), compute_mass, limits.compute)); + } + if transient_mass > limits.transient { + return Err(RuleError::RejectTransientMass(transaction.id(), transient_mass, limits.transient)); + } + + Ok(()) + } + + /// Validates contextual transaction dimensions against the consensus block limits. + /// + /// This is intentionally separate from standardness: even when non-standard transactions are accepted, + /// the mempool must not admit a transaction which selectors can never include in a block. The transaction + /// is expected to have contextual storage mass populated by consensus validation before this call. 
+ pub(crate) fn validate_transaction_limits_in_context( + &self, + transaction: &MutableTransaction, + virtual_daa_score: u64, + ) -> RuleResult<()> { + let limits = self.config.mempool_block_mass_limits.get(virtual_daa_score); + let storage_mass = transaction.tx.mass(); + if storage_mass > limits.storage { + return Err(RuleError::RejectStorageMass(transaction.id(), storage_mass, limits.storage)); + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{MiningCounters, mempool::config::Config}; + use kaspa_consensus_core::{ + config::constants::consensus::DEFAULT_LANES_PER_BLOCK_LIMIT, + constants::{MAX_TX_IN_SEQUENCE_NUM, SOMPI_PER_KASPA, TX_VERSION}, + mass::{BlockLaneLimits, BlockMassLimits}, + subnets::SUBNETWORK_ID_NATIVE, + tx::{ScriptPublicKey, Transaction, TransactionInput, TransactionOutpoint, TransactionOutput, UtxoEntry, scriptvec}, + }; + use kaspa_hashes::Hash; + use std::sync::Arc; + + const LIMITS: BlockMassLimits = BlockMassLimits { compute: 100, storage: 200, transient: 300 }; + const GAS_PER_LANE: u64 = 7; + + enum Expected { + Ok, + RejectGas(u64, u64), + RejectComputeMass(u64, u64), + RejectTransientMass(u64, u64), + RejectStorageMass(u64, u64), + } + + struct Test { + name: &'static str, + gas: u64, + compute_mass: u64, + transient_mass: u64, + storage_mass: u64, + expected: Expected, + } + + fn mempool() -> Mempool { + let config = Config::build_default( + 100, + true, + LIMITS, + BlockLaneLimits { lanes_per_block: DEFAULT_LANES_PER_BLOCK_LIMIT, gas_per_lane: GAS_PER_LANE }, + ); + Mempool::new(Arc::new(config), Arc::new(MiningCounters::default())) + } + + fn transaction(gas: u64, compute_mass: u64, transient_mass: u64, storage_mass: u64) -> MutableTransaction { + let script_public_key = ScriptPublicKey::new(0, scriptvec![0x51]); + let outpoint = TransactionOutpoint::new(Hash::from_u64_word(1), 0); + let input = TransactionInput::new(outpoint, vec![], MAX_TX_IN_SEQUENCE_NUM, 0); + let output = 
TransactionOutput::new(SOMPI_PER_KASPA, script_public_key.clone()); + let tx = Transaction::new(TX_VERSION, vec![input], vec![output], 0, SUBNETWORK_ID_NATIVE, gas, vec![]); + tx.set_mass(storage_mass); + let entry = UtxoEntry::new(SOMPI_PER_KASPA, script_public_key, 0, false, None); + let mut tx = MutableTransaction::with_entries(tx.into(), vec![entry]); + tx.calculated_non_contextual_masses = Some(NonContextualMasses::new(compute_mass, transient_mass)); + tx + } + + fn assert_expected(name: &str, result: RuleResult<()>, tx: &MutableTransaction, expected: Expected) { + let expected = match expected { + Expected::Ok => Ok(()), + Expected::RejectGas(gas, limit) => Err(RuleError::RejectGas(tx.id(), gas, limit)), + Expected::RejectComputeMass(mass, limit) => Err(RuleError::RejectComputeMass(tx.id(), mass, limit)), + Expected::RejectTransientMass(mass, limit) => Err(RuleError::RejectTransientMass(tx.id(), mass, limit)), + Expected::RejectStorageMass(mass, limit) => Err(RuleError::RejectStorageMass(tx.id(), mass, limit)), + }; + assert_eq!(result, expected, "failed for test '{name}'"); + } + + #[test] + fn test_validate_transaction_limits_in_isolation() { + let tests = [ + Test { + name: "non-contextual values at limits", + gas: GAS_PER_LANE, + compute_mass: LIMITS.compute, + transient_mass: LIMITS.transient, + storage_mass: 0, + expected: Expected::Ok, + }, + Test { + name: "transaction gas exceeds the per-lane limit", + gas: GAS_PER_LANE + 1, + compute_mass: 1, + transient_mass: 1, + storage_mass: 0, + expected: Expected::RejectGas(GAS_PER_LANE + 1, GAS_PER_LANE), + }, + Test { + name: "transaction compute mass exceeds the block limit", + gas: 0, + compute_mass: LIMITS.compute + 1, + transient_mass: 1, + storage_mass: 0, + expected: Expected::RejectComputeMass(LIMITS.compute + 1, LIMITS.compute), + }, + Test { + name: "transaction transient byte size exceeds the block limit", + gas: 0, + compute_mass: 1, + transient_mass: LIMITS.transient + 1, + storage_mass: 0, + 
expected: Expected::RejectTransientMass(LIMITS.transient + 1, LIMITS.transient), + }, + ]; + + let mempool = mempool(); + for test in tests { + let tx = transaction(test.gas, test.compute_mass, test.transient_mass, test.storage_mass); + let result = mempool.validate_transaction_limits_in_isolation(&tx, 0); + assert_expected(test.name, result, &tx, test.expected); + } + } + + #[test] + fn test_validate_transaction_limits_in_context() { + let tests = [ + Test { + name: "contextual storage value at limit", + gas: 0, + compute_mass: 1, + transient_mass: 1, + storage_mass: LIMITS.storage, + expected: Expected::Ok, + }, + Test { + name: "transaction storage mass exceeds the block limit", + gas: 0, + compute_mass: 1, + transient_mass: 1, + storage_mass: LIMITS.storage + 1, + expected: Expected::RejectStorageMass(LIMITS.storage + 1, LIMITS.storage), + }, + ]; + + let mempool = mempool(); + for test in tests { + let tx = transaction(test.gas, test.compute_mass, test.transient_mass, test.storage_mass); + let result = mempool.validate_transaction_limits_in_context(&tx, 0); + assert_expected(test.name, result, &tx, test.expected); + } + } +} diff --git a/mining/src/mempool/check_transaction_standard.rs b/mining/src/mempool/check_transaction_standard.rs index 91b37b0a0b..a3cd06bdf4 100644 --- a/mining/src/mempool/check_transaction_standard.rs +++ b/mining/src/mempool/check_transaction_standard.rs @@ -2,83 +2,28 @@ use crate::mempool::{ Mempool, errors::{NonStandardError, NonStandardResult}, }; +use kaspa_consensus_core::hashing::sighash::SigHashReusedValuesUnsync; use kaspa_consensus_core::{ constants::{MAX_SCRIPT_PUBLIC_KEY_VERSION, MAX_SOMPI}, - mass, - tx::{MutableTransaction, PopulatedTransaction, TransactionOutput}, + tx::{MutableTransaction, PopulatedTransaction}, }; -use kaspa_consensus_core::{hashing::sighash::SigHashReusedValuesUnsync, mass::NonContextualMasses}; -use kaspa_txscript::{get_sig_op_count_upper_bound, is_unspendable, script_class::ScriptClass}; +use 
kaspa_txscript::{get_sig_op_count_upper_bound, script_class::ScriptClass}; /// MAX_STANDARD_P2SH_SIG_OPS is the maximum number of signature operations /// that are considered standard in a pay-to-script-hash script. -const MAX_STANDARD_P2SH_SIG_OPS: u16 = 1000; // TODO(covpp-mainnet) - -/// MAXIMUM_STANDARD_SIGNATURE_SCRIPT_SIZE is the maximum size allowed for a -/// transaction input signature script to be considered standard. This -/// value allows for a 15-of-15 CHECKMULTISIG pay-to-script-hash with -/// compressed keys. -/// -/// The form of the overall script is: OP_0 <15 signatures> OP_PUSHDATA2 -/// <2 bytes len> [OP_15 <15 pubkeys> OP_15 OP_CHECKMULTISIG] /// -/// For the p2sh script portion, each of the 15 compressed pubkeys are -/// 33 bytes (plus one for the OP_DATA_33 opcode), and the thus it totals -/// to (15*34)+3 = 513 bytes. Next, each of the 15 signatures is a max -/// of 73 bytes (plus one for the OP_DATA_73 opcode). Also, there is one -/// extra byte for the initial extra OP_0 push and 3 bytes for the -/// OP_PUSHDATA2 needed to specify the 513 bytes for the script push. -/// That brings the total to 1+(15*74)+3+513 = 1627. This value also -/// adds a few extra bytes to provide a little buffer. -/// (1 + 15*74 + 3) + (15*34 + 3) + 23 = 1650 -const MAXIMUM_STANDARD_SIGNATURE_SCRIPT_SIZE: u64 = 300_000; // TODO(covpp-mainnet) - -/// MAXIMUM_STANDARD_TRANSACTION_MASS is the maximum mass allowed for transactions that -/// are considered standard and will therefore be relayed and considered for mining. -const MAXIMUM_STANDARD_TRANSACTION_MASS: u64 = 500_000; // TODO(covpp-mainnet) -const MAXIMUM_STANDARD_TRANSACTION_TRANSIENT_MASS: u64 = 1_000_000; // TODO(covpp-mainnet) +/// The upper-bound execution limit comes from compute mass: some zk opcodes already cost the equivalent +/// of roughly 140-250 signature operations. 
However, for classic Schnorr/ECDSA signature operations, this +/// standardness limit encourages parallelism across inputs rather than concentrating work in one input. +/// It is also at least as permissive as the previous standard compute-mass limit of 100k, +/// which allowed at most 100 sigops since each sigop costs 1000 grams. +const MAX_STANDARD_P2SH_SIG_OPS: u16 = 100; impl Mempool { pub(crate) fn check_transaction_standard_in_isolation(&self, transaction: &MutableTransaction) -> NonStandardResult<()> { let transaction_id = transaction.id(); - // Since extremely large transactions with a lot of inputs can cost - // almost as much to process as the sender fees, limit the maximum - // size of a transaction. This also helps mitigate CPU exhaustion - // attacks. - let NonContextualMasses { compute_mass, transient_mass } = transaction.calculated_non_contextual_masses.unwrap(); - if compute_mass > MAXIMUM_STANDARD_TRANSACTION_MASS { - return Err(NonStandardError::RejectComputeMass(transaction_id, compute_mass, MAXIMUM_STANDARD_TRANSACTION_MASS)); - } - if transient_mass > MAXIMUM_STANDARD_TRANSACTION_TRANSIENT_MASS { - return Err(NonStandardError::RejectTransientMass( - transaction_id, - transient_mass, - MAXIMUM_STANDARD_TRANSACTION_TRANSIENT_MASS, - )); - } - if transaction.tx.gas > self.config.block_lane_limits.gas_per_lane { - return Err(NonStandardError::RejectGas(transaction_id, transaction.tx.gas, self.config.block_lane_limits.gas_per_lane)); - } - - for (i, input) in transaction.tx.inputs.iter().enumerate() { - // Each transaction input signature script must not exceed the - // maximum size allowed for a standard transaction. - // - // See the comment on MAXIMUM_STANDARD_SIGNATURE_SCRIPT_SIZE for - // more details. 
- let signature_script_len = input.signature_script.len() as u64; - if signature_script_len > MAXIMUM_STANDARD_SIGNATURE_SCRIPT_SIZE { - return Err(NonStandardError::RejectSignatureScriptSize( - transaction_id, - i, - signature_script_len, - MAXIMUM_STANDARD_SIGNATURE_SCRIPT_SIZE, - )); - } - } - - // None of the output public key scripts can be a non-standard script or be "dust". + // None of the output public key scripts can be a non-standard script. for (i, output) in transaction.tx.outputs.iter().enumerate() { if output.script_public_key.version() > MAX_SCRIPT_PUBLIC_KEY_VERSION { return Err(NonStandardError::RejectScriptPublicKeyVersion(transaction_id, i)); @@ -87,73 +32,11 @@ impl Mempool { if ScriptClass::from_script(&output.script_public_key) == ScriptClass::NonStandard { return Err(NonStandardError::RejectOutputScriptClass(transaction_id, i)); } - - if self.is_transaction_output_dust(output) { - return Err(NonStandardError::RejectDust(transaction_id, i, output.value)); - } } Ok(()) } - /// is_transaction_output_dust returns whether or not the passed transaction output - /// amount is considered dust or not based on the configured minimum transaction - /// relay fee. - /// - /// Dust is defined in terms of the minimum transaction relay fee. In particular, - /// if the cost to the network to spend coins is more than 1/3 of the minimum - /// transaction relay fee, it is considered dust. - /// - /// It is exposed by [MiningManager] for use by transaction generators and wallets. - pub(crate) fn is_transaction_output_dust(&self, transaction_output: &TransactionOutput) -> bool { - // Unspendable outputs are considered dust. - if is_unspendable::(transaction_output.script_public_key.script()) { - return true; - } - - // The total serialized size consists of the output and the associated - // input script to redeem it. Since there is no input script - // to redeem it yet, use the minimum size of a typical input script. 
- // - // Pay-to-pubkey bytes breakdown: - // - // Output to pubkey (43 bytes): - // 8 value, 1 script len, 34 script [1 OP_DATA_32, - // 32 pubkey, 1 OP_CHECKSIG] - // - // Input (105 bytes): - // 36 prev outpoint, 1 script len, 64 script [1 OP_DATA_64, - // 64 sig], 4 sequence - // - // The most common scripts are pay-to-pubkey, and as per the above - // breakdown, the minimum size of a p2pk input script is 148 bytes. So - // that figure is used. - let total_serialized_size = mass::transaction_output_estimated_serialized_size(transaction_output) + 148; - - // The output is considered dust if the cost to the network to spend the - // coins is more than 1/3 of the minimum free transaction relay fee. - // mp.config.MinimumRelayTransactionFee is in sompi/KB, so multiply - // by 1000 to convert to bytes. - // - // Using the typical values for a pay-to-pubkey transaction from - // the breakdown above and the default minimum free transaction relay - // fee of 1000, this equates to values less than 546 sompi being - // considered dust. - // - // The following is equivalent to (value/total_serialized_size) * (1/3) * 1000 - // without needing to do floating point math. - // - // Since the multiplication may overflow a u64, 2 separate calculation paths - // are considered to avoid overflowing. - match transaction_output.value.checked_mul(1000) { - Some(value_1000) => value_1000 / (3 * total_serialized_size) < self.config.minimum_relay_transaction_fee, - None => { - (transaction_output.value as u128 * 1000 / (3 * total_serialized_size as u128)) - < self.config.minimum_relay_transaction_fee as u128 - } - } - } - /// check_transaction_standard_in_context performs a series of checks on a transaction's /// inputs to ensure they are "standard". A standard transaction input within the /// context of this function is one whose referenced public key script is of a @@ -163,10 +46,6 @@ impl Mempool { /// into the mempool and relay. 
pub(crate) fn check_transaction_standard_in_context(&self, transaction: &MutableTransaction) -> NonStandardResult<()> { let transaction_id = transaction.id(); - let contextual_mass = transaction.tx.mass(); - if contextual_mass > MAXIMUM_STANDARD_TRANSACTION_MASS { - return Err(NonStandardError::RejectStorageMass(transaction_id, contextual_mass, MAXIMUM_STANDARD_TRANSACTION_MASS)); - } for (i, input) in transaction.tx.inputs.iter().enumerate() { // It is safe to elide existence and index checks here since // they have already been checked prior to calling this @@ -179,7 +58,11 @@ impl Mempool { ScriptClass::PubKey => {} ScriptClass::PubKeyECDSA => {} ScriptClass::ScriptHash => { - // todo relax due to on fly calculation + // TODO: relax due to on the fly sigop calculation + // Possible options: + // 1. remove all together and rely on compute mass limits + // 2. extract an upper bound on the committed value from input.mass and min + // with the static count (relying on validation to fail if the commitment is wrong) let num_sig_ops = get_sig_op_count_upper_bound::( &input.signature_script, &entry.script_public_key, @@ -189,14 +72,26 @@ impl Mempool { } } } + } - // TODO: For now, until wallets adapt, we only require minimum fee as function of compute mass (but the fee/mass ratio will - // use the max over all masses and will affect tx selection to block template) - let minimum_fee = - self.minimum_required_transaction_relay_fee(transaction.calculated_non_contextual_masses.unwrap().compute_mass); - if transaction.calculated_fee.unwrap() < minimum_fee { - return Err(NonStandardError::RejectInsufficientFee(transaction_id, transaction.calculated_fee.unwrap(), minimum_fee)); - } + // Minimum relay fee applies to normalized non-contextual mass so block-space usage has a + // minimum cost, whether dominated by compute or by transient byte footprint. 
+ // Storage mass does not require an additional relay-fee floor here since storage growth is + // sufficiently protected even under worst-case block-limit usage. + // Use the post-activation cofactors for fee pricing even before activation: activation policy + // should only change which transient limit is allowed, not reprice the same transaction. + let masses = transaction.calculated_non_contextual_masses.unwrap(); + let cofactors = self.config.mempool_mass_cofactors.after(); + let normalized_transient_mass = masses.normalized_transient(&cofactors); + let fee_mass = masses.compute_mass.max(normalized_transient_mass); + let minimum_fee = self.minimum_required_transaction_relay_fee(fee_mass); + let fee = transaction.calculated_fee.unwrap(); + if fee < minimum_fee { + return if masses.compute_mass >= normalized_transient_mass { + Err(NonStandardError::RejectInsufficientComputeFee(transaction_id, fee, minimum_fee, masses.compute_mass)) + } else { + Err(NonStandardError::RejectInsufficientTransientFee(transaction_id, fee, minimum_fee, normalized_transient_mass)) + }; } Ok(()) @@ -232,21 +127,21 @@ mod tests { }; use kaspa_addresses::{Address, Prefix, Version}; use kaspa_consensus_core::{ - config::constants::consensus::DEFAULT_GAS_PER_LANE_LIMIT, config::params::Params, constants::{MAX_TX_IN_SEQUENCE_NUM, SOMPI_PER_KASPA, TX_VERSION}, mass::NonContextualMasses, network::NetworkType, subnets::SUBNETWORK_ID_NATIVE, - tx::{ScriptPublicKey, ScriptVec, Transaction, TransactionInput, TransactionOutpoint, TransactionOutput}, + tx::{ScriptPublicKey, Transaction, TransactionInput, TransactionOutpoint, TransactionOutput, UtxoEntry}, }; use kaspa_txscript::{ opcodes::codes::{OpReturn, OpTrue}, script_builder::ScriptBuilder, }; - use smallvec::smallvec; use std::sync::Arc; + const RELAY_FEE_TEST_MASS: u64 = 500_000; + #[test] fn test_calc_min_required_tx_relay_fee() { struct Test { @@ -272,10 +167,10 @@ mod tests { want: 100, }, Test { - name: "max standard tx size with default 
minimum relay fee", - size: MAXIMUM_STANDARD_TRANSACTION_MASS, + name: "large relay fee test mass with default minimum relay fee", + size: RELAY_FEE_TEST_MASS, minimum_relay_transaction_fee: DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE, - want: MAXIMUM_STANDARD_TRANSACTION_MASS, + want: RELAY_FEE_TEST_MASS, }, Test { name: "1500 bytes with 5000 relay fee", size: 1500, minimum_relay_transaction_fee: 5000, want: 7500 }, Test { name: "1500 bytes with 3000 relay fee", size: 1500, minimum_relay_transaction_fee: 3000, want: 4500 }, @@ -287,8 +182,12 @@ mod tests { for test in tests.iter() { for net in NetworkType::iter() { let params: Params = net.into(); - let mut config = - Config::build_default(params.target_time_per_block(), false, params.block_mass_limits, params.block_lane_limits); + let mut config = Config::build_default( + params.target_time_per_block(), + false, + params.mempool_block_mass_limits(), + params.block_lane_limits, + ); config.minimum_relay_transaction_fee = test.minimum_relay_transaction_fee; let counters = Arc::new(MiningCounters::default()); let mempool = Mempool::new(Arc::new(config), counters); @@ -302,93 +201,6 @@ mod tests { } } - #[test] - fn test_is_transaction_output_dust() { - let script_public_key = ScriptPublicKey::new( - 0, - smallvec![ - 0x76, 0xa9, 0x21, 0x03, 0x2f, 0x7e, 0x43, 0x0a, 0xa4, 0xc9, 0xd1, 0x59, 0x43, 0x7e, 0x84, 0xb9, 0x75, 0xdc, 0x76, - 0xd9, 0x00, 0x3b, 0xf0, 0x92, 0x2c, 0xf3, 0xaa, 0x45, 0x28, 0x46, 0x4b, 0xab, 0x78, 0x0d, 0xba, 0x5e - ], - ); - let invalid_script_public_key = ScriptPublicKey::new(0, smallvec![0x01]); - - struct Test { - name: &'static str, - tx_out: TransactionOutput, - minimum_relay_transaction_fee: u64, - is_dust: bool, - } - - let tests = vec![ - // Any value is allowed with a zero relay fee. 
- Test { - name: "zero value with zero relay fee", - tx_out: TransactionOutput::new(0, script_public_key.clone()), - minimum_relay_transaction_fee: 0, - is_dust: false, - }, - // Zero value is dust with any relay fee" - Test { - name: "zero value with very small tx fee", - tx_out: TransactionOutput::new(0, script_public_key.clone()), - minimum_relay_transaction_fee: 1, - is_dust: true, - }, - Test { - name: "36 byte public key script with value 605", - tx_out: TransactionOutput::new(605, script_public_key.clone()), - minimum_relay_transaction_fee: 1000, - is_dust: true, - }, - Test { - name: "36 byte public key script with value 606", - tx_out: TransactionOutput::new(606, script_public_key.clone()), - minimum_relay_transaction_fee: 1000, - is_dust: false, - }, - // Maximum allowed value is never dust. - Test { - name: "max sompi amount is never dust", - tx_out: TransactionOutput::new(MAX_SOMPI, script_public_key.clone()), - minimum_relay_transaction_fee: 1000, - is_dust: false, - }, - // Maximum uint64 value causes NO overflow. - // Rust rewrite: caution, this differs from the golang version - Test { - name: "maximum uint64 value", - tx_out: TransactionOutput::new(u64::MAX, script_public_key), - minimum_relay_transaction_fee: u64::MAX, - is_dust: false, - }, - // Unspendable script_public_key due to an invalid public key script. 
- Test { - name: "unspendable script_public_key", - tx_out: TransactionOutput::new(5000, invalid_script_public_key), - minimum_relay_transaction_fee: 0, - is_dust: true, - }, - ]; - for test in tests { - for net in NetworkType::iter() { - let params: Params = net.into(); - let mut config = - Config::build_default(params.target_time_per_block(), false, params.block_mass_limits, params.block_lane_limits); - config.minimum_relay_transaction_fee = test.minimum_relay_transaction_fee; - let counters = Arc::new(MiningCounters::default()); - let mempool = Mempool::new(Arc::new(config), counters); - - println!("test_is_transaction_output_dust test '{}' ", test.name); - let res = mempool.is_transaction_output_dust(&test.tx_out); - if res != test.is_dust { - println!("test_is_transaction_output_dust test '{}' failed: got {}, want {}", test.name, res, test.is_dust); - } - assert_eq!(test.is_dust, res); - } - } - } - #[test] fn test_check_transaction_standard_in_isolation() { // Create some dummy, but otherwise standard, data for transactions. 
@@ -446,65 +258,6 @@ mod tests { ), is_standard: true, // check_transaction_standard_in_isolation does not check version }, - Test { - name: "Transaction gas exceeds the per-lane limit", - mtx: new_mtx( - Transaction::new( - TX_VERSION, - vec![dummy_tx_input.clone()], - vec![dummy_tx_out.clone()], - 0, - SUBNETWORK_ID_NATIVE, - DEFAULT_GAS_PER_LANE_LIMIT + 1, - vec![], - ), - 1000, - ), - is_standard: false, - }, - Test { - name: "Transaction size is too large", - mtx: new_mtx( - Transaction::new( - TX_VERSION, - vec![dummy_tx_input.clone()], - vec![TransactionOutput::new( - 0u64, - ScriptPublicKey::new( - MAX_SCRIPT_PUBLIC_KEY_VERSION, - ScriptVec::from_vec(vec![0u8; MAXIMUM_STANDARD_TRANSACTION_MASS as usize + 1]), - ), - )], - 0, - SUBNETWORK_ID_NATIVE, - 0, - vec![], - ), - 1000, - ), - is_standard: false, - }, - Test { - name: "Signature script size is too large", - mtx: new_mtx( - Transaction::new( - TX_VERSION + 1, - vec![TransactionInput::new( - dummy_prev_out, - vec![0u8; MAXIMUM_STANDARD_SIGNATURE_SCRIPT_SIZE as usize + 1], - MAX_TX_IN_SEQUENCE_NUM, - 1, - )], - vec![dummy_tx_out.clone()], - 0, - SUBNETWORK_ID_NATIVE, - 0, - vec![], - ), - 1000, - ), - is_standard: false, - }, Test { name: "Valid but non standard public key script", mtx: new_mtx( @@ -527,22 +280,6 @@ mod tests { ), is_standard: false, }, - Test { - name: "Dust output", - mtx: new_mtx( - Transaction::new( - TX_VERSION, - vec![dummy_tx_input.clone()], - vec![TransactionOutput::new(0, dummy_tx_out.script_public_key)], - 0, - SUBNETWORK_ID_NATIVE, - 0, - vec![], - ), - 1000, - ), - is_standard: false, - }, Test { name: "Null-data transaction", mtx: new_mtx( @@ -570,8 +307,12 @@ mod tests { for test in tests { for net in NetworkType::iter() { let params: Params = net.into(); - let config = - Config::build_default(params.target_time_per_block(), false, params.block_mass_limits, params.block_lane_limits); + let config = Config::build_default( + params.target_time_per_block(), + false, + 
params.mempool_block_mass_limits(), + params.block_lane_limits, + ); let counters = Arc::new(MiningCounters::default()); let mempool = Mempool::new(Arc::new(config), counters); @@ -596,4 +337,103 @@ mod tests { } } } + + #[test] + fn test_check_transaction_standard_in_context() { + let addr = Address::new(Prefix::Testnet, Version::PubKey, &[1u8; 32]); + let standard_script_public_key = kaspa_txscript::pay_to_address_script(&addr); + let non_standard_script_public_key = + ScriptPublicKey::new(MAX_SCRIPT_PUBLIC_KEY_VERSION, ScriptBuilder::new().add_op(OpTrue).unwrap().script().into()); + + enum Expected { + Standard, + RejectInputScriptClass, + RejectInsufficientComputeFee { fee: u64, minimum_fee: u64, compute_mass: u64 }, + RejectInsufficientTransientFee { fee: u64, minimum_fee: u64, normalized_transient_mass: u64 }, + } + + struct Test { + name: &'static str, + mtx: MutableTransaction, + expected: Expected, + } + + fn new_mtx(script_public_key: ScriptPublicKey, masses: NonContextualMasses, fee: u64) -> MutableTransaction { + let prev_out = TransactionOutpoint::new(kaspa_hashes::Hash::from_u64_word(1), 1); + let input = TransactionInput::new(prev_out, vec![], MAX_TX_IN_SEQUENCE_NUM, 1); + let tx = Transaction::new( + TX_VERSION, + vec![input], + vec![TransactionOutput::new(SOMPI_PER_KASPA, script_public_key.clone())], + 0, + SUBNETWORK_ID_NATIVE, + 0, + vec![], + ); + let mut mtx = MutableTransaction::with_entries( + tx.into(), + vec![UtxoEntry::new(2 * SOMPI_PER_KASPA, script_public_key, 0, false, None)], + ); + mtx.calculated_non_contextual_masses = Some(masses); + mtx.calculated_fee = Some(fee); + mtx + } + + let tests = vec![ + Test { + name: "standard input with sufficient fee", + mtx: new_mtx(standard_script_public_key.clone(), NonContextualMasses::new(1_000, 500), 1_000), + expected: Expected::Standard, + }, + Test { + name: "non-standard input script class", + mtx: new_mtx(non_standard_script_public_key, NonContextualMasses::new(1_000, 1_000), 1_000), + 
expected: Expected::RejectInputScriptClass, + }, + Test { + name: "compute mass triggers insufficient relay fee", + mtx: new_mtx(standard_script_public_key.clone(), NonContextualMasses::new(10_000, 1), 9_999), + expected: Expected::RejectInsufficientComputeFee { fee: 9_999, minimum_fee: 10_000, compute_mass: 10_000 }, + }, + Test { + name: "transient mass triggers insufficient relay fee", + mtx: new_mtx(standard_script_public_key, NonContextualMasses::new(1, 20_000), 9_999), + expected: Expected::RejectInsufficientTransientFee { + fee: 9_999, + minimum_fee: 10_000, + normalized_transient_mass: 10_000, + }, + }, + ]; + + // Use simnet params so prior and post-activation transient limits differ while the fork is active; + // this verifies that relay-fee pricing uses stable post-activation cofactors. + let params: Params = NetworkType::Simnet.into(); + let config = + Config::build_default(params.target_time_per_block(), false, params.mempool_block_mass_limits(), params.block_lane_limits); + let counters = Arc::new(MiningCounters::default()); + let mempool = Mempool::new(Arc::new(config), counters); + + for test in tests { + let res = mempool.check_transaction_standard_in_context(&test.mtx); + match test.expected { + Expected::Standard => assert_eq!(res, Ok(()), "failed for test '{}'", test.name), + Expected::RejectInputScriptClass => { + assert_eq!(res, Err(NonStandardError::RejectInputScriptClass(test.mtx.id(), 0)), "failed for test '{}'", test.name) + } + Expected::RejectInsufficientComputeFee { fee, minimum_fee, compute_mass } => assert_eq!( + res, + Err(NonStandardError::RejectInsufficientComputeFee(test.mtx.id(), fee, minimum_fee, compute_mass)), + "failed for test '{}'", + test.name + ), + Expected::RejectInsufficientTransientFee { fee, minimum_fee, normalized_transient_mass } => assert_eq!( + res, + Err(NonStandardError::RejectInsufficientTransientFee(test.mtx.id(), fee, minimum_fee, normalized_transient_mass)), + "failed for test '{}'", + test.name + ), + } + 
} + } } diff --git a/mining/src/mempool/config.rs b/mining/src/mempool/config.rs index cc2ba0f0d3..66bb381ada 100644 --- a/mining/src/mempool/config.rs +++ b/mining/src/mempool/config.rs @@ -1,4 +1,7 @@ -use kaspa_consensus_core::mass::{BlockLaneLimits, BlockMassLimits, MassCofactors}; +use kaspa_consensus_core::{ + config::params::ForkedParam, + mass::{BlockLaneLimits, BlockMassLimits, MassCofactors}, +}; pub(crate) const DEFAULT_MAXIMUM_TRANSACTION_COUNT: usize = 1_000_000; pub(crate) const DEFAULT_MEMPOOL_SIZE_LIMIT: usize = 1_000_000_000; @@ -34,7 +37,8 @@ pub struct Config { pub maximum_orphan_transaction_normalized_mass: u64, pub maximum_orphan_transaction_count: u64, pub accept_non_standard: bool, - pub block_mass_cofactors: MassCofactors, + pub mempool_block_mass_limits: ForkedParam, + pub mempool_mass_cofactors: ForkedParam, pub block_lane_limits: BlockLaneLimits, pub minimum_relay_transaction_fee: u64, pub network_blocks_per_second: u64, @@ -57,11 +61,13 @@ impl Config { maximum_orphan_transaction_normalized_mass: u64, maximum_orphan_transaction_count: u64, accept_non_standard: bool, - block_mass_cofactors: MassCofactors, + mempool_block_mass_limits: impl Into>, block_lane_limits: BlockLaneLimits, minimum_relay_transaction_fee: u64, network_blocks_per_second: u64, ) -> Self { + let mempool_block_mass_limits = mempool_block_mass_limits.into(); + let mempool_mass_cofactors = mempool_block_mass_limits.map(|limits| limits.cofactors()); Self { maximum_transaction_count, mempool_size_limit, @@ -77,7 +83,8 @@ impl Config { maximum_orphan_transaction_normalized_mass, maximum_orphan_transaction_count, accept_non_standard, - block_mass_cofactors, + mempool_block_mass_limits, + mempool_mass_cofactors, block_lane_limits, minimum_relay_transaction_fee, network_blocks_per_second, @@ -85,14 +92,15 @@ impl Config { } /// Build a default config. - /// The arguments should be obtained from the current consensus [`kaspa_consensus_core::config::params::Params`] instance. 
+ /// The mass limits should be obtained from [`kaspa_consensus_core::config::params::Params::mempool_block_mass_limits`]. pub fn build_default( target_milliseconds_per_block: u64, relay_non_std_transactions: bool, - block_mass_limits: BlockMassLimits, + mempool_block_mass_limits: impl Into>, block_lane_limits: BlockLaneLimits, ) -> Self { - let block_mass_cofactors = block_mass_limits.cofactors(); + let mempool_block_mass_limits = mempool_block_mass_limits.into(); + let mempool_mass_cofactors = mempool_block_mass_limits.map(|limits| limits.cofactors()); Self { maximum_transaction_count: DEFAULT_MAXIMUM_TRANSACTION_COUNT, mempool_size_limit: DEFAULT_MEMPOOL_SIZE_LIMIT, @@ -111,7 +119,8 @@ impl Config { maximum_orphan_transaction_normalized_mass: DEFAULT_MAXIMUM_ORPHAN_TRANSACTION_NORMALIZED_MASS, maximum_orphan_transaction_count: DEFAULT_MAXIMUM_ORPHAN_TRANSACTION_COUNT, accept_non_standard: relay_non_std_transactions, - block_mass_cofactors, + mempool_block_mass_limits, + mempool_mass_cofactors, block_lane_limits, minimum_relay_transaction_fee: DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE, network_blocks_per_second: 1000 / target_milliseconds_per_block, diff --git a/mining/src/mempool/mod.rs b/mining/src/mempool/mod.rs index e007436cbf..b8c3fa49fc 100644 --- a/mining/src/mempool/mod.rs +++ b/mining/src/mempool/mod.rs @@ -19,6 +19,7 @@ use kaspa_consensus_core::{ use kaspa_core::time::Stopwatch; use std::sync::Arc; +pub(crate) mod check_transaction_limits; pub(crate) mod check_transaction_standard; pub mod config; pub mod errors; diff --git a/mining/src/mempool/model/orphan_pool.rs b/mining/src/mempool/model/orphan_pool.rs index ea1f7deb07..3f4db24bc7 100644 --- a/mining/src/mempool/model/orphan_pool.rs +++ b/mining/src/mempool/model/orphan_pool.rs @@ -71,7 +71,7 @@ impl OrphanPool { return Ok(()); } self.check_orphan_duplicate(&transaction)?; - self.check_orphan_mass(&transaction)?; + self.check_orphan_mass(virtual_daa_score, &transaction)?; 
self.check_orphan_double_spend(&transaction)?; // Make sure there is room in the pool for the new transaction self.limit_orphan_pool_size(1)?; @@ -98,8 +98,9 @@ impl OrphanPool { Ok(()) } - fn check_orphan_mass(&self, transaction: &MutableTransaction) -> RuleResult<()> { - let normalized_mass = transaction.calculated_non_contextual_masses.unwrap().normalized_max(&self.config.block_mass_cofactors); + fn check_orphan_mass(&self, virtual_daa_score: u64, transaction: &MutableTransaction) -> RuleResult<()> { + let cofactors = self.config.mempool_mass_cofactors.get(virtual_daa_score); + let normalized_mass = transaction.calculated_non_contextual_masses.unwrap().normalized_max(&cofactors); if normalized_mass > self.config.maximum_orphan_transaction_normalized_mass { return Err(RuleError::RejectBadOrphanMass(normalized_mass, self.config.maximum_orphan_transaction_normalized_mass)); } diff --git a/mining/src/mempool/model/transactions_pool.rs b/mining/src/mempool/model/transactions_pool.rs index 7a94e68585..e379672270 100644 --- a/mining/src/mempool/model/transactions_pool.rs +++ b/mining/src/mempool/model/transactions_pool.rs @@ -124,7 +124,8 @@ impl TransactionsPool { let parents = self.get_parent_transaction_ids_in_pool(&transaction.mtx); self.parent_transactions.insert(id, parents.clone()); if parents.is_empty() { - self.ready_transactions.insert(FeerateTransactionKey::from_tx(&transaction, &self.config.block_mass_cofactors)); + let cofactors = self.config.mempool_mass_cofactors.get(transaction.added_at_daa_score); + self.ready_transactions.insert(FeerateTransactionKey::from_tx(&transaction, &cofactors)); } for parent_id in parents { let entry = self.chained_transactions.entry(parent_id).or_default(); @@ -154,7 +155,8 @@ impl TransactionsPool { parents.remove(transaction_id); if parents.is_empty() { let tx = self.all_transactions.get(chain).unwrap(); - self.ready_transactions.insert(FeerateTransactionKey::from_tx(tx, &self.config.block_mass_cofactors)); + let cofactors 
= self.config.mempool_mass_cofactors.get(tx.added_at_daa_score); + self.ready_transactions.insert(FeerateTransactionKey::from_tx(tx, &cofactors)); } } } @@ -165,7 +167,8 @@ impl TransactionsPool { // Remove the transaction itself let removed_tx = self.all_transactions.remove(transaction_id).ok_or(RuleError::RejectMissingTransaction(*transaction_id))?; - self.ready_transactions.remove(&FeerateTransactionKey::from_tx(&removed_tx, &self.config.block_mass_cofactors)); + let cofactors = self.config.mempool_mass_cofactors.get(removed_tx.added_at_daa_score); + self.ready_transactions.remove(&FeerateTransactionKey::from_tx(&removed_tx, &cofactors)); // TODO: consider using `self.parent_transactions.get(transaction_id)` // The tradeoff to consider is whether it might be possible that a parent tx exists in the pool @@ -206,7 +209,9 @@ impl TransactionsPool { /// Dynamically builds a transaction selector based on the specific state of the ready transactions frontier pub(crate) fn build_selector(&self) -> Box { - self.ready_transactions.build_selector(&Policy::new(self.config.block_mass_cofactors.reference, self.config.block_lane_limits)) + self.ready_transactions + // Params::mempool_block_mass_cofactors asserts that the reference mass is stable across activation. 
+ .build_selector(&Policy::new(self.config.mempool_mass_cofactors.after().reference, self.config.block_lane_limits)) } /// Builds a feerate estimator based on internal state of the ready transactions frontier @@ -225,6 +230,7 @@ impl TransactionsPool { &self, transaction: &MutableTransaction, transaction_size: usize, + virtual_daa_score: u64, ) -> RuleResult> { // No eviction needed -- return if self.len() < self.config.maximum_transaction_count @@ -234,7 +240,8 @@ impl TransactionsPool { } // Returns a vector of transactions to be removed (the caller has to actually remove) - let feerate_threshold = transaction.calculated_feerate(&self.config.block_mass_cofactors).unwrap(); + let pending_cofactors = self.config.mempool_mass_cofactors.get(virtual_daa_score); + let feerate_threshold = transaction.calculated_feerate(&pending_cofactors).unwrap(); let mut txs_to_remove = Vec::with_capacity(1); // Normally we expect a single removal let mut selection_overall_size = 0; for tx in self @@ -250,7 +257,8 @@ impl TransactionsPool { } // We are iterating ready txs by ascending feerate so the pending tx has lower feerate than all remaining txs - if tx.feerate(&self.config.block_mass_cofactors) > feerate_threshold { + let tx_cofactors = self.config.mempool_mass_cofactors.get(tx.added_at_daa_score); + if tx.feerate(&tx_cofactors) > feerate_threshold { let err = RuleError::RejectMempoolIsFull; debug!("Transaction {} with feerate {} has been rejected: {}", transaction.id(), feerate_threshold, err); return Err(err); diff --git a/mining/src/mempool/replace_by_fee.rs b/mining/src/mempool/replace_by_fee.rs index c0f39941cb..786cd6e2cf 100644 --- a/mining/src/mempool/replace_by_fee.rs +++ b/mining/src/mempool/replace_by_fee.rs @@ -17,6 +17,7 @@ impl Mempool { &self, transaction: &MutableTransaction, rbf_policy: RbfPolicy, + virtual_daa_score: u64, ) -> RuleResult> { match rbf_policy { RbfPolicy::Forbidden => { @@ -34,7 +35,7 @@ impl Mempool { let mut feerate_threshold = 0f64; for 
double_spend in double_spends { // We take the max over all double spends as the required threshold - feerate_threshold = feerate_threshold.max(self.get_double_spend_feerate(&double_spend)?); + feerate_threshold = feerate_threshold.max(self.get_double_spend_feerate(&double_spend, virtual_daa_score)?); } Ok(Some(feerate_threshold)) } @@ -46,7 +47,7 @@ impl Mempool { match double_spends.len() { 0 => Err(RuleError::RejectRbfNoDoubleSpend), 1 => { - let feerate_threshold = self.get_double_spend_feerate(&double_spends[0])?; + let feerate_threshold = self.get_double_spend_feerate(&double_spends[0], virtual_daa_score)?; Ok(Some(feerate_threshold)) } _ => Err(RuleError::RejectRbfTooManyDoubleSpendingTransactions), @@ -55,6 +56,24 @@ impl Mempool { } } + /// Validates the same RBF policy constraints as [`Self::get_replace_by_fee_constraint`], + /// but skips computing the feerate threshold for callers that do not need it. + pub(super) fn validate_replace_by_fee_policy_constraints( + &self, + transaction: &MutableTransaction, + rbf_policy: RbfPolicy, + ) -> RuleResult<()> { + match rbf_policy { + RbfPolicy::Forbidden => self.transaction_pool.check_double_spends(transaction), + RbfPolicy::Allowed => Ok(()), + RbfPolicy::Mandatory => match self.transaction_pool.get_double_spend_transaction_ids(transaction).len() { + 0 => Err(RuleError::RejectRbfNoDoubleSpend), + 1 => Ok(()), + _ => Err(RuleError::RejectRbfTooManyDoubleSpendingTransactions), + }, + } + } + /// Executes replace by fee (RBF) for an incoming transaction and a policy. /// /// See [`RbfPolicy`] variants for details of each policy process and success conditions. 
@@ -66,6 +85,7 @@ impl Mempool { &mut self, transaction: &MutableTransaction, rbf_policy: RbfPolicy, + virtual_daa_score: u64, ) -> RuleResult>> { match rbf_policy { RbfPolicy::Forbidden => { @@ -78,10 +98,14 @@ impl Mempool { match double_spends.is_empty() { true => Ok(None), false => { - let removed = self.validate_double_spending_transaction(transaction, &double_spends[0])?.mtx.tx.clone(); + let removed = self + .validate_double_spending_transaction(transaction, &double_spends[0], virtual_daa_score)? + .mtx + .tx + .clone(); for double_spend in double_spends.iter().skip(1) { // Validate the feerate threshold is passed for all double spends - self.validate_double_spending_transaction(transaction, double_spend)?; + self.validate_double_spending_transaction(transaction, double_spend, virtual_daa_score)?; } // We apply consequences such as removal only after we fully validate against all double spends for double_spend in double_spends { @@ -102,7 +126,11 @@ impl Mempool { match double_spends.len() { 0 => Err(RuleError::RejectRbfNoDoubleSpend), 1 => { - let removed = self.validate_double_spending_transaction(transaction, &double_spends[0])?.mtx.tx.clone(); + let removed = self + .validate_double_spending_transaction(transaction, &double_spends[0], virtual_daa_score)? + .mtx + .tx + .clone(); self.remove_transaction( &double_spends[0].owner_id, true, @@ -117,9 +145,12 @@ impl Mempool { } } - fn get_double_spend_feerate(&self, double_spend: &DoubleSpend) -> RuleResult { + fn get_double_spend_feerate(&self, double_spend: &DoubleSpend, virtual_daa_score: u64) -> RuleResult { let owner = self.transaction_pool.get_double_spend_owner(double_spend)?; - match owner.mtx.calculated_feerate(&self.config.block_mass_cofactors) { + // RBF compares both transactions under the current mempool policy. The owner's + // added_at_daa_score is only needed when rebuilding its tx-pool frontier key. 
+ let cofactors = self.config.mempool_mass_cofactors.get(virtual_daa_score); + match owner.mtx.calculated_feerate(&cofactors) { Some(double_spend_feerate) => Ok(double_spend_feerate), // Getting here is unexpected since a mempool owned tx should be populated with fee // and mass at this stage but nonetheless we fail gracefully @@ -131,12 +162,13 @@ impl Mempool { &'a self, transaction: &MutableTransaction, double_spend: &DoubleSpend, + virtual_daa_score: u64, ) -> RuleResult<&'a MempoolTransaction> { let owner = self.transaction_pool.get_double_spend_owner(double_spend)?; - if let (Some(transaction_feerate), Some(double_spend_feerate)) = ( - transaction.calculated_feerate(&self.config.block_mass_cofactors), - owner.mtx.calculated_feerate(&self.config.block_mass_cofactors), - ) { + let cofactors = self.config.mempool_mass_cofactors.get(virtual_daa_score); + if let (Some(transaction_feerate), Some(double_spend_feerate)) = + (transaction.calculated_feerate(&cofactors), owner.mtx.calculated_feerate(&cofactors)) + { if transaction_feerate > double_spend_feerate { return Ok(owner); } else { diff --git a/mining/src/mempool/validate_and_insert_transaction.rs b/mining/src/mempool/validate_and_insert_transaction.rs index 8a4fcb4a5f..97dabe1715 100644 --- a/mining/src/mempool/validate_and_insert_transaction.rs +++ b/mining/src/mempool/validate_and_insert_transaction.rs @@ -23,11 +23,15 @@ impl Mempool { mut transaction: MutableTransaction, rbf_policy: RbfPolicy, ) -> RuleResult { - self.validate_transaction_unacceptance(&transaction)?; - // Populate mass and estimated_size in the beginning, it will be used in multiple places throughout the validation and insertion. + let transaction_id = transaction.id(); + self.validate_transaction_unacceptance(transaction_id)?; + self.validate_transaction_not_duplicate(transaction_id)?; + // Populate non-contextual masses up front, they will be used in multiple places throughout validation and insertion. 
transaction.calculated_non_contextual_masses = Some(consensus.calculate_transaction_non_contextual_masses(&transaction.tx)?); - self.validate_transaction_in_isolation(&transaction)?; - let feerate_threshold = self.get_replace_by_fee_constraint(&transaction, rbf_policy)?; + let virtual_daa_score = consensus.get_virtual_daa_score(); + self.validate_transaction_limits_in_isolation(&transaction, virtual_daa_score)?; + self.validate_transaction_std_in_isolation(&transaction)?; + let feerate_threshold = self.get_replace_by_fee_constraint(&transaction, rbf_policy, virtual_daa_score)?; self.populate_mempool_entries(&mut transaction); Ok(TransactionPreValidation { transaction, feerate_threshold }) } @@ -43,16 +47,12 @@ impl Mempool { ) -> RuleResult { let transaction_id = transaction.id(); - // First check if the transaction was not already added to the mempool. + // Check if the transaction was accepted or already added to the mempool. // The case may arise since the execution of the manager public functions is no - // longer atomic and different code paths may lead to inserting the same transaction + // longer atomic and different code paths may accept or insert the same transaction // concurrently. 
- if self.transaction_pool.has(&transaction_id) { - debug!("Transaction {0} is not post validated since already in the mempool", transaction_id); - return Err(RuleError::RejectDuplicate(transaction_id)); - } - - self.validate_transaction_unacceptance(&transaction)?; + self.validate_transaction_unacceptance(transaction_id)?; + self.validate_transaction_not_duplicate(transaction_id)?; match validation_result { Ok(_) => {} @@ -60,8 +60,9 @@ impl Mempool { if orphan == Orphan::Forbidden { return Err(RuleError::RejectDisallowedOrphan(transaction_id)); } - let _ = self.get_replace_by_fee_constraint(&transaction, rbf_policy)?; - self.orphan_pool.try_add_orphan(consensus.get_virtual_daa_score(), transaction, priority)?; + self.validate_replace_by_fee_policy_constraints(&transaction, rbf_policy)?; + let virtual_daa_score = consensus.get_virtual_daa_score(); + self.orphan_pool.try_add_orphan(virtual_daa_score, transaction, priority)?; return Ok(TransactionPostValidation::default()); } Err(err) => { @@ -69,11 +70,14 @@ impl Mempool { } } + let virtual_daa_score = consensus.get_virtual_daa_score(); + // Perform mempool in-context validations prior to possible RBF replacements - self.validate_transaction_in_context(&transaction)?; + self.validate_transaction_limits_in_context(&transaction, virtual_daa_score)?; + self.validate_transaction_std_in_context(&transaction)?; // Check double spends and try to remove them if the RBF policy requires it - let removed_transaction = self.execute_replace_by_fee(&transaction, rbf_policy)?; + let removed_transaction = self.execute_replace_by_fee(&transaction, rbf_policy, virtual_daa_score)?; // // Note: there exists a case below where `limit_transaction_count` returns an error signaling that @@ -85,7 +89,7 @@ impl Mempool { // Before adding the transaction, check if there is room in the pool let transaction_size = transaction.mempool_estimated_bytes(); - let txs_to_remove = self.transaction_pool.limit_transaction_count(&transaction, 
transaction_size)?; + let txs_to_remove = self.transaction_pool.limit_transaction_count(&transaction, transaction_size, virtual_daa_score)?; if !txs_to_remove.is_empty() { let transaction_pool_len_before = self.transaction_pool.len(); for x in txs_to_remove.iter() { @@ -120,38 +124,36 @@ impl Mempool { ); // Add the transaction to the mempool as a MempoolTransaction and return a clone of the embedded Arc - let accepted_transaction = self - .transaction_pool - .add_transaction(transaction, consensus.get_virtual_daa_score(), priority, transaction_size)? - .mtx - .tx - .clone(); + let accepted_transaction = + self.transaction_pool.add_transaction(transaction, virtual_daa_score, priority, transaction_size)?.mtx.tx.clone(); Ok(TransactionPostValidation { removed: removed_transaction, accepted: Some(accepted_transaction) }) } /// Validates that the transaction wasn't already accepted into the DAG - fn validate_transaction_unacceptance(&self, transaction: &MutableTransaction) -> RuleResult<()> { + fn validate_transaction_unacceptance(&self, transaction_id: TransactionId) -> RuleResult<()> { // Reject if the transaction is registered as an accepted transaction - let transaction_id = transaction.id(); match self.accepted_transactions.has(&transaction_id) { true => Err(RuleError::RejectAlreadyAccepted(transaction_id)), false => Ok(()), } } - fn validate_transaction_in_isolation(&self, transaction: &MutableTransaction) -> RuleResult<()> { - let transaction_id = transaction.id(); + fn validate_transaction_not_duplicate(&self, transaction_id: TransactionId) -> RuleResult<()> { if self.transaction_pool.has(&transaction_id) { return Err(RuleError::RejectDuplicate(transaction_id)); } + Ok(()) + } + + fn validate_transaction_std_in_isolation(&self, transaction: &MutableTransaction) -> RuleResult<()> { if !self.config.accept_non_standard { self.check_transaction_standard_in_isolation(transaction)?; } Ok(()) } - fn validate_transaction_in_context(&self, transaction: 
&MutableTransaction) -> RuleResult<()> { + fn validate_transaction_std_in_context(&self, transaction: &MutableTransaction) -> RuleResult<()> { if !self.config.accept_non_standard { self.check_transaction_standard_in_context(transaction)?; } @@ -230,8 +232,8 @@ impl Mempool { let transaction = transactions.pop().unwrap(); let rbf_policy = Self::get_orphan_transaction_rbf_policy(transaction.priority); - self.validate_transaction_unacceptance(&transaction.mtx)?; - let _ = self.get_replace_by_fee_constraint(&transaction.mtx, rbf_policy)?; + self.validate_transaction_unacceptance(transaction.mtx.id())?; + self.validate_replace_by_fee_policy_constraints(&transaction.mtx, rbf_policy)?; Ok(transaction) } diff --git a/mining/src/toccata_transient_mass_activation_tests.rs b/mining/src/toccata_transient_mass_activation_tests.rs new file mode 100644 index 0000000000..4b4fb0e6f4 --- /dev/null +++ b/mining/src/toccata_transient_mass_activation_tests.rs @@ -0,0 +1,490 @@ +//! TODO(post-toccata): Once the transient-mass activation window is behind us, reduce this module +//! to the durable mempool pipeline checks and move/rename it away from Toccata-specific activation +//! coverage. +//! +//! Remove with the activation plumbing: +//! - mined_templates_respect_consensus_transient_mass_across_mempool_delay +//! - mined_template_handles_transactions_added_on_both_sides_of_mempool_delay +//! - rbf_lower_fee_replacement_is_rejected_at_delayed_mempool_activation_boundary +//! - template_limits_reject_transient_tx_until_delayed_mempool_activation +//! +//! Keep as durable pipeline checks: +//! - template_limits_reject_compute_tx_before_consensus_validation +//! - template_limits_reject_storage_tx_after_consensus_validation +//! - template_limits_reject_gas_even_when_non_standard_transactions_are_allowed +//! +//! The durable checks prove that block-limit admission is not standardness: gas and compute +//! 
rejections happen before consensus in-context validation and script work, while storage +//! rejection happens only after consensus populates contextual mass. They protect the selector +//! invariant that every tx admitted to the pool can fit in a block under the active consensus +//! block limits. + +use crate::{ + MiningCounters, + errors::MiningManagerError, + manager::MiningManager, + mempool::{ + config::Config, + errors::RuleError, + tx::{Orphan, Priority, RbfPolicy}, + }, +}; +use kaspa_consensus_core::{ + api::{ + ConsensusApi, + args::{TransactionValidationArgs, TransactionValidationBatchArgs}, + }, + block::{BlockTemplate, MutableBlock, TemplateBuildMode, TemplateTransactionSelector, VirtualStateApproxId}, + coinbase::MinerData, + config::{ + constants::consensus::{DEFAULT_GAS_PER_LANE_LIMIT, DEFAULT_LANES_PER_BLOCK_LIMIT}, + params::{ForkActivation, ForkedParam, Params, SIMNET_PARAMS}, + }, + constants::{MAX_TX_IN_SEQUENCE_NUM, SOMPI_PER_KASPA, TX_VERSION}, + errors::{ + block::RuleError as BlockRuleError, + coinbase::CoinbaseResult, + tx::{TxResult, TxRuleError}, + }, + header::{CompressedParents, Header}, + mass::{BlockLaneLimits, BlockMassLimits, ContextualMasses, Mass, MassCalculator, MassCofactors, NonContextualMasses}, + merkle::calc_hash_merkle_root, + subnets::{SUBNETWORK_ID_COINBASE, SUBNETWORK_ID_NATIVE}, + tx::{ + MutableTransaction, ScriptPublicKey, Transaction, TransactionId, TransactionInput, TransactionOutpoint, TransactionOutput, + UtxoEntry, scriptvec, + }, +}; +use kaspa_core::time::unix_now; +use kaspa_hashes::{Hash, ZERO_HASH}; +use parking_lot::RwLock; +use std::{ + collections::{HashMap, HashSet}, + sync::{ + Arc, + atomic::{AtomicU64, Ordering}, + }, +}; + +const ACTIVATION_DAA_SCORE: u64 = 10_000; +const PRIOR_BLOCK_MASS_LIMIT: u64 = 500_000; +const NEW_TRANSIENT_LIMIT: u64 = 1_000_000; +const TARGET_TIME_PER_BLOCK: u64 = 100; +const BLOCK_LANE_LIMITS: BlockLaneLimits = + BlockLaneLimits { lanes_per_block: 
DEFAULT_LANES_PER_BLOCK_LIMIT, gas_per_lane: DEFAULT_GAS_PER_LANE_LIMIT }; + +struct MassPolicyTestConsensus { + virtual_daa_score: AtomicU64, + mass_calculator: MassCalculator, + mempool_mass_cofactors: ForkedParam, + validation_attempts: AtomicU64, + non_contextual_mass_overrides: RwLock>, + validated_thresholds: RwLock>, +} + +impl MassPolicyTestConsensus { + fn new(params: &Params) -> Self { + Self { + virtual_daa_score: AtomicU64::new(0), + mass_calculator: MassCalculator::new_with_consensus_params(params), + mempool_mass_cofactors: params.mempool_block_mass_cofactors(), + validation_attempts: AtomicU64::new(0), + non_contextual_mass_overrides: Default::default(), + validated_thresholds: Default::default(), + } + } + + fn set_virtual_daa_score(&self, virtual_daa_score: u64) { + self.virtual_daa_score.store(virtual_daa_score, Ordering::Relaxed); + } + + fn validated_thresholds(&self) -> Vec<(TransactionId, u64, f64)> { + self.validated_thresholds.read().clone() + } + + fn validation_attempts(&self) -> u64 { + self.validation_attempts.load(Ordering::Relaxed) + } + + fn set_non_contextual_masses(&self, transaction_id: TransactionId, masses: NonContextualMasses) { + self.non_contextual_mass_overrides.write().insert(transaction_id, masses); + } +} + +impl ConsensusApi for MassPolicyTestConsensus { + fn build_block_template( + &self, + miner_data: MinerData, + mut tx_selector: Box, + _build_mode: TemplateBuildMode, + ) -> Result { + let mut txs = tx_selector.select_transactions(); + let coinbase_miner_data = miner_data.clone(); + txs.insert( + 0, + Transaction::new( + TX_VERSION, + vec![], + vec![TransactionOutput::new(SOMPI_PER_KASPA, coinbase_miner_data.script_public_key)], + 0, + SUBNETWORK_ID_COINBASE, + 0, + coinbase_miner_data.extra_data, + ), + ); + + let now = unix_now(); + let header = Header::new_finalized( + 0, + CompressedParents::default(), + calc_hash_merkle_root(txs.iter()), + ZERO_HASH, + ZERO_HASH, + now, + 0, + 0, + 0, + 0.into(), + 0, + ZERO_HASH, 
+ ); + + Ok(BlockTemplate::new(MutableBlock::new(header, txs), miner_data, false, now, 0, ZERO_HASH, vec![])) + } + + fn validate_mempool_transaction(&self, mutable_tx: &mut MutableTransaction, args: &TransactionValidationArgs) -> TxResult<()> { + self.validation_attempts.fetch_add(1, Ordering::Relaxed); + if !mutable_tx.is_verifiable() { + return Err(TxRuleError::MissingTxOutpoints); + } + let non_contextual_masses = mutable_tx.calculated_non_contextual_masses.expect("populated by mempool"); + let contextual_masses = self.calculate_transaction_contextual_masses(mutable_tx).ok_or(TxRuleError::MassIncomputable)?; + mutable_tx.tx.set_mass(contextual_masses.storage_mass); + + let total_in: u64 = mutable_tx.entries.iter().map(|entry| entry.as_ref().unwrap().amount).sum(); + let total_out: u64 = mutable_tx.tx.outputs.iter().map(|output| output.value).sum(); + let fee = total_in - total_out; + + if let Some(threshold) = args.feerate_threshold { + let mass = Mass::new(non_contextual_masses, contextual_masses); + let normalized_mass = mass.normalized_max(&self.mempool_mass_cofactors.get(self.get_virtual_daa_score())); + self.validated_thresholds.write().push((mutable_tx.id(), normalized_mass, threshold)); + if fee as f64 / normalized_mass as f64 <= threshold { + return Err(TxRuleError::FeerateTooLow); + } + } + + mutable_tx.calculated_fee = Some(fee); + Ok(()) + } + + fn validate_mempool_transactions_in_parallel( + &self, + transactions: &mut [MutableTransaction], + args: &TransactionValidationBatchArgs, + ) -> Vec> { + transactions.iter_mut().map(|tx| self.validate_mempool_transaction(tx, args.get(&tx.id()))).collect() + } + + fn populate_mempool_transactions_in_parallel(&self, transactions: &mut [MutableTransaction]) -> Vec> { + transactions.iter_mut().map(|tx| self.validate_mempool_transaction(tx, &Default::default())).collect() + } + + fn calculate_transaction_non_contextual_masses(&self, transaction: &Transaction) -> TxResult { + Ok(self + 
.non_contextual_mass_overrides
+            .read()
+            .get(&transaction.id())
+            .copied()
+            .unwrap_or_else(|| NonContextualMasses::new(1, transaction.payload.len() as u64)))
+    }
+
+    fn calculate_transaction_contextual_masses(&self, transaction: &MutableTransaction) -> Option<ContextualMasses> {
+        self.mass_calculator.calc_contextual_masses(&transaction.as_verifiable())
+    }
+
+    fn get_virtual_daa_score(&self) -> u64 {
+        self.virtual_daa_score.load(Ordering::Relaxed)
+    }
+
+    fn get_virtual_state_approx_id(&self) -> VirtualStateApproxId {
+        VirtualStateApproxId::new(self.get_virtual_daa_score(), 0.into(), ZERO_HASH)
+    }
+
+    fn modify_coinbase_payload(&self, payload: Vec<u8>, _miner_data: &MinerData) -> CoinbaseResult<Vec<u8>> {
+        Ok(payload)
+    }
+
+    fn calc_transaction_hash_merkle_root(&self, txs: &[Transaction]) -> Hash {
+        calc_hash_merkle_root(txs.iter())
+    }
+}
+
+#[test]
+fn mined_templates_respect_consensus_transient_mass_across_mempool_delay() {
+    let params = transient_activation_params();
+    let delay_daa_score = mempool_delay_daa_score(&params);
+    let cases = [
+        ("pre activation", ACTIVATION_DAA_SCORE - 1, 2usize, PRIOR_BLOCK_MASS_LIMIT),
+        ("at activation", ACTIVATION_DAA_SCORE, 2, PRIOR_BLOCK_MASS_LIMIT),
+        ("before delayed mempool activation", ACTIVATION_DAA_SCORE + delay_daa_score - 1, 2, PRIOR_BLOCK_MASS_LIMIT),
+        ("at delayed mempool activation", ACTIVATION_DAA_SCORE + delay_daa_score, 4, NEW_TRANSIENT_LIMIT),
+        ("after delayed mempool activation", ACTIVATION_DAA_SCORE + delay_daa_score + 1, 4, NEW_TRANSIENT_LIMIT),
+    ];
+
+    for (name, virtual_daa_score, expected_selected_txs, expected_transient_mass) in cases {
+        let consensus = Arc::new(MassPolicyTestConsensus::new(&params));
+        let mining_manager = mining_manager(&params);
+        consensus.set_virtual_daa_score(virtual_daa_score);
+
+        let txs = (0..4).map(|i| test_transaction(i, 250_000, 10_000)).collect::<Vec<_>>();
+        for tx in txs {
+            insert_transaction(&mining_manager, consensus.as_ref(), tx, RbfPolicy::Forbidden).unwrap();
+        }
+
+        let selected_txs =
selected_template_transactions(&mining_manager, consensus.as_ref());
+        let consensus_limits = params.block_mass_limits().get(virtual_daa_score);
+        assert_eq!(selected_txs.len(), expected_selected_txs, "{name}: unexpected selected tx count");
+        assert_transient_dominates(name, &selected_txs);
+        assert_eq!(total_transient_mass(&selected_txs), expected_transient_mass, "{name}: unexpected selected transient mass");
+        assert!(
+            total_transient_mass(&selected_txs) <= consensus_limits.transient,
+            "{name}: template transient mass exceeded consensus limit"
+        );
+        assert!(
+            total_compute_mass(&selected_txs) <= consensus_limits.compute,
+            "{name}: template compute mass exceeded consensus limit"
+        );
+    }
+}
+
+#[test]
+fn mined_template_handles_transactions_added_on_both_sides_of_mempool_delay() {
+    let params = transient_activation_params();
+    let delay_daa_score = mempool_delay_daa_score(&params);
+    let consensus = Arc::new(MassPolicyTestConsensus::new(&params));
+    let mining_manager = mining_manager(&params);
+
+    consensus.set_virtual_daa_score(ACTIVATION_DAA_SCORE + delay_daa_score - 1);
+    let old_tx = test_transaction(0, 250_000, 10_000);
+    insert_transaction(&mining_manager, consensus.as_ref(), old_tx.clone(), RbfPolicy::Forbidden).unwrap();
+
+    consensus.set_virtual_daa_score(ACTIVATION_DAA_SCORE + delay_daa_score);
+    let new_txs = [test_transaction(1, 250_000, 10_000), test_transaction(2, 250_000, 10_000)];
+    for tx in new_txs.iter().cloned() {
+        insert_transaction(&mining_manager, consensus.as_ref(), tx, RbfPolicy::Forbidden).unwrap();
+    }
+
+    let selected_txs = selected_template_transactions(&mining_manager, consensus.as_ref());
+    let selected_ids = selected_txs.iter().map(Transaction::id).collect::<HashSet<_>>();
+    assert_eq!(selected_txs.len(), 3);
+    assert!(selected_ids.contains(&old_tx.id()), "old pre-delay transaction should still be selectable");
+    for tx in new_txs {
+        assert!(selected_ids.contains(&tx.id()), "new post-delay transaction should be selectable");
+    }
+
assert_transient_dominates("mixed boundary", &selected_txs);
+    assert_eq!(total_transient_mass(&selected_txs), 750_000);
+    assert!(total_transient_mass(&selected_txs) <= params.block_mass_limits().get(consensus.get_virtual_daa_score()).transient);
+}
+
+#[test]
+fn rbf_lower_fee_replacement_is_rejected_at_delayed_mempool_activation_boundary() {
+    let params = transient_activation_params();
+    let delay_daa_score = mempool_delay_daa_score(&params);
+    let boundary_daa_score = ACTIVATION_DAA_SCORE + delay_daa_score;
+    let consensus = Arc::new(MassPolicyTestConsensus::new(&params));
+    let mining_manager = mining_manager(&params);
+
+    consensus.set_virtual_daa_score(boundary_daa_score - 1);
+    let owner = test_transaction(0, 500_000, 1_000);
+    insert_transaction(&mining_manager, consensus.as_ref(), owner.clone(), RbfPolicy::Forbidden).unwrap();
+
+    let replacement_before_boundary = double_spend_transaction(1, &owner, 500_000, 900);
+    assert!(
+        insert_transaction(&mining_manager, consensus.as_ref(), replacement_before_boundary, RbfPolicy::Allowed).is_err(),
+        "lower-fee RBF must fail before the delayed mempool activation"
+    );
+
+    consensus.set_virtual_daa_score(boundary_daa_score);
+    let replacement = double_spend_transaction(2, &owner, 500_000, 900);
+    assert!(
+        insert_transaction(&mining_manager, consensus.as_ref(), replacement.clone(), RbfPolicy::Allowed).is_err(),
+        "lower-fee RBF must still fail once both transactions are compared under the same relaxed mempool policy"
+    );
+
+    let threshold_checks = consensus.validated_thresholds();
+    assert!(
+        threshold_checks.iter().any(|(tx_id, normalized_mass, _)| *tx_id == replacement.id() && *normalized_mass == 250_000),
+        "replacement should have been checked against the post-delay normalized transient mass"
+    );
+    assert!(mining_manager.has_transaction(&owner.id(), crate::model::tx_query::TransactionQuery::All));
+    assert!(!mining_manager.has_transaction(&replacement.id(), crate::model::tx_query::TransactionQuery::All));
+}
+
+#[test]
+fn template_limits_reject_transient_tx_until_delayed_mempool_activation() {
+    let params = transient_activation_params();
+    let delay_daa_score = mempool_delay_daa_score(&params);
+    let boundary_daa_score = ACTIVATION_DAA_SCORE + delay_daa_score;
+    let consensus = Arc::new(MassPolicyTestConsensus::new(&params));
+    let mining_manager = mining_manager(&params);
+    let tx = test_transaction(0, 750_000, 10_000);
+
+    consensus.set_virtual_daa_score(boundary_daa_score - 1);
+    let err = match insert_transaction(&mining_manager, consensus.as_ref(), tx.clone(), RbfPolicy::Forbidden) {
+        Ok(_) => panic!("transient-heavy tx should exceed the pre-delay template mass limit"),
+        Err(err) => err,
+    };
+    assert!(
+        matches!(err, MiningManagerError::MempoolError(RuleError::RejectTransientMass(tx_id, 750_000, PRIOR_BLOCK_MASS_LIMIT)) if tx_id == tx.id()),
+        "expected transient-heavy tx to exceed pre-delay template mass limit, got {err:?}"
+    );
+    assert_eq!(consensus.validation_attempts(), 0, "transient limit rejection should happen before consensus in-context validation");
+
+    consensus.set_virtual_daa_score(boundary_daa_score);
+    insert_transaction(&mining_manager, consensus.as_ref(), tx.clone(), RbfPolicy::Forbidden)
+        .expect("same tx should fit once the delayed mempool transient limit activates");
+    assert!(mining_manager.has_transaction(&tx.id(), crate::model::tx_query::TransactionQuery::All));
+}
+
+#[test]
+fn template_limits_reject_compute_tx_before_consensus_validation() {
+    let params = transient_activation_params();
+    let consensus = Arc::new(MassPolicyTestConsensus::new(&params));
+    let mining_manager = mining_manager(&params);
+    let tx = test_transaction(0, 1, 10_000);
+    consensus.set_non_contextual_masses(tx.id(), NonContextualMasses::new(PRIOR_BLOCK_MASS_LIMIT + 1, 1));
+
+    let err = match insert_transaction(&mining_manager, consensus.as_ref(), tx.clone(), RbfPolicy::Forbidden) {
+        Ok(_) => panic!("compute-heavy tx should exceed the block-template compute limit"),
+        Err(err) =>
err,
+    };
+    assert!(
+        matches!(err, MiningManagerError::MempoolError(RuleError::RejectComputeMass(tx_id, compute, PRIOR_BLOCK_MASS_LIMIT))
+            if tx_id == tx.id() && compute == PRIOR_BLOCK_MASS_LIMIT + 1),
+        "expected tx to exceed block-template compute limit, got {err:?}"
+    );
+    assert_eq!(consensus.validation_attempts(), 0, "compute limit rejection should happen before consensus in-context validation");
+}
+
+#[test]
+fn template_limits_reject_storage_tx_after_consensus_validation() {
+    let params = transient_activation_params();
+    let consensus = Arc::new(MassPolicyTestConsensus::new(&params));
+    let mining_manager = mining_manager(&params);
+    let tx = test_transaction_with_input_amount(0, 1, 1, 2);
+
+    let err = match insert_transaction(&mining_manager, consensus.as_ref(), tx.clone(), RbfPolicy::Forbidden) {
+        Ok(_) => panic!("tiny-output tx should exceed the block-template storage mass limit"),
+        Err(err) => err,
+    };
+    assert!(
+        matches!(err, MiningManagerError::MempoolError(RuleError::RejectStorageMass(tx_id, storage, PRIOR_BLOCK_MASS_LIMIT))
+            if tx_id == tx.id() && storage > PRIOR_BLOCK_MASS_LIMIT),
+        "expected tx to exceed block-template storage mass limit, got {err:?}"
+    );
+    assert_eq!(consensus.validation_attempts(), 1, "storage limit rejection should happen after consensus in-context validation");
+}
+
+#[test]
+fn template_limits_reject_gas_even_when_non_standard_transactions_are_allowed() {
+    let params = transient_activation_params();
+    let consensus = Arc::new(MassPolicyTestConsensus::new(&params));
+    let mining_manager = mining_manager(&params);
+    let tx = test_transaction_with_gas(0, 10_000, 10_000, DEFAULT_GAS_PER_LANE_LIMIT + 1);
+
+    let err = match insert_transaction(&mining_manager, consensus.as_ref(), tx.clone(), RbfPolicy::Forbidden) {
+        Ok(_) => panic!("gas-heavy tx should exceed the block-template gas limit"),
+        Err(err) => err,
+    };
+    assert!(
+        matches!(
+            err,
+            MiningManagerError::MempoolError(RuleError::RejectGas(tx_id, gas,
DEFAULT_GAS_PER_LANE_LIMIT)) + if tx_id == tx.id() && gas == DEFAULT_GAS_PER_LANE_LIMIT + 1 + ), + "expected tx to exceed block-template gas limit, got {err:?}" + ); + assert_eq!(consensus.validation_attempts(), 0, "gas limit rejection should happen before consensus in-context validation"); +} + +fn transient_activation_params() -> Params { + let mut params = SIMNET_PARAMS.clone(); + params.prior_block_mass_limits = BlockMassLimits::with_shared_limit(PRIOR_BLOCK_MASS_LIMIT); + params.new_transient_mass_limit = NEW_TRANSIENT_LIMIT; + params.covenants_activation = ForkActivation::new(ACTIVATION_DAA_SCORE); + params +} + +fn mempool_delay_daa_score(params: &Params) -> u64 { + 24 * 60 * 60 * params.bps() +} + +fn mining_manager(params: &Params) -> MiningManager { + let config = Config::build_default(TARGET_TIME_PER_BLOCK, true, params.mempool_block_mass_limits(), BLOCK_LANE_LIMITS); + MiningManager::with_config(config, None, Arc::new(MiningCounters::default())) +} + +fn test_transaction(n: u64, transient_mass: u64, fee: u64) -> MutableTransaction { + test_transaction_with_gas(n, transient_mass, fee, 0) +} + +fn test_transaction_with_input_amount(n: u64, transient_mass: u64, fee: u64, input_amount: u64) -> MutableTransaction { + transaction_spending_outpoint(n, outpoint(n), transient_mass, fee, input_amount, 0) +} + +fn test_transaction_with_gas(n: u64, transient_mass: u64, fee: u64, gas: u64) -> MutableTransaction { + transaction_spending_outpoint(n, outpoint(n), transient_mass, fee, 10 * SOMPI_PER_KASPA, gas) +} + +fn double_spend_transaction(n: u64, owner: &MutableTransaction, transient_mass: u64, fee: u64) -> MutableTransaction { + transaction_spending_outpoint(n, owner.tx.inputs[0].previous_outpoint, transient_mass, fee, 10 * SOMPI_PER_KASPA, 0) +} + +fn transaction_spending_outpoint( + n: u64, + outpoint: TransactionOutpoint, + transient_mass: u64, + fee: u64, + input_amount: u64, + gas: u64, +) -> MutableTransaction { + let script_public_key = 
ScriptPublicKey::new(0, scriptvec![0x51]);
+    let input = TransactionInput::new(outpoint, vec![], MAX_TX_IN_SEQUENCE_NUM, 0);
+    let output = TransactionOutput::new(input_amount - fee, script_public_key.clone());
+    let tx =
+        Transaction::new(TX_VERSION, vec![input], vec![output], 0, SUBNETWORK_ID_NATIVE, gas, vec![n as u8; transient_mass as usize]);
+    let entry = UtxoEntry::new(input_amount, script_public_key, 0, false, None);
+    MutableTransaction::with_entries(tx.into(), vec![entry])
+}
+
+fn outpoint(n: u64) -> TransactionOutpoint {
+    TransactionOutpoint::new(Hash::from_u64_word(n), 0)
+}
+
+fn insert_transaction(
+    mining_manager: &MiningManager,
+    consensus: &dyn ConsensusApi,
+    tx: MutableTransaction,
+    rbf_policy: RbfPolicy,
+) -> crate::errors::MiningManagerResult<TransactionInsertion> {
+    mining_manager.validate_and_insert_mutable_transaction(consensus, tx, Priority::Low, Orphan::Forbidden, rbf_policy)
+}
+
+fn selected_template_transactions(mining_manager: &MiningManager, consensus: &dyn ConsensusApi) -> Vec<Transaction> {
+    let template =
+        mining_manager.get_block_template(consensus, &MinerData::new(ScriptPublicKey::new(0, scriptvec![]), vec![])).unwrap();
+    template.block.transactions.into_iter().skip(1).collect()
+}
+
+fn total_transient_mass(txs: &[Transaction]) -> u64 {
+    txs.iter().map(|tx| tx.payload.len() as u64).sum()
+}
+
+fn total_compute_mass(txs: &[Transaction]) -> u64 {
+    txs.len() as u64
+}
+
+fn assert_transient_dominates(name: &str, txs: &[Transaction]) {
+    assert!(total_transient_mass(txs) > total_compute_mass(txs) * 100_000, "{name}: expected transient mass to dominate compute mass");
+}
diff --git a/testing/integration/src/consensus_integration_tests.rs b/testing/integration/src/consensus_integration_tests.rs
index e53b290799..ff6fa6dddc 100644
--- a/testing/integration/src/consensus_integration_tests.rs
+++ b/testing/integration/src/consensus_integration_tests.rs
@@ -1743,7 +1743,8 @@ async fn push_limit_activation_test() {
         .edit_consensus_params(|p| {
p.coinbase_maturity = 0; let mass_limit = 100 * MAX_SCRIPT_ELEMENT_SIZE_POST_TOCCATA as u64; - p.block_mass_limits = kaspa_consensus_core::mass::BlockMassLimits::with_shared_limit(mass_limit); + p.prior_block_mass_limits = kaspa_consensus_core::mass::BlockMassLimits::with_shared_limit(mass_limit); + p.new_transient_mass_limit = mass_limit; p.max_script_public_key_len = 10 * MAX_SCRIPT_ELEMENT_SIZE_POST_TOCCATA; p.storage_mass_parameter = 1; p.covenants_activation = ForkActivation::new(ACTIVATION_DAA_SCORE) @@ -1925,6 +1926,7 @@ async fn payload_test() { }; consensus.validate_and_insert_block(funding_block.to_immutable()).virtual_state_task.await.unwrap(); + let transient_limit = config.params.block_mass_limits().before().transient; let mut txx = Transaction::new( 0, vec![TransactionInput::new(TransactionOutpoint { transaction_id: cb_id, index: 0 }, vec![], 0, 0)], @@ -1932,11 +1934,11 @@ async fn payload_test() { 0, SubnetworkId::default(), 0, - vec![0; (config.params.block_mass_limits.transient / TRANSIENT_BYTE_TO_MASS_FACTOR / 2) as usize], + vec![0; (transient_limit / TRANSIENT_BYTE_TO_MASS_FACTOR / 2) as usize], ); // Create a tx with transient mass over the block limit - txx.payload = vec![0; (config.params.block_mass_limits.transient / TRANSIENT_BYTE_TO_MASS_FACTOR + 100) as usize]; + txx.payload = vec![0; (transient_limit / TRANSIENT_BYTE_TO_MASS_FACTOR + 100) as usize]; let mut tx = MutableTransaction::from_tx(txx.clone()); // This triggers storage mass population consensus.validate_mempool_transaction(&mut tx, &TransactionValidationArgs::default()).unwrap(); @@ -1944,7 +1946,7 @@ async fn payload_test() { assert_match!(consensus_res, Err(RuleError::ExceedsTransientMassLimit(_, _))); // Fix the payload to be below the limit - txx.payload = vec![0; (config.params.block_mass_limits.transient / TRANSIENT_BYTE_TO_MASS_FACTOR / 2) as usize]; + txx.payload = vec![0; (transient_limit / TRANSIENT_BYTE_TO_MASS_FACTOR / 2) as usize]; let mut tx = 
MutableTransaction::from_tx(txx.clone()); // This triggers storage mass population consensus.validate_mempool_transaction(&mut tx, &TransactionValidationArgs::default()).unwrap(); @@ -1995,7 +1997,8 @@ async fn payload_for_native_tx_test() { consensus.init(); // Create transaction with large payload - let large_payload = vec![0u8; (config.params.block_mass_limits.transient / TRANSIENT_BYTE_TO_MASS_FACTOR / 2) as usize]; + let transient_limit = config.params.block_mass_limits().before().transient; + let large_payload = vec![0u8; (transient_limit / TRANSIENT_BYTE_TO_MASS_FACTOR / 2) as usize]; let mut tx_with_payload = Transaction::new( 0, vec![TransactionInput::new( @@ -2061,7 +2064,8 @@ fn build_p2pk_block( let genesis_header: Header = (&p.genesis).into(); p.genesis.hash = genesis_header.hash; p.mass_per_sig_op = mass_per_sig_op; - p.block_mass_limits = BlockMassLimits { compute: 10_000, storage: u64::MAX, transient: u64::MAX }; + p.prior_block_mass_limits = BlockMassLimits { compute: 10_000, storage: u64::MAX, transient: u64::MAX }; + p.new_transient_mass_limit = u64::MAX; p.covenants_activation = ForkActivation::always(); }) .build(); @@ -2273,7 +2277,7 @@ async fn testnet12_accepts_one_valid_stark_proof_but_rejects_two() { .to_immutable(); assert_match!( consensus.validate_and_insert_block(two_stark_block).virtual_state_task.await, - Err(RuleError::ExceedsComputeMassLimit(_, limit)) if limit == TESTNET12_PARAMS.block_mass_limits.compute + Err(RuleError::ExceedsComputeMassLimit(_, limit)) if limit == TESTNET12_PARAMS.block_mass_limits().after().compute ); consensus.shutdown(wait_handles); } diff --git a/testing/integration/src/mempool_benchmarks.rs b/testing/integration/src/mempool_benchmarks.rs index af8f08c377..2544b680e2 100644 --- a/testing/integration/src/mempool_benchmarks.rs +++ b/testing/integration/src/mempool_benchmarks.rs @@ -630,7 +630,7 @@ fn generate_stark_tx_dag( let num_outputs = expand_factor; let signature_script = Arc::new(signature_script); 
let mass_calculator = MassCalculator::new_with_consensus_params(params); - let mass_cofactors = params.block_mass_limits.cofactors(); + let mass_cofactors = params.block_mass_limits().after().cofactors(); let mut txs = Vec::with_capacity(target_levels * target_width); let mut logged_first_provisional_tx = false; diff --git a/wallet/core/src/tx/mass.rs b/wallet/core/src/tx/mass.rs index a67eed0db6..8021fff63b 100644 --- a/wallet/core/src/tx/mass.rs +++ b/wallet/core/src/tx/mass.rs @@ -53,8 +53,9 @@ pub fn calc_minimum_required_transaction_relay_fee(mass: u64) -> u64 { /// /// It is exposed by `MiningManager` for use by transaction generators and wallets. pub fn is_transaction_output_dust(transaction_output: &TransactionOutput) -> bool { - // Unspendable outputs are considered dust. - // + // TODO(post-toccata): review this wallet-side dust helper against the updated mempool + // standardness policy. Mempool no longer rejects dust by threshold, but wallet generation + // may still want a local small-output/change-disposal heuristic. // TODO: call script engine when available // if txscript.is_unspendable(transaction_output.script_public_key.script()) { // return true @@ -81,7 +82,6 @@ pub fn is_transaction_output_dust(transaction_output: &TransactionOutput) -> boo // The most common scripts are pay-to-pubkey, and as per the above // breakdown, the minimum size of a p2pk input script is 148 bytes. So // that figure is used. - // let output = transaction_output.clone().try_into().unwrap(); let total_serialized_size = transaction_output_serialized_byte_size(transaction_output) + 148; // The output is considered dust if the cost to the network to spend the