diff --git a/mining/src/feerate/mod.rs b/mining/src/feerate/mod.rs index dd912f8627..8a6e863704 100644 --- a/mining/src/feerate/mod.rs +++ b/mining/src/feerate/mod.rs @@ -117,6 +117,10 @@ impl FeerateEstimator { /// Returns the feerate value for which the integral area is `frac` of the total area between `lower` and `upper`. fn quantile(&self, lower: f64, upper: f64, frac: f64) -> f64 { assert!((0f64..=1f64).contains(&frac)); + if lower == upper { + // A collapsed interval has only one quantile; avoid formula roundoff. + return lower; + } assert!(0.0 < lower && lower <= upper, "{lower}, {upper}"); let (c1, c2) = (self.inclusion_interval, self.total_weight); if c1 == 0.0 || c2 == 0.0 { diff --git a/mining/src/manager_tests.rs b/mining/src/manager_tests.rs index abb436abe0..42bbe25354 100644 --- a/mining/src/manager_tests.rs +++ b/mining/src/manager_tests.rs @@ -1042,7 +1042,7 @@ mod tests { consensus.add_transaction(child_tx_2.clone(), 3); // Add to mempool a transaction that spends child_tx_2 (as high priority) - let spending_tx = create_transaction(&child_tx_2, 1_000); + let spending_tx = create_transaction(&child_tx_2, DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE); let result = mining_manager.validate_and_insert_transaction( consensus.as_ref(), spending_tx.clone(), @@ -1165,6 +1165,7 @@ mod tests { validate_and_insert_mutable_transaction(&mining_manager, consensus.as_ref(), tx).unwrap(); } assert_eq!(mining_manager.get_all_transactions(TransactionQuery::TransactionsOnly).0.len(), TX_COUNT); + let high_fee = MAX_BLOCK_MASS * DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE / 1000; let heavy_tx_low_fee = { let mut heavy_tx = create_transaction_with_utxo_entry(TX_COUNT as u32, 0); @@ -1182,7 +1183,7 @@ mod tests { let mut inner_tx = (*(heavy_tx.tx)).clone(); inner_tx.payload = vec![0u8; TX_COUNT / 2 * tx_size - inner_tx.estimate_mem_bytes()]; heavy_tx.tx = inner_tx.into(); - heavy_tx.calculated_fee = Some(500_000); + heavy_tx.calculated_fee = Some(high_fee); heavy_tx }; 
validate_and_insert_mutable_transaction(&mining_manager, consensus.as_ref(), heavy_tx_high_fee.clone()).unwrap(); @@ -1194,12 +1195,37 @@ mod tests { let mut inner_tx = (*(heavy_tx.tx)).clone(); inner_tx.payload = vec![0u8; size_limit]; heavy_tx.tx = inner_tx.into(); - heavy_tx.calculated_fee = Some(500_000); + heavy_tx.calculated_fee = Some(high_fee); heavy_tx }; assert!(validate_and_insert_mutable_transaction(&mining_manager, consensus.as_ref(), too_big_tx.clone()).is_err()); } + #[test] + fn test_realtime_feerate_estimations_respect_minimum_standard_feerate() { + let minimum_feerate = DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE as f64 / 1000.0; + + for tx_count in [0, 1, 10, 100, 500] { + let consensus = Arc::new(ConsensusMock::new()); + let mining_manager = default_mining_manager(); + + for i in 0..tx_count { + let tx = create_transaction_with_utxo_entry(i, 0); + validate_and_insert_mutable_transaction(&mining_manager, consensus.as_ref(), tx).unwrap(); + } + + let estimations = mining_manager.get_realtime_feerate_estimations(); + for bucket in estimations.ordered_buckets() { + assert!( + bucket.feerate >= minimum_feerate, + "mempool with {tx_count} txs returned bucket feerate {} below minimum standard feerate {}", + bucket.feerate, + minimum_feerate + ); + } + } + } + fn validate_and_insert_mutable_transaction( mining_manager: &MiningManager, consensus: &dyn ConsensusApi, @@ -1470,7 +1496,7 @@ mod tests { fn create_child_and_parent_txs_and_add_parent_to_consensus(consensus: &Arc) -> Transaction { let parent_tx = create_transaction_without_input(vec![500 * SOMPI_PER_KASPA]); - let child_tx = create_transaction(&parent_tx, 1000); + let child_tx = create_transaction(&parent_tx, DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE); consensus.add_transaction(parent_tx, 1); child_tx } diff --git a/mining/src/mempool/check_transaction_standard.rs b/mining/src/mempool/check_transaction_standard.rs index 3202b41825..6e1c19e21c 100644 --- a/mining/src/mempool/check_transaction_standard.rs 
+++ b/mining/src/mempool/check_transaction_standard.rs @@ -132,6 +132,9 @@ mod tests { use std::sync::Arc; const RELAY_FEE_TEST_MASS: u64 = 500_000; + const fn default_minimum_relay_fee_for_mass(mass: u64) -> u64 { + mass * DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE / 1000 + } #[test] fn test_calc_min_required_tx_relay_fee() { @@ -155,13 +158,13 @@ mod tests { name: "100 bytes with default minimum relay fee", size: 100, minimum_relay_transaction_fee: DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE, - want: 100, + want: default_minimum_relay_fee_for_mass(100), }, Test { name: "large relay fee test mass with default minimum relay fee", size: RELAY_FEE_TEST_MASS, minimum_relay_transaction_fee: DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE, - want: RELAY_FEE_TEST_MASS, + want: default_minimum_relay_fee_for_mass(RELAY_FEE_TEST_MASS), }, Test { name: "1500 bytes with 5000 relay fee", size: 1500, minimum_relay_transaction_fee: 5000, want: 7500 }, Test { name: "1500 bytes with 3000 relay fee", size: 1500, minimum_relay_transaction_fee: 3000, want: 4500 }, @@ -369,26 +372,38 @@ mod tests { mtx } + let insufficient_fee_mass = 10_000; + let insufficient_fee_minimum = default_minimum_relay_fee_for_mass(insufficient_fee_mass); + let insufficient_fee = insufficient_fee_minimum - 1; + let tests = vec![ Test { name: "standard input with sufficient fee", - mtx: new_mtx(standard_script_public_key.clone(), NonContextualMasses::new(1_000, 500), 1_000), + mtx: new_mtx( + standard_script_public_key.clone(), + NonContextualMasses::new(1_000, 500), + DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE, + ), expected: Expected::Standard, }, Test { name: "non-standard input script class", - mtx: new_mtx(non_standard_script_public_key, NonContextualMasses::new(1_000, 1_000), 1_000), + mtx: new_mtx( + non_standard_script_public_key, + NonContextualMasses::new(1_000, 1_000), + DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE, + ), expected: Expected::RejectInputScriptClass, }, Test { name: "compute mass triggers insufficient relay fee", - 
mtx: new_mtx(standard_script_public_key.clone(), NonContextualMasses::new(10_000, 1), 9_999), - expected: Expected::RejectInsufficientFee { fee: 9_999, minimum_fee: 10_000 }, + mtx: new_mtx(standard_script_public_key.clone(), NonContextualMasses::new(insufficient_fee_mass, 1), insufficient_fee), + expected: Expected::RejectInsufficientFee { fee: insufficient_fee, minimum_fee: insufficient_fee_minimum }, }, Test { name: "transient mass triggers insufficient relay fee", - mtx: new_mtx(standard_script_public_key, NonContextualMasses::new(1, 10_000), 9_999), - expected: Expected::RejectInsufficientFee { fee: 9_999, minimum_fee: 10_000 }, + mtx: new_mtx(standard_script_public_key, NonContextualMasses::new(1, insufficient_fee_mass), insufficient_fee), + expected: Expected::RejectInsufficientFee { fee: insufficient_fee, minimum_fee: insufficient_fee_minimum }, }, ]; diff --git a/mining/src/mempool/config.rs b/mining/src/mempool/config.rs index 66bb381ada..c3042eebd9 100644 --- a/mining/src/mempool/config.rs +++ b/mining/src/mempool/config.rs @@ -19,7 +19,8 @@ pub(crate) const DEFAULT_MAXIMUM_ORPHAN_TRANSACTION_COUNT: u64 = 500; /// DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE specifies the minimum transaction fee for a transaction to be accepted to /// the mempool and relayed. It is specified in sompi per 1kg (or 1000 grams) of transaction mass. -pub(crate) const DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE: u64 = 1000; +/// The default is 100 sompi per gram. 
+pub(crate) const DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE: u64 = 100_000; #[derive(Clone, Debug)] pub struct Config { diff --git a/mining/src/mempool/model/frontier.rs b/mining/src/mempool/model/frontier.rs index a880fcca3f..c54824f712 100644 --- a/mining/src/mempool/model/frontier.rs +++ b/mining/src/mempool/model/frontier.rs @@ -435,9 +435,11 @@ impl Frontier { #[cfg(test)] mod tests { use super::*; + use crate::mempool::config::Config; use feerate_key::tests::build_feerate_key; use itertools::Itertools; use kaspa_consensus_core::{ + mass::BlockMassLimits, subnets::SubnetworkId, tx::{Transaction, TransactionInput, TransactionOutpoint}, }; @@ -579,10 +581,31 @@ mod tests { /// Epsilon used for various test comparisons const EPS: f64 = 0.000001; + /// Test estimation floors: legacy behavior, current policy, and a higher stress floor. + const TEST_MIN_FEERATES: [f64; 3] = [1.0, 100.0, 1000.0]; + + fn minimum_standard_feerate() -> f64 { + Config::build_default(1_000, false, BlockMassLimits::with_shared_limit(500_000), DEFAULT_BLOCK_LANE_LIMITS).minimum_feerate() + } + + fn assert_valid_feerate_buckets(estimations: &crate::feerate::FeerateEstimations, min_feerate: f64) { + for bucket in estimations.ordered_buckets() { + // Test for the absence of NaN, infinite or zero values in buckets. 
+ assert!( + bucket.feerate.is_normal() && bucket.feerate >= min_feerate - EPS, + "bucket feerate {} must be finite and at least {}", + bucket.feerate, + min_feerate + ); + assert!( + bucket.estimated_seconds.is_normal() && bucket.estimated_seconds > 0.0, + "bucket estimated seconds must be a finite number greater than zero" + ); + } + } #[test] fn test_feerate_estimator() { - const MIN_FEERATE: f64 = 1.0; let mut rng = thread_rng(); let cap = 2000; let mut map = HashMap::with_capacity(cap); @@ -607,63 +630,44 @@ mod tests { let args = FeerateEstimatorArgs { network_blocks_per_second: 1, maximum_mass_per_block: 500_000 }; // We are testing that the build function actually returns and is not looping indefinitely let estimator = frontier.build_feerate_estimator(args); - let estimations = estimator.calc_estimations(MIN_FEERATE); - let buckets = estimations.ordered_buckets(); - // Test for the absence of NaN, infinite or zero values in buckets - for b in buckets.iter() { - assert!( - b.feerate.is_normal() && b.feerate >= MIN_FEERATE - EPS, - "bucket feerate must be a finite number greater or equal to the minimum standard feerate" - ); - assert!( - b.estimated_seconds.is_normal() && b.estimated_seconds > 0.0, - "bucket estimated seconds must be a finite number greater than zero" - ); + for min_feerate in TEST_MIN_FEERATES { + let estimations = estimator.calc_estimations(min_feerate); + assert_valid_feerate_buckets(&estimations, min_feerate); + dbg!(len, min_feerate, &estimator); + dbg!(estimations); } - dbg!(len, estimator); - dbg!(estimations); } } #[test] fn test_constant_feerate_estimator() { - const MIN_FEERATE: f64 = 1.0; let cap = 20_000; - let mut map = HashMap::with_capacity(cap); - for i in 0..cap as u64 { - let mass: u64 = 1650; - let fee = (mass as f64 * MIN_FEERATE) as u64; - let key = build_feerate_key(fee, mass, i); - map.insert(key.tx.id(), key); - } - - for len in [0, 1, 10, 100, 200, 300, 500, 750, cap / 2, (cap * 2) / 3, (cap * 4) / 5, (cap * 5) / 6, 
cap] { - println!(); - println!("Testing a frontier with {} txs...", len.min(cap)); - let mut frontier = Frontier::new(1.0); - for item in map.values().take(len).cloned() { - frontier.insert(item).then_some(()).unwrap(); + for min_feerate in TEST_MIN_FEERATES { + let mut map = HashMap::with_capacity(cap); + for i in 0..cap as u64 { + let mass: u64 = 1650; + let fee = (mass as f64 * min_feerate) as u64; + let key = build_feerate_key(fee, mass, i); + map.insert(key.tx.id(), key); } - let args = FeerateEstimatorArgs { network_blocks_per_second: 1, maximum_mass_per_block: 500_000 }; - // We are testing that the build function actually returns and is not looping indefinitely - let estimator = frontier.build_feerate_estimator(args); - let estimations = estimator.calc_estimations(MIN_FEERATE); - let buckets = estimations.ordered_buckets(); - // Test for the absence of NaN, infinite or zero values in buckets - for b in buckets.iter() { - assert!( - b.feerate.is_normal() && b.feerate >= MIN_FEERATE - EPS, - "bucket feerate must be a finite number greater or equal to the minimum standard feerate" - ); - assert!( - b.estimated_seconds.is_normal() && b.estimated_seconds > 0.0, - "bucket estimated seconds must be a finite number greater than zero" - ); + for len in [0, 1, 10, 100, 200, 300, 500, 750, cap / 2, (cap * 2) / 3, (cap * 4) / 5, (cap * 5) / 6, cap] { + println!(); + println!("Testing a frontier with {} txs and min feerate {}...", len.min(cap), min_feerate); + let mut frontier = Frontier::new(1.0); + for item in map.values().take(len).cloned() { + frontier.insert(item).then_some(()).unwrap(); + } + + let args = FeerateEstimatorArgs { network_blocks_per_second: 1, maximum_mass_per_block: 500_000 }; + // We are testing that the build function actually returns and is not looping indefinitely + let estimator = frontier.build_feerate_estimator(args); + let estimations = estimator.calc_estimations(min_feerate); + assert_valid_feerate_buckets(&estimations, min_feerate); + 
dbg!(len, min_feerate, estimator); + dbg!(estimations); } - dbg!(len, estimator); - dbg!(estimations); } } @@ -719,7 +723,6 @@ mod tests { #[test] fn test_feerate_estimator_with_less_than_block_capacity() { - const MIN_FEERATE: f64 = 1.0; let mut map = HashMap::new(); for i in 0..304 { let mass: u64 = 1650; @@ -738,23 +741,71 @@ mod tests { let args = FeerateEstimatorArgs { network_blocks_per_second: 1, maximum_mass_per_block: 500_000 }; // We are testing that the build function actually returns and is not looping indefinitely let estimator = frontier.build_feerate_estimator(args); - let estimations = estimator.calc_estimations(MIN_FEERATE); - let buckets = estimations.ordered_buckets(); - // Test for the absence of NaN, infinite or zero values in buckets - for b in buckets.iter() { - // Expect min feerate bcs blocks are not full - assert!( - (b.feerate - MIN_FEERATE).abs() <= EPS, - "bucket feerate is expected to be equal to the minimum standard feerate" - ); + for min_feerate in TEST_MIN_FEERATES { + let estimations = estimator.calc_estimations(min_feerate); + let buckets = estimations.ordered_buckets(); + // Test for the absence of NaN, infinite or zero values in buckets + for b in buckets.iter() { + // Expect min feerate bcs blocks are not full + assert!( + (b.feerate - min_feerate).abs() <= EPS, + "bucket feerate is expected to be equal to the minimum standard feerate" + ); + assert!( + b.estimated_seconds.is_normal() && b.estimated_seconds > 0.0 && b.estimated_seconds <= 1.0, + "bucket estimated seconds must be a finite number greater than zero & less than 1.0" + ); + } + dbg!(len, min_feerate, &estimator); + dbg!(estimations); + } + } + } + + #[test] + fn test_feerate_estimator_buckets_never_drop_below_minimum_standard_feerate() { + let minimum_feerate = minimum_standard_feerate(); + let scenarios = [ + ("empty", vec![]), + ("single below-floor transaction", vec![(1, 1000)]), + ("single minimum-feerate transaction", vec![((minimum_feerate * 1000.0) as u64, 
1000)]), + ("less than one block", (0..100).map(|i| (50_000 + i, 1650)).collect_vec()), + ("around one block", (0..304).map(|i| (50_000 + i, 1650)).collect_vec()), + ("many low-feerate transactions", (0..2000).map(|i| (1 + i, 1650)).collect_vec()), + ( + "high outliers over low-feerate backlog", + (0..2000).map(|i| if i < 10 { (50_000_000_000 + i, 1650) } else { (1 + i, 1650) }).collect_vec(), + ), + ( + "mixed masses over several blocks", + (0..2000) + .map(|i| { + let mass = if i % 5 == 0 { 90_000 } else { 1650 }; + let fee = if i % 11 == 0 { 100_000_000 + i } else { 1 + i }; + (fee, mass) + }) + .collect_vec(), + ), + ]; + + for (name, txs) in scenarios { + let mut frontier = Frontier::new(1.0); + for (i, (fee, mass)) in txs.into_iter().enumerate() { + frontier.insert(build_feerate_key(fee, mass, i as u64)).then_some(()).unwrap(); + } + + let args = FeerateEstimatorArgs { network_blocks_per_second: 1, maximum_mass_per_block: 500_000 }; + let estimator = frontier.build_feerate_estimator(args); + let estimations = estimator.calc_estimations(minimum_feerate); + assert_valid_feerate_buckets(&estimations, minimum_feerate); + + for (higher, lower) in estimations.ordered_buckets().into_iter().tuple_windows() { assert!( - b.estimated_seconds.is_normal() && b.estimated_seconds > 0.0 && b.estimated_seconds <= 1.0, - "bucket estimated seconds must be a finite number greater than zero & less than 1.0" + higher.feerate + EPS >= lower.feerate, + "{name}: buckets must remain ordered by decreasing feerate: {higher:?}, {lower:?}" ); } - dbg!(len, estimator); - dbg!(estimations); } } } diff --git a/rothschild/src/main.rs b/rothschild/src/main.rs index 206fd03c61..bae235ad94 100644 --- a/rothschild/src/main.rs +++ b/rothschild/src/main.rs @@ -5,7 +5,7 @@ use itertools::Itertools; use kaspa_addresses::{Address, Prefix, Version}; use kaspa_consensus_core::{ config::params::{TESTNET_PARAMS, TESTNET12_PARAMS}, - constants::{SOMPI_PER_KASPA, TX_VERSION, TX_VERSION_TOCCATA}, + 
constants::{SOMPI_PER_KASPA, TRANSIENT_BYTE_TO_MASS_FACTOR, TX_VERSION, TX_VERSION_TOCCATA}, hashing::covenant_id::covenant_id, network::NetworkType, sign::sign, @@ -30,7 +30,7 @@ use secp256k1::{ use tokio::time::{Instant, MissedTickBehavior, interval}; const DEFAULT_SEND_AMOUNT: u64 = 10 * SOMPI_PER_KASPA; -const FEE_RATE: u64 = 10; +const FEE_RATE: u64 = 101; const MILLIS_PER_TICK: u64 = 10; const ADDRESS_VERSION: Version = Version::PubKey; @@ -577,14 +577,18 @@ fn clean_old_pending_outpoints(pending: &mut HashMap u64 { - FEE_RATE * estimated_mass(num_utxos, num_outs) +fn required_fee(num_utxos: usize, num_outs: u64, payload_size: usize) -> u64 { + FEE_RATE * estimated_compute_mass(num_utxos, num_outs).max(estimated_transient_mass(num_utxos, num_outs, payload_size)) } -fn estimated_mass(num_utxos: usize, num_outs: u64) -> u64 { +fn estimated_compute_mass(num_utxos: usize, num_outs: u64) -> u64 { 200 + 34 * num_outs + 1000 * (num_utxos as u64) } +fn estimated_transient_mass(num_utxos: usize, num_outs: u64, payload_size: usize) -> u64 { + (200 + 34 * num_outs + 1000 * (num_utxos as u64) + payload_size as u64) * TRANSIENT_BYTE_TO_MASS_FACTOR +} + fn apply_random_covenant_binding_from_inputs(tx: &mut MutableTransaction, with_covenant_id: bool) { if !with_covenant_id { return; @@ -679,7 +683,7 @@ fn select_utxos( selected_amount += entry.amount; selected.push((outpoint, entry)); - let fee = required_fee(selected.len(), num_outs); + let fee = required_fee(selected.len(), num_outs, tx_config.payload_size); let priority_fee = if tx_config.randomize_fee && tx_config.priority_fee > 0 { rng.gen_range(0..tx_config.priority_fee) } else { diff --git a/testing/integration/src/common/utils.rs b/testing/integration/src/common/utils.rs index 66bb2df099..62d23d6397 100644 --- a/testing/integration/src/common/utils.rs +++ b/testing/integration/src/common/utils.rs @@ -2,7 +2,7 @@ use super::client::ListeningClient; use itertools::Itertools; use kaspa_addresses::Address; use 
kaspa_consensus_core::{ - constants::{TX_VERSION, TX_VERSION_TOCCATA}, + constants::{TRANSIENT_BYTE_TO_MASS_FACTOR, TX_VERSION, TX_VERSION_TOCCATA}, header::Header, mass::SigopCount, sign::sign, @@ -34,13 +34,23 @@ use tokio::time::timeout; pub(crate) const EXPAND_FACTOR: u64 = 2; pub(crate) const CONTRACT_FACTOR: u64 = 2; -const fn estimated_mass(num_inputs: usize, num_outputs: u64) -> u64 { +const fn estimated_compute_mass(num_inputs: usize, num_outputs: u64) -> u64 { 200 + 34 * num_outputs + 1000 * (num_inputs as u64) } +const fn estimated_transient_mass(num_inputs: usize, num_outputs: u64, extra_serialized_bytes: u64) -> u64 { + (200 + 34 * num_outputs + 1000 * (num_inputs as u64) + extra_serialized_bytes) * TRANSIENT_BYTE_TO_MASS_FACTOR +} + pub const fn required_fee(num_inputs: usize, num_outputs: u64) -> u64 { - const FEE_RATE: u64 = 10; - FEE_RATE * estimated_mass(num_inputs, num_outputs) + required_fee_with_extra_serialized_bytes(num_inputs, num_outputs, 0) +} + +pub const fn required_fee_with_extra_serialized_bytes(num_inputs: usize, num_outputs: u64, extra_serialized_bytes: u64) -> u64 { + const FEE_RATE: u64 = 101; + let compute_mass = estimated_compute_mass(num_inputs, num_outputs); + let transient_mass = estimated_transient_mass(num_inputs, num_outputs, extra_serialized_bytes); + FEE_RATE * if compute_mass > transient_mass { compute_mass } else { transient_mass } } /// Builds a TX DAG based on the initial UTXO set and on constant params diff --git a/testing/integration/src/daemon_integration_tests.rs b/testing/integration/src/daemon_integration_tests.rs index 02c40f9a59..e1374c4341 100644 --- a/testing/integration/src/daemon_integration_tests.rs +++ b/testing/integration/src/daemon_integration_tests.rs @@ -2,7 +2,7 @@ use crate::common::{ client::ListeningClient, client_notify::ChannelNotify, daemon::Daemon, - utils::{fetch_spendable_utxos, mine_block, required_fee, wait_for}, + utils::{fetch_spendable_utxos, mine_block, required_fee, 
required_fee_with_extra_serialized_bytes, wait_for}, }; use kaspa_addresses::Address; use kaspa_alloc::init_allocator_with_default_settings; @@ -816,9 +816,9 @@ async fn daemon_pruning_seqcommit_sync_test() { // still within the finality depth of the spending block. let outpoint = TransactionOutpoint::new(seqcommit_tx.id(), 0); let pay_spk = pay_to_address_script(&miner_address); - let spend_fee = required_fee(1, 1); - let spend_value = total_in - fee - spend_fee; let signature_script = pay_to_script_hash_signature_script(redeem_script, vec![]).expect("canonical signature script"); + let spend_fee = required_fee_with_extra_serialized_bytes(1, 1, signature_script.len() as u64); + let spend_value = total_in - fee - spend_fee; let spend_tx = Transaction::new( TX_VERSION, vec![TransactionInput::new(outpoint, signature_script, 0, 1)], diff --git a/wallet/core/src/tx/generator/generator.rs b/wallet/core/src/tx/generator/generator.rs index 1e7802beb0..0d55377d8b 100644 --- a/wallet/core/src/tx/generator/generator.rs +++ b/wallet/core/src/tx/generator/generator.rs @@ -1036,58 +1036,91 @@ impl Generator { .. } = data; - let change_output_value = change_output_value.unwrap_or(0); + let initial_transaction_fees = transaction_fees; + let mut transaction_fees = transaction_fees; + let mut change_output_value = change_output_value.unwrap_or(0); + let priority_fees = match self.inner.final_transaction_priority_fee { + Fees::SenderPays(priority_fees) | Fees::ReceiverPays(priority_fees) => priority_fees, + Fees::None => 0, + }; - let mut final_outputs = self.inner.final_transaction_outputs.clone(); + // Final output values affect storage mass, and storage mass can raise the relay fee. + // Rebuild until the actual transaction mass is covered by the final fee/output split. 
+ let (tx, transaction_mass, aggregate_output_value, change_output_index) = loop { + let mut final_outputs = self.inner.final_transaction_outputs.clone(); + + if self.inner.final_transaction_priority_fee.receiver_pays() { + let output = final_outputs.get_mut(0).expect("include fees requires one output"); + if aggregate_input_value < output.value { + if aggregate_input_value <= transaction_fees { + return Err(Error::TransactionFeesAreTooHigh); + } + output.value = aggregate_input_value - transaction_fees; + } else { + output.value = output.value.checked_sub(transaction_fees).ok_or(Error::TransactionFeesAreTooHigh)?; + } + } - if self.inner.final_transaction_priority_fee.receiver_pays() { - let output = final_outputs.get_mut(0).expect("include fees requires one output"); - if aggregate_input_value < output.value { - output.value = aggregate_input_value - transaction_fees; + let change_output_index = if change_output_value > 0 { + let change_output_index = Some(final_outputs.len()); + final_outputs + .push(TransactionOutput::new(change_output_value, pay_to_address_script(&self.inner.change_address))); + change_output_index } else { - output.value -= transaction_fees; + None + }; + + let aggregate_output_value = final_outputs.iter().map(|output| output.value).sum::(); + // TODO - validate that this is still correct + // `Fees::ReceiverPays` processing can result in outputs being larger than inputs + if aggregate_output_value > aggregate_input_value { + return Err(Error::InsufficientFunds { + additional_needed: aggregate_output_value - aggregate_input_value, + origin: "final", + }); } - } - let change_output_index = if change_output_value > 0 { - let change_output_index = Some(final_outputs.len()); - final_outputs.push(TransactionOutput::new(change_output_value, pay_to_address_script(&self.inner.change_address))); - change_output_index - } else { - None - }; - - let aggregate_output_value = final_outputs.iter().map(|output| output.value).sum::(); - // TODO - validate 
that this is still correct - // `Fees::ReceiverPays` processing can result in outputs being larger than inputs - if aggregate_output_value > aggregate_input_value { - return Err(Error::InsufficientFunds { - additional_needed: aggregate_output_value - aggregate_input_value, - origin: "final", - }); - } + let tx = Transaction::new( + 0, + inputs.clone(), + final_outputs, + 0, + SUBNETWORK_ID_NATIVE, + 0, + self.inner.final_transaction_payload.clone(), + ); + + let transaction_mass = self.inner.mass_calculator.calc_overall_mass_for_unsigned_consensus_transaction( + &tx, + &utxo_entry_references, + self.inner.minimum_signatures, + )?; + let required_transaction_fees = self.calc_fees_from_mass(transaction_mass) + priority_fees; + if required_transaction_fees <= transaction_fees { + break (tx, transaction_mass, aggregate_output_value, change_output_index); + } - let tx = Transaction::new( - 0, - inputs, - final_outputs, - 0, - SUBNETWORK_ID_NATIVE, - 0, - self.inner.final_transaction_payload.clone(), - ); + let additional_fees = required_transaction_fees - transaction_fees; + transaction_fees = required_transaction_fees; + if change_output_value >= additional_fees { + change_output_value -= additional_fees; + } else { + let additional_needed = additional_fees - change_output_value; + change_output_value = 0; + if self.inner.final_transaction_priority_fee.sender_pays() { + return Err(Error::InsufficientFunds { additional_needed, origin: "final fees" }); + } + } + }; - let transaction_mass = self.inner.mass_calculator.calc_overall_mass_for_unsigned_consensus_transaction( - &tx, - &utxo_entry_references, - self.inner.minimum_signatures, - )?; if transaction_mass > MAXIMUM_STANDARD_TRANSACTION_MASS { // this should never occur as we should not produce transactions higher than the mass limit return Err(Error::MassCalculationError); } tx.set_mass(transaction_mass); + // The final fee loop may discover a higher required fee than the earlier stage estimate. 
+ context.aggregate_fees += transaction_fees - initial_transaction_fees; context.aggregate_mass += transaction_mass; context.final_transaction_id = Some(tx.id()); context.number_of_stages += 1; diff --git a/wallet/core/src/tx/generator/test.rs b/wallet/core/src/tx/generator/test.rs index 88ca3f130e..e37cefc89e 100644 --- a/wallet/core/src/tx/generator/test.rs +++ b/wallet/core/src/tx/generator/test.rs @@ -2,7 +2,7 @@ use crate::error::Error; use crate::result::Result; -use crate::tx::{Fees, MassCalculator, PaymentDestination}; +use crate::tx::{Fees, MassCalculator, PaymentDestination, STANDARD_OUTPUT_SIZE_PLUS_INPUT_SIZE_3X}; use crate::utxo::UtxoEntryReference; use crate::{tx::PaymentOutputs, utils::kaspa_to_sompi}; use kaspa_addresses::Address; @@ -220,6 +220,9 @@ where assert_eq!(pt.inner.mass, calculated_mass, "pending transaction mass does not match calculated mass"); + let is_acceptable_change_disposal = + |value| calc.is_dust(value) || value <= calc.calc_minimum_transaction_fee_from_mass(STANDARD_OUTPUT_SIZE_PLUS_INPUT_SIZE_3X); + match expected.priority_fees { FeesExpected::Sender(priority_fees) => { let total_fees_expected = priority_fees + calculated_fees; @@ -230,13 +233,12 @@ where pt_fees ); - // test that fee difference is below dust value as this condition can - // occur if a dust output has been consumed to fees, resulting in - // mismatch between calculated fees and PT fees - let dust_disposal_fees = pt_fees - total_fees_expected; - if !calc.is_dust(dust_disposal_fees) { + // Test that fee difference is small enough to be consumed as change disposal, + // either by legacy dust policy or by current relay-fee cost of preserving it. 
+ let change_disposal_fees = pt_fees - total_fees_expected; + if !is_acceptable_change_disposal(change_disposal_fees) { panic!( - "[Fees SENDER] dust_disposal_fees test failure - pt fees: {pt_fees} expected fees: {total_fees_expected} difference: {dust_disposal_fees}" + "[Fees SENDER] change_disposal_fees test failure - pt fees: {pt_fees} expected fees: {total_fees_expected} difference: {change_disposal_fees}" ); } @@ -255,13 +257,12 @@ where pt_fees ); - // test that fee difference is below dust value as this condition can - // occur if a dust output has been consumed to fees, resulting in - // mismatch between calculated fees and PT fees - let dust_disposal_fees = pt_fees - total_fees_expected; - if !calc.is_dust(dust_disposal_fees) { + // Test that fee difference is small enough to be consumed as change disposal, + // either by legacy dust policy or by current relay-fee cost of preserving it. + let change_disposal_fees = pt_fees - total_fees_expected; + if !is_acceptable_change_disposal(change_disposal_fees) { panic!( - "[Fees RECEIVER] dust_disposal_fees test failure - pt fees: {pt_fees} expected fees: {total_fees_expected} difference: {dust_disposal_fees}" + "[Fees RECEIVER] change_disposal_fees test failure - pt fees: {pt_fees} expected fees: {total_fees_expected} difference: {change_disposal_fees}" ); } @@ -274,13 +275,12 @@ where FeesExpected::None => { assert!(calculated_fees <= pt_fees, "total fees expected: {} is greater than PT fees: {}", calculated_fees, pt_fees); - // test that fee difference is below dust value as this condition can - // occur if a dust output has been consumed to fees, resulting in - // mismatch between calculated fees and PT fees - let dust_disposal_fees = pt_fees - calculated_fees; - if !calc.is_dust(dust_disposal_fees) { + // Test that fee difference is small enough to be consumed as change disposal, + // either by legacy dust policy or by current relay-fee cost of preserving it. 
+ let change_disposal_fees = pt_fees - calculated_fees; + if !is_acceptable_change_disposal(change_disposal_fees) { panic!( - "[Fees NONE] dust_disposal_fees test failure - pt fees: {pt_fees} calculated fees: {calculated_fees} difference: {dust_disposal_fees}" + "[Fees NONE] change_disposal_fees test failure - pt fees: {pt_fees} calculated fees: {calculated_fees} difference: {change_disposal_fees}" ); } @@ -560,7 +560,7 @@ fn test_generator_compound_100k_random_transactions() -> Result<()> { let mut rng = StdRng::seed_from_u64(0); let inputs: Vec = (0..100_000).map(|_| rng.gen_range(0.001..10.0)).collect(); let total = inputs.iter().sum::(); - let outputs = [(output_address, Kaspa(total - 10.0))]; + let outputs = [(output_address, Kaspa(total - 1000.0))]; generator(test_network_id(), &inputs, &[], None, Fees::sender(Kaspa(5.0)), outputs.as_slice()) .unwrap() .harness() @@ -657,7 +657,7 @@ fn test_generator_inputs_100_outputs_1_fees_exclude_success() -> Result<()> { .fetch(&Expected { is_final: true, input_count: 2, - aggregate_input_value: Sompi(999_99886576), + aggregate_input_value: Sompi(999_88657600), output_count: 2, // priority_fees: FeesExpected::sender(Kaspa(5.0)), priority_fees: FeesExpected::sender(Kaspa(0.0)), @@ -697,7 +697,7 @@ fn test_generator_inputs_100_outputs_1_fees_include_success() -> Result<()> { .fetch(&Expected { is_final: true, input_count: 2, - aggregate_input_value: Sompi(99_99886576), + aggregate_input_value: Sompi(99_88657600), output_count: 1, priority_fees: FeesExpected::receiver(Kaspa(5.0)), }) @@ -748,7 +748,7 @@ fn test_generator_inputs_1k_outputs_2_fees_exclude() -> Result<()> { .fetch(&Expected { is_final: true, input_count: 11, - aggregate_input_value: Sompi(9009_98981896), + aggregate_input_value: Sompi(9008_98189600), output_count: 2, priority_fees: FeesExpected::receiver(Kaspa(5.0)), }) @@ -766,7 +766,7 @@ fn test_generator_inputs_32k_outputs_2_fees_exclude() -> Result<()> { &[], None, Fees::sender(Kaspa(10_000.0)), - 
[(output_address, Kaspa(f * 32_747.0 - 10_001.0))].as_slice(), + [(output_address, Kaspa(f * 32_747.0 - 10_500.0))].as_slice(), ) .unwrap() .harness() diff --git a/wallet/core/src/tx/mass.rs b/wallet/core/src/tx/mass.rs index 8021fff63b..c2e69ff80a 100644 --- a/wallet/core/src/tx/mass.rs +++ b/wallet/core/src/tx/mass.rs @@ -17,7 +17,11 @@ pub const SIGNATURE_SIZE: u64 = 1 + 64 + 1; //1 byte for OP_DATA_65 + 64 (length /// MINIMUM_RELAY_TRANSACTION_FEE specifies the minimum transaction fee for a transaction to be accepted to /// the mempool and relayed. It is specified in sompi per 1kg (or 1000 grams) of transaction mass. -pub(crate) const MINIMUM_RELAY_TRANSACTION_FEE: u64 = 1000; +/// The default is 100 sompi per gram. +pub(crate) const MINIMUM_RELAY_TRANSACTION_FEE: u64 = 100_000; + +/// Legacy fee ratio used only by wallet-side dust heuristics. This is no longer a mempool relay policy. +const DUST_RELAY_TRANSACTION_FEE: u64 = 1000; /// MAXIMUM_STANDARD_TRANSACTION_MASS is the maximum mass allowed for transactions that /// are considered standard and will therefore be relayed and considered for mining. @@ -44,18 +48,16 @@ pub fn calc_minimum_required_transaction_relay_fee(mass: u64) -> u64 { } /// is_transaction_output_dust returns whether or not the passed transaction output -/// amount is considered dust or not based on the configured minimum transaction -/// relay fee. +/// amount is considered dust, based on the wallet's legacy dust heuristic. /// -/// Dust is defined in terms of the minimum transaction relay fee. In particular, -/// if the cost to the network to spend coins is more than 1/3 of the minimum -/// transaction relay fee, it is considered dust. +/// Dust is defined in terms of the legacy dust fee ratio. In particular, if the +/// estimated cost to spend coins is more than 1/3 of the threshold, it is +/// considered dust. /// -/// It is exposed by `MiningManager` for use by transaction generators and wallets. 
+/// TODO(post-toccata): review this wallet-side dust helper against the updated mempool +/// standardness policy. Mempool no longer rejects dust by threshold, but wallet generation +/// may still want a local small-output/change-disposal heuristic. pub fn is_transaction_output_dust(transaction_output: &TransactionOutput) -> bool { - // TODO(post-toccata): review this wallet-side dust helper against the updated mempool - // standardness policy. Mempool no longer rejects dust by threshold, but wallet generation - // may still want a local small-output/change-disposal heuristic. // TODO: call script engine when available // if txscript.is_unspendable(transaction_output.script_public_key.script()) { // return true @@ -84,15 +86,12 @@ pub fn is_transaction_output_dust(transaction_output: &TransactionOutput) -> boo // that figure is used. let total_serialized_size = transaction_output_serialized_byte_size(transaction_output) + 148; - // The output is considered dust if the cost to the network to spend the - // coins is more than 1/3 of the minimum free transaction relay fee. - // mp.config.MinimumRelayTransactionFee is in sompi/KB, so multiply - // by 1000 to convert to bytes. + // The output is considered dust if the estimated cost to spend the + // coins is more than 1/3 of the legacy dust threshold. // // Using the typical values for a pay-to-pubkey transaction from - // the breakdown above and the default minimum free transaction relay - // fee of 1000, this equates to values less than 546 sompi being - // considered dust. + // the breakdown above and the legacy dust fee of 1000, this equates + // to values less than 546 sompi being considered dust. // // The following is equivalent to (value/total_serialized_size) * (1/3) * 1000 // without needing to do floating point math. @@ -101,8 +100,8 @@ pub fn is_transaction_output_dust(transaction_output: &TransactionOutput) -> boo // are considered to avoid overflowing. 
let value = transaction_output.value; match value.checked_mul(1000) { - Some(value_1000) => value_1000 / (3 * total_serialized_size) < MINIMUM_RELAY_TRANSACTION_FEE, - None => (value as u128 * 1000 / (3 * total_serialized_size as u128)) < MINIMUM_RELAY_TRANSACTION_FEE as u128, + Some(value_1000) => value_1000 / (3 * total_serialized_size) < DUST_RELAY_TRANSACTION_FEE, + None => (value as u128 * 1000 / (3 * total_serialized_size as u128)) < DUST_RELAY_TRANSACTION_FEE as u128, } } @@ -114,8 +113,8 @@ pub const STANDARD_OUTPUT_SIZE_PLUS_INPUT_SIZE_3X: u64 = STANDARD_OUTPUT_SIZE_PL // pub fn is_standard_output_amount_dust(value: u64) -> bool { // match value.checked_mul(1000) { -// Some(value_1000) => value_1000 / STANDARD_OUTPUT_SIZE_PLUS_INPUT_SIZE_3X < MINIMUM_RELAY_TRANSACTION_FEE, -// None => (value as u128 * 1000 / STANDARD_OUTPUT_SIZE_PLUS_INPUT_SIZE_3X as u128) < MINIMUM_RELAY_TRANSACTION_FEE as u128, +// Some(value_1000) => value_1000 / STANDARD_OUTPUT_SIZE_PLUS_INPUT_SIZE_3X < DUST_RELAY_TRANSACTION_FEE, +// None => (value as u128 * 1000 / STANDARD_OUTPUT_SIZE_PLUS_INPUT_SIZE_3X as u128) < DUST_RELAY_TRANSACTION_FEE as u128, // } // } @@ -125,8 +124,8 @@ pub const STANDARD_OUTPUT_SIZE_PLUS_INPUT_SIZE_3X: u64 = STANDARD_OUTPUT_SIZE_PL // // return value < dust_threshold_sompi; // // } else { // match value.checked_mul(1000) { -// Some(value_1000) => value_1000 / STANDARD_OUTPUT_SIZE_PLUS_INPUT_SIZE_3X < MINIMUM_RELAY_TRANSACTION_FEE, -// None => (value as u128 * 1000 / STANDARD_OUTPUT_SIZE_PLUS_INPUT_SIZE_3X as u128) < MINIMUM_RELAY_TRANSACTION_FEE as u128, +// Some(value_1000) => value_1000 / STANDARD_OUTPUT_SIZE_PLUS_INPUT_SIZE_3X < DUST_RELAY_TRANSACTION_FEE, +// None => (value as u128 * 1000 / STANDARD_OUTPUT_SIZE_PLUS_INPUT_SIZE_3X as u128) < DUST_RELAY_TRANSACTION_FEE as u128, // } // // } // } @@ -231,8 +230,8 @@ impl MassCalculator { pub fn is_dust(&self, value: u64) -> bool { match value.checked_mul(1000) { - Some(value_1000) => value_1000 / 
STANDARD_OUTPUT_SIZE_PLUS_INPUT_SIZE_3X < MINIMUM_RELAY_TRANSACTION_FEE, - None => (value as u128 * 1000 / STANDARD_OUTPUT_SIZE_PLUS_INPUT_SIZE_3X as u128) < MINIMUM_RELAY_TRANSACTION_FEE as u128, + Some(value_1000) => value_1000 / STANDARD_OUTPUT_SIZE_PLUS_INPUT_SIZE_3X < DUST_RELAY_TRANSACTION_FEE, + None => (value as u128 * 1000 / STANDARD_OUTPUT_SIZE_PLUS_INPUT_SIZE_3X as u128) < DUST_RELAY_TRANSACTION_FEE as u128, } } @@ -291,7 +290,7 @@ impl MassCalculator { // provisional #[inline(always)] pub fn calc_fee_for_mass(&self, mass: u64) -> u64 { - mass + self.calc_minimum_transaction_fee_from_mass(mass) } pub fn combine_mass(&self, compute_mass: u64, storage_mass: u64) -> u64 {