diff --git a/crates/anvil/src/cmd.rs b/crates/anvil/src/cmd.rs
index e492697911693..fb75529b82908 100644
--- a/crates/anvil/src/cmd.rs
+++ b/crates/anvil/src/cmd.rs
@@ -440,8 +440,8 @@ pub struct AnvilEvmArgs {
     /// If you want to fetch state from a specific block number, add a block number like `http://localhost:8545@1400000` or use the `--fork-block-number` argument.
     ///
     /// Multiple `--fork-url` flags can be provided to distribute requests across endpoints
-    /// with automatic failover. Requests are routed to the best-performing endpoint based on
-    /// latency and success rate.
+    /// using round-robin load balancing. On failure, the retry layer rotates to the next
+    /// endpoint.
     #[arg(
         long,
         short,
diff --git a/crates/anvil/src/config.rs b/crates/anvil/src/config.rs
index 605c725041ea7..23cd6e61bc076 100644
--- a/crates/anvil/src/config.rs
+++ b/crates/anvil/src/config.rs
@@ -135,8 +135,8 @@ pub struct NodeConfig {
     /// maximum number of transactions in a block
     pub max_transactions: usize,
     /// Fork URLs for RPC calls. The first entry is the primary endpoint.
-    /// When multiple URLs are provided, requests are distributed using Alloy's
-    /// `FallbackService` with automatic failover based on endpoint health.
+    /// When multiple URLs are provided, requests are distributed using
+    /// round-robin load balancing with retry-based failover.
     pub fork_urls: Vec<String>,
     /// pins the block number or transaction hash for the state fork
     pub fork_choice: Option<ForkChoice>,
@@ -1266,21 +1266,20 @@ impl NodeConfig {
         fees: &FeeManager,
     ) -> Result<(ForkedDatabase, ClientForkConfig)> {
         debug!(target: "node", ?eth_rpc_url, "setting up fork db");
-        let builder = ProviderBuilder::new(&eth_rpc_url)
-            .timeout(self.fork_request_timeout)
-            .initial_backoff(self.fork_retry_backoff.as_millis() as u64)
-            .compute_units_per_second(self.compute_units_per_second)
-            .max_retry(self.fork_request_retries)
-            .headers(self.fork_headers.clone());
-
-        let provider = Arc::new(if self.fork_urls.len() > 1 {
-            debug!(target: "node", urls=?self.fork_urls, "using multi-endpoint fallback provider");
-            builder
-                .build_fallback(self.fork_urls.clone())
-                .wrap_err("failed to establish fallback provider to fork urls")?
-        } else {
-            builder.build().wrap_err("failed to establish provider to fork url")?
-        });
+
+        // Always bootstrap with the primary URL only to avoid race conditions
+        // where discovery calls (get_chain_id, find_latest_fork_block, get_block)
+        // hit different endpoints that may be at different chain tips.
+        let provider = Arc::new(
+            ProviderBuilder::new(&eth_rpc_url)
+                .timeout(self.fork_request_timeout)
+                .initial_backoff(self.fork_retry_backoff.as_millis() as u64)
+                .compute_units_per_second(self.compute_units_per_second)
+                .max_retry(self.fork_request_retries)
+                .headers(self.fork_headers.clone())
+                .build()
+                .wrap_err("failed to establish provider to fork url")?,
+        );
 
         let (fork_block_number, fork_chain_id, force_transactions) = if let Some(fork_choice) =
             &self.fork_choice
@@ -1432,6 +1431,25 @@ latest block number: {latest_block}"
             BlockchainDb::new(meta, self.block_cache_path(fork_block_number))
         };
 
+        // After bootstrap, rebuild the provider with round-robin if multiple URLs are
+        // configured. This ensures bootstrap used only the primary endpoint for consistency,
+        // while ongoing requests are distributed across all endpoints.
+        let provider = if self.fork_urls.len() > 1 {
+            debug!(target: "node", urls=?self.fork_urls, "using multi-endpoint round-robin provider");
+            Arc::new(
+                ProviderBuilder::new(&eth_rpc_url)
+                    .timeout(self.fork_request_timeout)
+                    .initial_backoff(self.fork_retry_backoff.as_millis() as u64)
+                    .compute_units_per_second(self.compute_units_per_second)
+                    .max_retry(self.fork_request_retries)
+                    .headers(self.fork_headers.clone())
+                    .build_fallback(self.fork_urls.clone())
+                    .wrap_err("failed to establish round-robin provider to fork urls")?,
+            )
+        } else {
+            provider
+        };
+
         // This will spawn the background thread that will use the provider to fetch
         // blockchain data from the other client
         let backend = SharedBackend::spawn_backend(
diff --git a/crates/anvil/src/eth/backend/fork.rs b/crates/anvil/src/eth/backend/fork.rs
index d6cb500f60d0f..bc87fe2fc2052 100644
--- a/crates/anvil/src/eth/backend/fork.rs
+++ b/crates/anvil/src/eth/backend/fork.rs
@@ -631,7 +631,7 @@ impl ClientFork {
 pub struct ClientForkConfig {
     /// All fork URLs. The first entry is the primary endpoint.
     /// When multiple URLs are present, requests are distributed using
-    /// `FallbackService` with automatic failover.
+    /// round-robin load balancing with retry-based failover.
     pub fork_urls: Vec<String>,
     /// The block number of the forked block
     pub block_number: u64,
diff --git a/crates/anvil/tests/it/fork.rs b/crates/anvil/tests/it/fork.rs
index 63d6c62e05e2b..37d3f6f4a1dc4 100644
--- a/crates/anvil/tests/it/fork.rs
+++ b/crates/anvil/tests/it/fork.rs
@@ -2022,8 +2022,7 @@ async fn test_anvil_set_rpc_url_syncs_fork_config() {
     let (_origin_api, origin_handle) = spawn(NodeConfig::test()).await;
     let origin_url = origin_handle.http_endpoint();
 
-    let (api, _handle) =
-        spawn(NodeConfig::test().with_eth_rpc_url(Some(origin_url.clone()))).await;
+    let (api, _handle) = spawn(NodeConfig::test().with_eth_rpc_url(Some(origin_url.clone()))).await;
 
     // Verify initial fork URL
     let fork = api.backend.get_fork().unwrap();
@@ -2051,8 +2050,7 @@ async fn test_anvil_reset_with_url_updates_fork_urls() {
     let (_origin_api, origin_handle) = spawn(NodeConfig::test()).await;
     let origin_url = origin_handle.http_endpoint();
 
-    let (api, _handle) =
-        spawn(NodeConfig::test().with_eth_rpc_url(Some(origin_url.clone()))).await;
+    let (api, _handle) = spawn(NodeConfig::test().with_eth_rpc_url(Some(origin_url.clone()))).await;
 
     // Spawn a second origin
     let (_origin2_api, origin2_handle) = spawn(NodeConfig::test()).await;
diff --git a/crates/common/src/provider/mod.rs b/crates/common/src/provider/mod.rs
index 17a1d64818750..1620342989a29 100644
--- a/crates/common/src/provider/mod.rs
+++ b/crates/common/src/provider/mod.rs
@@ -95,8 +95,13 @@ pub struct RoundRobinService {
 }
 
 impl RoundRobinService {
-    /// Creates a new round-robin service from a list of transports.
+    /// Creates a new round-robin service from a non-empty list of transports.
+    ///
+    /// # Panics
+    ///
+    /// Panics if `transports` is empty.
     pub fn new(transports: Vec<RuntimeTransport>) -> Self {
+        assert!(!transports.is_empty(), "RoundRobinService requires at least one transport");
         Self { transports: Arc::new(transports), next: Arc::new(AtomicUsize::new(0)) }
     }
 }
@@ -422,15 +427,12 @@ impl ProviderBuilder {
 }
 
 impl ProviderBuilder {
-    /// Constructs a `RetryProvider` backed by multiple URLs using Alloy's `FallbackService`.
-    ///
-    /// Requests are distributed across all provided endpoints using a scored strategy:
-    /// the top `active_transport_count` endpoints (by latency + success rate) are queried,
-    /// and the first successful response wins. Endpoints that return errors or time out
-    /// are automatically deprioritized.
+    /// Constructs a `RetryProvider` backed by multiple URLs using round-robin load balancing.
     ///
-    /// Set `active_transport_count` to 1 for sequential (round-robin-like) behavior where
-    /// only the best-scored endpoint handles each request.
+    /// Each request is sent to exactly one transport, rotating through the list via
+    /// [`RoundRobinService`]. There is no health scoring or endpoint deprioritization.
+    /// On failure, the `RetryBackoffLayer` retries the request, which naturally hits
+    /// the next transport in the rotation.
     pub fn build_fallback(self, urls: Vec<String>) -> Result<RetryProvider> {
         let Self {
             chain,
@@ -451,11 +453,13 @@ impl ProviderBuilder {
 
         // Build a RuntimeTransport for each URL, using the same URL normalization
         // as ProviderBuilder::new() (handles localhost:port, raw socket addrs, IPC paths)
+        let mut parsed_urls = Vec::with_capacity(urls.len());
         let transports: Vec<_> = urls
             .iter()
             .map(|url_str| {
                 let builder = Self::new(url_str);
                 let url = builder.url?;
+                parsed_urls.push(url.clone());
                 Ok(RuntimeTransportBuilder::new(url)
                     .with_timeout(timeout)
                     .with_headers(headers.clone())
@@ -466,14 +470,12 @@ impl ProviderBuilder {
             })
             .collect::<Result<Vec<_>>>()?;
 
-        // Wrap in RoundRobinService: each request goes to one transport, rotating.
-        // On failure, the next transport in the ring is tried before returning an error.
         let round_robin = RoundRobinService::new(transports);
 
-        // Apply retry layer on top of the round-robin service
         let retry_layer =
             RetryBackoffLayer::new(max_retry, initial_backoff, compute_units_per_second);
 
-        let is_local = urls.iter().all(guess_local_url);
+        // Use normalized/parsed URLs for local detection, consistent with build()
+        let is_local = parsed_urls.iter().all(|url| guess_local_url(url.as_str()));
         let client = ClientBuilder::default().layer(retry_layer).transport(round_robin, is_local);
         if !is_local {