Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions tokio/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,8 @@ time = []
io-uring = ["dep:io-uring", "libc", "mio/os-poll", "mio/os-ext", "dep:slab"]
# Unstable feature. Requires `--cfg tokio_unstable` to enable.
taskdump = ["dep:backtrace"]
# Unstable feature. Requires `--cfg tokio_unstable` to enable.
schedule-latency = []

[dependencies]
tokio-macros = { version = "~2.7.0", optional = true }
Expand Down
1 change: 1 addition & 0 deletions tokio/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -354,6 +354,7 @@
//! Some feature flags are only available when specifying the `tokio_unstable` flag:
//!
//! - `tracing`: Enables tracing events.
//! - `schedule-latency`: Allows measurement of task scheduling latencies.
//! - `io-uring`: Enables `io-uring` (Linux only).
//! - `taskdump`: Enables `taskdump` (Linux only).
//!
Expand Down
19 changes: 19 additions & 0 deletions tokio/src/macros/cfg.rs
Original file line number Diff line number Diff line change
Expand Up @@ -746,3 +746,22 @@ macro_rules! cfg_io_uring {
)*
};
}

/// Expands each wrapped item only when task schedule latency
/// instrumentation is compiled in: requires `--cfg tokio_unstable`, the
/// `schedule-latency` feature flag, and a 64-bit target (see the
/// `target_pointer_width = "64"` gate below).
///
/// NOTE(review): the docs.rs `doc(cfg(...))` annotation deliberately
/// omits the pointer-width condition, so rendered docs show only the
/// user-facing gates — confirm this omission is intended.
macro_rules! cfg_schedule_latency {
    ($($item:item)*) => {
        $(
            #[cfg(all(tokio_unstable, feature = "schedule-latency", target_pointer_width = "64"))]
            #[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = "schedule-latency"))))]
            $item
        )*
    };
}

/// Counterpart of `cfg_schedule_latency!`: expands each wrapped item only
/// when task schedule latency instrumentation is *not* compiled in — that
/// is, when any of `--cfg tokio_unstable`, the `schedule-latency` feature,
/// or a 64-bit target is missing (De Morgan complement of the `all(...)`
/// gate used by `cfg_schedule_latency!`).
macro_rules! cfg_not_schedule_latency {
    ($($item:item)*) => {
        $(
            #[cfg(any(not(tokio_unstable), not(feature = "schedule-latency"), not(target_pointer_width = "64")))]
            $item
        )*
    };
}
150 changes: 150 additions & 0 deletions tokio/src/runtime/builder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -138,6 +138,12 @@ pub struct Builder {
/// Configures the task poll count histogram
pub(super) metrics_poll_count_histogram: HistogramBuilder,

/// When true, enables task schedule latency instrumentation.
pub(super) metrics_schedule_latency_histogram_enabled: bool,

/// Configures the task schedule latency histogram.
pub(super) metrics_schedule_latency_histogram: HistogramBuilder,

#[cfg(tokio_unstable)]
pub(super) unhandled_panic: UnhandledPanic,

Expand Down Expand Up @@ -335,6 +341,10 @@ impl Builder {

metrics_poll_count_histogram: HistogramBuilder::default(),

metrics_schedule_latency_histogram_enabled: false,

metrics_schedule_latency_histogram: HistogramBuilder::default(),

disable_lifo_slot: false,

timer_flavor: TimerFlavor::Traditional,
Expand Down Expand Up @@ -1708,6 +1718,8 @@ impl Builder {
enable_eager_driver_handoff: false,
seed_generator: seed_generator_1,
metrics_poll_count_histogram: self.metrics_poll_count_histogram_builder(),
metrics_schedule_latency_histogram: self
.metrics_schedule_latency_histogram_builder(),
},
local_tid,
self.name.clone(),
Expand All @@ -1727,6 +1739,14 @@ impl Builder {
None
}
}

/// Returns the schedule latency histogram configuration to install on
/// worker metrics, or `None` when schedule latency instrumentation was
/// not enabled on this builder.
fn metrics_schedule_latency_histogram_builder(&self) -> Option<HistogramBuilder> {
    // `then` lazily clones the builder only when instrumentation is on.
    self.metrics_schedule_latency_histogram_enabled
        .then(|| self.metrics_schedule_latency_histogram.clone())
}
}

cfg_io_driver! {
Expand Down Expand Up @@ -1848,6 +1868,135 @@ cfg_test_util! {
}
}

cfg_schedule_latency! {
    impl Builder {
        /// Enables tracking the distribution of task schedule latencies. Task
        /// schedule latency is the time between when a task is scheduled for
        /// execution and when it is polled.
        ///
        /// **This feature is only supported on 64-bit targets.**
        ///
        /// Task schedule latencies are not instrumented by default as doing
        /// so requires calling [`Instant::now()`] when a task is scheduled
        /// and when it is polled, which could add measurable overhead. Use
        /// [`Handle::metrics()`] to access the metrics data.
        ///
        /// By default, a linear histogram with 10 buckets each 100 microseconds wide will be used.
        /// This has an extremely low memory footprint, but may not provide enough granularity. For
        /// better granularity with low memory usage, use [`metrics_schedule_latency_histogram_configuration()`]
        /// to select [`LogHistogram`] instead.
        ///
        /// # Examples
        ///
        /// ```
        /// # #[cfg(not(target_family = "wasm"))]
        /// # {
        /// use tokio::runtime;
        ///
        /// let rt = runtime::Builder::new_multi_thread()
        ///     .enable_metrics_schedule_latency_histogram()
        ///     .build()
        ///     .unwrap();
        /// # // Test default values here
        /// # fn us(n: u64) -> std::time::Duration { std::time::Duration::from_micros(n) }
        /// # let m = rt.handle().metrics();
        /// # assert_eq!(m.schedule_latency_histogram_num_buckets(), 10);
        /// # assert_eq!(m.schedule_latency_histogram_bucket_range(0), us(0)..us(100));
        /// # assert_eq!(m.schedule_latency_histogram_bucket_range(1), us(100)..us(200));
        /// # }
        /// ```
        ///
        /// [`Handle::metrics()`]: crate::runtime::Handle::metrics
        /// [`Instant::now()`]: std::time::Instant::now
        /// [`LogHistogram`]: crate::runtime::LogHistogram
        /// [`metrics_schedule_latency_histogram_configuration()`]: Builder::metrics_schedule_latency_histogram_configuration
        pub fn enable_metrics_schedule_latency_histogram(&mut self) -> &mut Self {
            self.metrics_schedule_latency_histogram_enabled = true;
            self
        }

        /// Configure the histogram for tracking task schedule latencies.
        ///
        /// Tracking of task schedule latencies must be enabled with
        /// [`enable_metrics_schedule_latency_histogram()`] for this function
        /// to have any effect.
        ///
        /// By default, a linear histogram with 10 buckets each 100 microseconds wide will be used.
        /// This has an extremely low memory footprint, but may not provide enough granularity. For
        /// better granularity with low memory usage, use [`LogHistogram`] instead.
        ///
        /// # Examples
        /// Configure a [`LogHistogram`] with [default configuration]:
        /// ```
        /// # #[cfg(not(target_family = "wasm"))]
        /// # {
        /// use tokio::runtime;
        /// use tokio::runtime::{HistogramConfiguration, LogHistogram};
        ///
        /// let rt = runtime::Builder::new_multi_thread()
        ///     .enable_metrics_schedule_latency_histogram()
        ///     .metrics_schedule_latency_histogram_configuration(
        ///         HistogramConfiguration::log(LogHistogram::default())
        ///     )
        ///     .build()
        ///     .unwrap();
        /// # }
        /// ```
        ///
        /// Configure a linear histogram with 100 buckets, each 10μs wide
        /// ```
        /// # #[cfg(not(target_family = "wasm"))]
        /// # {
        /// use tokio::runtime;
        /// use std::time::Duration;
        /// use tokio::runtime::HistogramConfiguration;
        ///
        /// let rt = runtime::Builder::new_multi_thread()
        ///     .enable_metrics_schedule_latency_histogram()
        ///     .metrics_schedule_latency_histogram_configuration(
        ///         HistogramConfiguration::linear(Duration::from_micros(10), 100)
        ///     )
        ///     .build()
        ///     .unwrap();
        /// # }
        /// ```
        ///
        /// Configure a [`LogHistogram`] with the following settings:
        /// - Measure times from 100ns to 120s
        /// - Max error of 0.1
        /// - No more than 1024 buckets
        /// ```
        /// # #[cfg(not(target_family = "wasm"))]
        /// # {
        /// use std::time::Duration;
        /// use tokio::runtime;
        /// use tokio::runtime::{HistogramConfiguration, LogHistogram};
        ///
        /// let rt = runtime::Builder::new_multi_thread()
        ///     .enable_metrics_schedule_latency_histogram()
        ///     .metrics_schedule_latency_histogram_configuration(
        ///         HistogramConfiguration::log(LogHistogram::builder()
        ///             .max_value(Duration::from_secs(120))
        ///             .min_value(Duration::from_nanos(100))
        ///             .max_error(0.1)
        ///             .max_buckets(1024)
        ///             .expect("configuration uses 488 buckets")
        ///         )
        ///     )
        ///     .build()
        ///     .unwrap();
        /// # }
        /// ```
        ///
        /// [default configuration]: crate::runtime::LogHistogram::default
        /// [`LogHistogram`]: crate::runtime::LogHistogram
        /// [`enable_metrics_schedule_latency_histogram()`]: Builder::enable_metrics_schedule_latency_histogram
        pub fn metrics_schedule_latency_histogram_configuration(
            &mut self,
            configuration: HistogramConfiguration,
        ) -> &mut Self {
            self.metrics_schedule_latency_histogram.histogram_type = configuration.inner;
            self
        }
    }
}

cfg_rt_multi_thread! {
impl Builder {
fn build_threaded_runtime(&mut self) -> io::Result<Runtime> {
Expand Down Expand Up @@ -1891,6 +2040,7 @@ cfg_rt_multi_thread! {
enable_eager_driver_handoff: self.enable_eager_driver_handoff,
seed_generator: seed_generator_1,
metrics_poll_count_histogram: self.metrics_poll_count_histogram_builder(),
metrics_schedule_latency_histogram: self.metrics_schedule_latency_histogram_builder(),
},
self.timer_flavor,
self.name.clone(),
Expand Down
3 changes: 3 additions & 0 deletions tokio/src/runtime/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,9 @@ pub(crate) struct Config {
/// How to build poll time histograms
pub(crate) metrics_poll_count_histogram: Option<crate::runtime::HistogramBuilder>,

/// How to build schedule latency histograms
pub(crate) metrics_schedule_latency_histogram: Option<crate::runtime::HistogramBuilder>,

#[cfg(tokio_unstable)]
/// How to respond to unhandled task panics.
pub(crate) unhandled_panic: crate::runtime::UnhandledPanic,
Expand Down
35 changes: 33 additions & 2 deletions tokio/src/runtime/metrics/batch.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ cfg_unstable_metrics! {
use crate::runtime::metrics::HistogramBatch;
}

use crate::runtime::metrics::ScheduleLatencyContext;
use std::sync::atomic::Ordering::Relaxed;
use std::time::{Duration, Instant};

Expand Down Expand Up @@ -53,6 +54,9 @@ pub(crate) struct MetricsBatch {
#[cfg(tokio_unstable)]
/// If `Some`, tracks poll times in nanoseconds
poll_timer: Option<PollTimer>,

#[cfg(tokio_unstable)]
schedule_latencies: Option<HistogramBatch>,
}

cfg_unstable_metrics! {
Expand Down Expand Up @@ -95,6 +99,13 @@ impl MetricsBatch {
poll_started_at: now,
})
});
// Schedule latencies cannot be tracked if `Instant::now()` is unavailable
let schedule_latencies = maybe_now.and_then(|_| {
worker_metrics
.schedule_latency_histogram
.as_ref()
.map(HistogramBatch::from_histogram)
});
MetricsBatch {
park_count: 0,
park_unpark_count: 0,
Expand All @@ -108,6 +119,7 @@ impl MetricsBatch {
busy_duration_total: 0,
processing_scheduled_tasks_started_at: maybe_now,
poll_timer,
schedule_latencies,
}
}
}
Expand Down Expand Up @@ -155,6 +167,11 @@ impl MetricsBatch {
let dst = worker.poll_count_histogram.as_ref().unwrap();
poll_timer.poll_counts.submit(dst);
}

if let Some(schedule_latencies) = &self.schedule_latencies {
let dst = worker.schedule_latency_histogram.as_ref().unwrap();
schedule_latencies.submit(dst);
}
}
}
}
Expand Down Expand Up @@ -206,15 +223,29 @@ impl MetricsBatch {
cfg_metrics_variant! {
stable: {
/// Start polling an individual task
pub(crate) fn start_poll(&mut self) {}
pub(crate) fn start_poll(&mut self, _task_scheduled_at: Option<ScheduleLatencyContext>) {}
},
unstable: {
/// Start polling an individual task
pub(crate) fn start_poll(&mut self) {
///
/// # Arguments
///
/// `task_scheduled_at` is used to calculate task schedule latency.
/// A `ScheduleLatencyContext` can be obtained by calling `prepare` on a task's
/// `ScheduleLatencyInstant`.
pub(crate) fn start_poll(&mut self, task_scheduled_at: Option<ScheduleLatencyContext>) {
self.poll_count += 1;
if let Some(poll_timer) = &mut self.poll_timer {
poll_timer.poll_started_at = Instant::now();
}
if let Some(task_scheduled_at) = task_scheduled_at {
if let Some(schedule_latencies) = &mut self.schedule_latencies {
if let Some(now) = self.poll_timer.as_ref().map(|p| p.poll_started_at).or_else(now) {
let elapsed = task_scheduled_at.elapsed_nanos(now);
schedule_latencies.measure(elapsed, 1);
}
}
}
}
}
}
Expand Down
10 changes: 10 additions & 0 deletions tokio/src/runtime/metrics/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -38,3 +38,13 @@ cfg_not_unstable_metrics! {
mod mock;
pub(crate) use mock::{SchedulerMetrics, HistogramBuilder};
}

// Real schedule-latency types; compiled only with `--cfg tokio_unstable`,
// the `schedule-latency` feature, and a 64-bit target (the gates applied
// by `cfg_schedule_latency!`).
cfg_schedule_latency! {
    mod schedule_latency;
    pub(crate) use schedule_latency::{ScheduleLatencyInstant, ScheduleLatencyContext};
}

// Substitute definitions exported under the same names when the feature
// gate is off, so callers compile unconditionally — presumably no-op
// mocks (module body not in view; TODO confirm).
cfg_not_schedule_latency! {
    mod schedule_latency_mock;
    pub(crate) use schedule_latency_mock::{ScheduleLatencyInstant, ScheduleLatencyContext};
}
Loading
Loading