Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
137 changes: 137 additions & 0 deletions tokio/src/runtime/builder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -133,6 +133,10 @@ pub struct Builder {
/// Configures the task poll count histogram
pub(super) metrics_poll_count_histogram: HistogramBuilder,

pub(super) metrics_schedule_latency_histogram_enable: bool,
Comment thread
robholt marked this conversation as resolved.
Outdated

pub(super) metrics_schedule_latency_histogram: HistogramBuilder,

#[cfg(tokio_unstable)]
pub(super) unhandled_panic: UnhandledPanic,

Expand Down Expand Up @@ -323,6 +327,10 @@ impl Builder {

metrics_poll_count_histogram: HistogramBuilder::default(),

metrics_schedule_latency_histogram_enable: false,

metrics_schedule_latency_histogram: HistogramBuilder::default(),

disable_lifo_slot: false,

timer_flavor: TimerFlavor::Traditional,
Expand Down Expand Up @@ -1556,6 +1564,124 @@ impl Builder {
self.metrics_poll_count_histogram.legacy_mut(|b|b.num_buckets = buckets);
self
}

/// Enables tracking the distribution of task schedule latencies. Task
/// schedule latency is the time between when a task is scheduled for
/// execution and when it is polled.
///
/// Task schedule latencies are not instrumented by default as doing
/// so requires calling [`Instant::now()`] twice per task poll, which
Comment thread
robholt marked this conversation as resolved.
Outdated
/// could add measurable overhead. Use the [`Handle::metrics()`] to
/// access the metrics data.
///
/// By default, a linear histogram with 10 buckets each 100 microseconds wide will be used.
/// This has an extremely low memory footprint, but may not provide enough granularity. For
/// better granularity with low memory usage, use [`metrics_schedule_latency_histogram_configuration()`]
/// to select [`LogHistogram`] instead.
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use tokio::runtime;
///
/// let rt = runtime::Builder::new_multi_thread()
/// .enable_metrics_schedule_latency_histogram()
/// .build()
/// .unwrap();
/// # // Test default values here
/// # fn us(n: u64) -> std::time::Duration { std::time::Duration::from_micros(n) }
/// # let m = rt.handle().metrics();
/// # assert_eq!(m.schedule_latency_histogram_num_buckets(), 10);
/// # assert_eq!(m.schedule_latency_histogram_bucket_range(0), us(0)..us(100));
/// # assert_eq!(m.schedule_latency_histogram_bucket_range(1), us(100)..us(200));
/// # }
/// ```
///
/// [`Handle::metrics()`]: crate::runtime::Handle::metrics
/// [`Instant::now()`]: std::time::Instant::now
/// [`LogHistogram`]: crate::runtime::LogHistogram
/// [`metrics_schedule_latency_histogram_configuration()`]: Builder::metrics_schedule_latency_histogram_configuration
pub fn enable_metrics_schedule_latency_histogram(&mut self) -> &mut Self {
self.metrics_schedule_latency_histogram_enable = true;
self
}

/// Configure the histogram for tracking task schedule latencies.
///
/// By default, a linear histogram with 10 buckets each 100 microseconds wide will be used.
/// This has an extremely low memory footprint, but may not provide enough granularity. For
/// better granularity with low memory usage, use [`LogHistogram`] instead.
///
/// # Examples
/// Configure a [`LogHistogram`] with [default configuration]:
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use tokio::runtime;
/// use tokio::runtime::{HistogramConfiguration, LogHistogram};
///
/// let rt = runtime::Builder::new_multi_thread()
///     .enable_metrics_schedule_latency_histogram()
///     .metrics_schedule_latency_histogram_configuration(
///         HistogramConfiguration::log(LogHistogram::default())
///     )
///     .build()
///     .unwrap();
/// # }
/// ```
///
/// Configure a linear histogram with 100 buckets, each 10μs wide
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use tokio::runtime;
/// use std::time::Duration;
/// use tokio::runtime::HistogramConfiguration;
///
/// let rt = runtime::Builder::new_multi_thread()
///     .enable_metrics_schedule_latency_histogram()
///     .metrics_schedule_latency_histogram_configuration(
///         HistogramConfiguration::linear(Duration::from_micros(10), 100)
///     )
///     .build()
///     .unwrap();
/// # }
/// ```
///
/// Configure a [`LogHistogram`] with the following settings:
/// - Measure times from 100ns to 120s
/// - Max error of 0.1
/// - No more than 1024 buckets
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use std::time::Duration;
/// use tokio::runtime;
/// use tokio::runtime::{HistogramConfiguration, LogHistogram};
///
/// let rt = runtime::Builder::new_multi_thread()
///     .enable_metrics_schedule_latency_histogram()
///     .metrics_schedule_latency_histogram_configuration(
///         HistogramConfiguration::log(LogHistogram::builder()
///             .max_value(Duration::from_secs(120))
///             .min_value(Duration::from_nanos(100))
///             .max_error(0.1)
///             .max_buckets(1024)
///             .expect("configuration uses 488 buckets")
///         )
///     )
///     .build()
///     .unwrap();
/// # }
/// ```
///
/// [`LogHistogram`]: crate::runtime::LogHistogram
pub fn metrics_schedule_latency_histogram_configuration(
    &mut self,
    configuration: HistogramConfiguration,
) -> &mut Self {
    self.metrics_schedule_latency_histogram.histogram_type = configuration.inner;
    self
}
}

fn build_current_thread_runtime(&mut self) -> io::Result<Runtime> {
Expand Down Expand Up @@ -1631,6 +1757,8 @@ impl Builder {
disable_lifo_slot: self.disable_lifo_slot,
seed_generator: seed_generator_1,
metrics_poll_count_histogram: self.metrics_poll_count_histogram_builder(),
metrics_schedule_latency_histogram: self
.metrics_schedule_latency_histogram_builder(),
},
local_tid,
);
Expand All @@ -1649,6 +1777,14 @@ impl Builder {
None
}
}

/// Returns the histogram configuration to install for schedule-latency
/// tracking, or `None` when the feature was not enabled on this builder.
fn metrics_schedule_latency_histogram_builder(&self) -> Option<HistogramBuilder> {
    self.metrics_schedule_latency_histogram_enable
        .then(|| self.metrics_schedule_latency_histogram.clone())
}
}

cfg_io_driver! {
Expand Down Expand Up @@ -1812,6 +1948,7 @@ cfg_rt_multi_thread! {
disable_lifo_slot: self.disable_lifo_slot,
seed_generator: seed_generator_1,
metrics_poll_count_histogram: self.metrics_poll_count_histogram_builder(),
metrics_schedule_latency_histogram: self.metrics_schedule_latency_histogram_builder(),
},
self.timer_flavor,
);
Expand Down
3 changes: 3 additions & 0 deletions tokio/src/runtime/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,9 @@ pub(crate) struct Config {
/// How to build poll time histograms
pub(crate) metrics_poll_count_histogram: Option<crate::runtime::HistogramBuilder>,

/// How to build schedule latency histograms
pub(crate) metrics_schedule_latency_histogram: Option<crate::runtime::HistogramBuilder>,

#[cfg(tokio_unstable)]
/// How to respond to unhandled task panics.
pub(crate) unhandled_panic: crate::runtime::UnhandledPanic,
Expand Down
24 changes: 22 additions & 2 deletions tokio/src/runtime/metrics/batch.rs
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,9 @@ pub(crate) struct MetricsBatch {
#[cfg(tokio_unstable)]
/// If `Some`, tracks poll times in nanoseconds
poll_timer: Option<PollTimer>,

#[cfg(tokio_unstable)]
schedule_latencies: Option<HistogramBatch>,
Comment on lines +58 to +59
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think I would prefer the following approach:

  1. Everywhere in the code it's just protected by #[cfg(feature = "schedule-latency")] only.
  2. In lib.rs you emit a compile_error! if you enable schedule-latency without --cfg tokio_unstable.

Right now the cfgs are pretty inconsistent. Sometimes you only require tokio_unstable. Sometimes you require target_pointer_width = "64". Sometimes you require target_has_atomic = "64". Please make these cfgs consistent.

}

cfg_unstable_metrics! {
Expand Down Expand Up @@ -95,6 +98,10 @@ impl MetricsBatch {
poll_started_at: now,
})
});
let schedule_latencies = worker_metrics
Comment thread
robholt marked this conversation as resolved.
Outdated
.schedule_latency_histogram
.as_ref()
.map(HistogramBatch::from_histogram);
MetricsBatch {
park_count: 0,
park_unpark_count: 0,
Expand All @@ -108,6 +115,7 @@ impl MetricsBatch {
busy_duration_total: 0,
processing_scheduled_tasks_started_at: maybe_now,
poll_timer,
schedule_latencies,
}
}
}
Expand Down Expand Up @@ -155,6 +163,11 @@ impl MetricsBatch {
let dst = worker.poll_count_histogram.as_ref().unwrap();
poll_timer.poll_counts.submit(dst);
}

if let Some(schedule_latencies) = &self.schedule_latencies {
let dst = worker.schedule_latency_histogram.as_ref().unwrap();
schedule_latencies.submit(dst);
}
}
}
}
Expand Down Expand Up @@ -206,15 +219,22 @@ impl MetricsBatch {
cfg_metrics_variant! {
stable: {
/// Start polling an individual task
pub(crate) fn start_poll(&mut self) {}
pub(crate) fn start_poll(&mut self, _task_scheduled_at: Option<Instant>) {}
},
unstable: {
/// Start polling an individual task
pub(crate) fn start_poll(&mut self) {
pub(crate) fn start_poll(&mut self, task_scheduled_at: Option<Instant>) {
self.poll_count += 1;
if let Some(poll_timer) = &mut self.poll_timer {
poll_timer.poll_started_at = Instant::now();
}
if let Some(task_scheduled_at) = task_scheduled_at {
if let Some(schedule_latencies) = &mut self.schedule_latencies {
let now = self.poll_timer.as_ref().map(|p| p.poll_started_at).unwrap_or_else(Instant::now);
let elapsed = duration_as_u64(now - task_scheduled_at);
schedule_latencies.measure(elapsed, 1);
Comment thread
robholt marked this conversation as resolved.
Outdated
}
}
}
}
}
Expand Down
Loading
Loading