From 74bc50e46b1fe057e2e8d4ff06db3202fe1cf443 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 8 May 2026 09:42:09 -0700 Subject: [PATCH] Add time-range filtering to support bundle log collection MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Resolves #10372. Adds a bundle-wide time-range concept that flows from the omdb CLI down through BundleDataSelection, the support-bundle-collection mechanism, the sled-agent API, and into sled-diagnostics, where the per-file mtime gate is applied to archived and extra log files (current logs are always included regardless of the window). Highlights: - New `BundleTimeRange { start, end }` type in `nexus_types::support_bundle`. The window is bundle-wide: a single `BundleDataSelection::time_range` field, applied to both host-info log collection and ereport queries. - `BundleDataSelection::all()` defaults to a 7-day window — same default as ereports already had; logs gain the same cap where they had none. - Sled-agent API gets a new `VERSION_ADD_LOG_TIME_RANGE` (v38) that reshapes `SledDiagnosticsLogsDownloadQueryParam`: `max_rotated` becomes optional; `start_time`/`end_time` are added. - `sled_diagnostics::LogsHandle::get_zone_logs` keeps oxlog's `date_range: None` and applies the time gate explicitly inside `get_zone_logs_inner`. Files with unknown mtime are over-inclusively kept. - Sim sled-agent gains a `SimLogEntry` injection surface so tests can construct synthetic log files with controlled mtimes; default behavior (no entries injected) remains an empty zone list. - omdb `support-bundle collect` gains `--since ` and `--until ` flags (humantime, both relative to now). - Persistence: schema v257 adds bundle-level `support_bundle_data_selection_time_range` and the FM equivalent, keyed by bundle / sitrep+request id. Existing per-category time columns on the ereports tables are migrated and dropped. 
- Integration test in `nexus/tests/integration_tests/support_bundles.rs` injects entries on the sim sled-agent at 30 m, 6 h, and 30 d ago, collects with a 24 h window and a 7 d window, and asserts only the recent entries land in the bundle. --- .../src/bin/omdb/support_bundle_collect.rs | 44 ++++- dev-tools/omdb/tests/successes.out | 72 ++++---- .../db-model/src/fm/support_bundle_request.rs | 83 ++++++--- nexus/db-model/src/schema_versions.rs | 3 +- nexus/db-model/src/support_bundle.rs | 83 +++++---- nexus/db-queries/src/db/datastore/ereport.rs | 78 +++++--- nexus/db-queries/src/db/datastore/fm.rs | 66 +++++-- .../src/db/datastore/support_bundle.rs | 74 +++++--- nexus/db-schema/src/schema.rs | 23 ++- .../src/test_util/host_phase_2_test_state.rs | 12 ++ .../integration_tests/support_bundles.rs | 166 ++++++++++++++++++ nexus/types/src/fm/ereport.rs | 127 +------------- nexus/types/src/support_bundle.rs | 104 ++++++++--- .../sled-agent-37.0.0-a1f825.json.gitstub | 1 + ...825.json => sled-agent-38.0.0-1db2eb.json} | 26 ++- openapi/sled-agent/sled-agent-latest.json | 2 +- schema/crdb/dbinit.sql | 41 ++++- schema/crdb/support-bundle-time-range/up1.sql | 10 ++ schema/crdb/support-bundle-time-range/up2.sql | 11 ++ schema/crdb/support-bundle-time-range/up3.sql | 23 +++ schema/crdb/support-bundle-time-range/up4.sql | 7 + schema/crdb/support-bundle-time-range/up5.sql | 2 + schema/crdb/support-bundle-time-range/up6.sql | 5 + schema/crdb/support-bundle-time-range/up7.sql | 2 + sled-agent/api/src/lib.rs | 21 +++ sled-agent/src/http_entrypoints.rs | 42 ++++- sled-agent/src/sim/http_entrypoints.rs | 57 +++++- sled-agent/src/sim/mod.rs | 2 + sled-agent/src/sim/sim_support_logs.rs | 78 ++++++++ sled-agent/src/sim/sled_agent.rs | 43 +++++ sled-agent/src/support_bundle/logs.rs | 12 +- .../src/add_log_time_range/diagnostics.rs | 48 +++++ .../versions/src/add_log_time_range/mod.rs | 13 ++ sled-agent/types/versions/src/latest.rs | 2 +- sled-agent/types/versions/src/lib.rs | 2 + 
sled-diagnostics/src/lib.rs | 2 +- sled-diagnostics/src/logs.rs | 105 ++++++++++- .../src/steps/ereports.rs | 6 +- .../src/steps/host_info.rs | 26 ++- 39 files changed, 1164 insertions(+), 360 deletions(-) create mode 100644 openapi/sled-agent/sled-agent-37.0.0-a1f825.json.gitstub rename openapi/sled-agent/{sled-agent-37.0.0-a1f825.json => sled-agent-38.0.0-1db2eb.json} (99%) create mode 100644 schema/crdb/support-bundle-time-range/up1.sql create mode 100644 schema/crdb/support-bundle-time-range/up2.sql create mode 100644 schema/crdb/support-bundle-time-range/up3.sql create mode 100644 schema/crdb/support-bundle-time-range/up4.sql create mode 100644 schema/crdb/support-bundle-time-range/up5.sql create mode 100644 schema/crdb/support-bundle-time-range/up6.sql create mode 100644 schema/crdb/support-bundle-time-range/up7.sql create mode 100644 sled-agent/src/sim/sim_support_logs.rs create mode 100644 sled-agent/types/versions/src/add_log_time_range/diagnostics.rs create mode 100644 sled-agent/types/versions/src/add_log_time_range/mod.rs diff --git a/dev-tools/omdb/src/bin/omdb/support_bundle_collect.rs b/dev-tools/omdb/src/bin/omdb/support_bundle_collect.rs index 0a7a0af2d9a..8b527b84023 100644 --- a/dev-tools/omdb/src/bin/omdb/support_bundle_collect.rs +++ b/dev-tools/omdb/src/bin/omdb/support_bundle_collect.rs @@ -28,10 +28,12 @@ use nexus_db_queries::context::OpContext; use nexus_db_queries::db::DataStore; use nexus_types::fm::ereport::EreportFilters; use nexus_types::support_bundle::BundleDataSelection; +use nexus_types::support_bundle::BundleTimeRange; use omicron_uuid_kinds::SupportBundleUuid; use std::io::Seek; use std::io::SeekFrom; use std::sync::Arc; +use std::time::Duration; use support_bundle_collection::BundleCollection; use support_bundle_collection::BundleInfo; use support_bundle_collection::zip::bundle_to_zipfile; @@ -89,6 +91,19 @@ struct CollectArgs { /// Defaults to all categories. 
#[clap(long, value_enum)] include: Vec, + + /// Collect data newer than this duration ago. Applies to both + /// service logs and ereports. Defaults to 7 days when `--since` + /// is not set. + #[clap(long, value_parser = humantime::parse_duration)] + since: Option, + + /// Collect data older than this duration ago. Truncates the + /// upper bound of the window. Applies to both service logs and + /// ereports. Defaults to no upper bound (file mtimes can't be + /// in the future, so this effectively means "up to now"). + #[clap(long, value_parser = humantime::parse_duration)] + until: Option, } impl CollectArgs { @@ -106,17 +121,28 @@ impl CollectArgs { BundleCategory::HostInfo => sel.with_all_sleds(), BundleCategory::SledCubbyInfo => sel.with_sled_cubby_info(), BundleCategory::SpDumps => sel.with_sp_dumps(), - BundleCategory::Ereports => sel.with_ereports( - EreportFilters::new() - .with_start_time( - omicron_common::now_db_precision() - - chrono::Days::new(7), - ) - .expect("no end time set, cannot fail"), - ), + BundleCategory::Ereports => { + sel.with_ereports(EreportFilters::new()) + } }; } - sel + + // Apply a bundle-wide time range. Each flag's default fires + // independently: `--since` defaults to 7 days, `--until` + // defaults to no upper bound. 
+ let now = omicron_common::now_db_precision(); + let range = BundleTimeRange { + start: self + .since + .and_then(|d| chrono::Duration::from_std(d).ok()) + .map(|d| now - d) + .or(Some(now - chrono::Days::new(7))), + end: self + .until + .and_then(|d| chrono::Duration::from_std(d).ok()) + .map(|d| now - d), + }; + sel.with_time_range(range) } } diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index 8662889facf..fcdc1a6de67 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -747,23 +747,25 @@ task: "fm_sitrep_gc" configured period: every s last completed activation: , triggered by started at (s ago) and ran for ms - batch size: 1000 - orphaned sitreps deleted: 0 - batches: 1 - orphaned fm_alert_request rows deleted: 0 - batches: 1 - orphaned fm_case rows deleted: 0 - batches: 1 - orphaned fm_ereport_in_case rows deleted: 0 - batches: 1 - orphaned fm_support_bundle_request rows deleted: 0 - batches: 1 - orphaned fm_support_bundle_request_data_selection_ereports rows deleted: 0 - batches: 1 - orphaned fm_support_bundle_request_data_selection_flags rows deleted: 0 - batches: 1 - orphaned fm_support_bundle_request_data_selection_host_info rows deleted: 0 - batches: 1 + batch size: 1000 + orphaned sitreps deleted: 0 + batches: 1 + orphaned fm_alert_request rows deleted: 0 + batches: 1 + orphaned fm_case rows deleted: 0 + batches: 1 + orphaned fm_ereport_in_case rows deleted: 0 + batches: 1 + orphaned fm_support_bundle_request rows deleted: 0 + batches: 1 + orphaned fm_support_bundle_request_data_selection_ereports rows deleted: 0 + batches: 1 + orphaned fm_support_bundle_request_data_selection_flags rows deleted: 0 + batches: 1 + orphaned fm_support_bundle_request_data_selection_host_info rows deleted: 0 + batches: 1 + orphaned fm_support_bundle_request_data_selection_time_range rows deleted: 0 + batches: 1 task: "fm_sitrep_loader" configured period: every s @@ -1428,23 +1430,25 @@ task: 
"fm_sitrep_gc" configured period: every s last completed activation: , triggered by started at (s ago) and ran for ms - batch size: 1000 - orphaned sitreps deleted: 0 - batches: 1 - orphaned fm_alert_request rows deleted: 0 - batches: 1 - orphaned fm_case rows deleted: 0 - batches: 1 - orphaned fm_ereport_in_case rows deleted: 0 - batches: 1 - orphaned fm_support_bundle_request rows deleted: 0 - batches: 1 - orphaned fm_support_bundle_request_data_selection_ereports rows deleted: 0 - batches: 1 - orphaned fm_support_bundle_request_data_selection_flags rows deleted: 0 - batches: 1 - orphaned fm_support_bundle_request_data_selection_host_info rows deleted: 0 - batches: 1 + batch size: 1000 + orphaned sitreps deleted: 0 + batches: 1 + orphaned fm_alert_request rows deleted: 0 + batches: 1 + orphaned fm_case rows deleted: 0 + batches: 1 + orphaned fm_ereport_in_case rows deleted: 0 + batches: 1 + orphaned fm_support_bundle_request rows deleted: 0 + batches: 1 + orphaned fm_support_bundle_request_data_selection_ereports rows deleted: 0 + batches: 1 + orphaned fm_support_bundle_request_data_selection_flags rows deleted: 0 + batches: 1 + orphaned fm_support_bundle_request_data_selection_host_info rows deleted: 0 + batches: 1 + orphaned fm_support_bundle_request_data_selection_time_range rows deleted: 0 + batches: 1 task: "fm_sitrep_loader" configured period: every s diff --git a/nexus/db-model/src/fm/support_bundle_request.rs b/nexus/db-model/src/fm/support_bundle_request.rs index dc62f3e7653..9164fc06695 100644 --- a/nexus/db-model/src/fm/support_bundle_request.rs +++ b/nexus/db-model/src/fm/support_bundle_request.rs @@ -11,10 +11,11 @@ use nexus_db_schema::schema::{ fm_support_bundle_request_data_selection_ereports, fm_support_bundle_request_data_selection_flags, fm_support_bundle_request_data_selection_host_info, + fm_support_bundle_request_data_selection_time_range, }; use nexus_types::fm; -use nexus_types::fm::ereport::{EreportFilters, EreportFiltersParams}; -use 
nexus_types::support_bundle::{BundleData, SledSelection}; +use nexus_types::fm::ereport::EreportFilters; +use nexus_types::support_bundle::{BundleData, BundleTimeRange, SledSelection}; use omicron_uuid_kinds::{ CaseKind, GenericUuid, SitrepKind, SledUuid, SupportBundleKind, }; @@ -120,14 +121,14 @@ impl HostInfo { impl From for BundleData { fn from(row: HostInfo) -> Self { let HostInfo { sitrep_id: _, request_id: _, all_sleds, sled_ids } = row; - let selection = if all_sleds { + let sleds = if all_sleds { SledSelection::All } else { SledSelection::Specific( sled_ids.into_iter().map(SledUuid::from_untyped_uuid).collect(), ) }; - BundleData::HostInfo(selection) + BundleData::HostInfo(sleds) } } @@ -136,8 +137,6 @@ impl From for BundleData { pub struct Ereports { pub sitrep_id: DbTypedUuid, pub request_id: DbTypedUuid, - pub start_time: Option>, - pub end_time: Option>, pub only_serials: Vec, pub only_classes: Vec, } @@ -151,39 +150,64 @@ impl Ereports { Ereports { sitrep_id: sitrep_id.into(), request_id: request_id.into(), - start_time: filters.start_time(), - end_time: filters.end_time(), only_serials: filters.only_serials().to_vec(), only_classes: filters.only_classes().to_vec(), } } } -impl TryFrom for BundleData { - type Error = omicron_common::api::external::Error; - - fn try_from(row: Ereports) -> Result { +impl From for BundleData { + fn from(row: Ereports) -> Self { let Ereports { sitrep_id: _, request_id: _, - start_time, - end_time, only_serials, only_classes, } = row; - EreportFiltersParams { - start_time, - end_time, - only_serials, - only_classes, + BundleData::Ereports( + EreportFilters::new() + .with_serials(only_serials) + .with_classes(only_classes), + ) + } +} + +/// Bundle-wide time bound persisted alongside the per-category data +/// selection rows. Reads are merged into +/// [`nexus_types::support_bundle::BundleDataSelection::time_range`]. 
+#[derive(Queryable, Insertable, Clone, Debug, Selectable)] +#[diesel(table_name = fm_support_bundle_request_data_selection_time_range)] +pub struct TimeRange { + pub sitrep_id: DbTypedUuid, + pub request_id: DbTypedUuid, + pub start_time: Option>, + pub end_time: Option>, +} + +impl TimeRange { + pub fn from_sitrep( + sitrep_id: impl Into>, + request_id: impl Into>, + range: &BundleTimeRange, + ) -> Self { + TimeRange { + sitrep_id: sitrep_id.into(), + request_id: request_id.into(), + start_time: range.start, + end_time: range.end, } - .try_into() - .map(BundleData::Ereports) } } -/// Joined query result: flags + optional host_info + optional ereports. -/// All fields use `#[diesel(embed)]` so no `table_name` is needed. +impl From for BundleTimeRange { + fn from(row: TimeRange) -> Self { + BundleTimeRange { start: row.start_time, end: row.end_time } + } +} + +/// Joined query result: flags + optional host_info + optional ereports + +/// optional bundle-wide time range. All fields use `#[diesel(embed)]` so +/// no `table_name` is needed. 
#[derive(Queryable, Selectable)] pub struct BundleDataSelection { #[diesel(embed)] @@ -192,14 +216,14 @@ pub struct BundleDataSelection { pub host_info: Option, #[diesel(embed)] pub ereports: Option, + #[diesel(embed)] + pub time_range: Option, } -impl TryFrom +impl From for nexus_types::support_bundle::BundleDataSelection { - type Error = omicron_common::api::external::Error; - - fn try_from(row: BundleDataSelection) -> Result { + fn from(row: BundleDataSelection) -> Self { let mut selection = nexus_types::support_bundle::BundleDataSelection::new(); if row.flags.include_reconfigurator { @@ -215,8 +239,9 @@ impl TryFrom selection.insert(host_info.into()); } if let Some(ereports) = row.ereports { - selection.insert(ereports.try_into()?); + selection.insert(ereports.into()); } - Ok(selection) + selection.set_time_range(row.time_range.map(BundleTimeRange::from)); + selection } } diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs index e09c123b732..c17e078db4c 100644 --- a/nexus/db-model/src/schema_versions.rs +++ b/nexus/db-model/src/schema_versions.rs @@ -16,7 +16,7 @@ use std::{collections::BTreeMap, sync::LazyLock}; /// /// This must be updated when you change the database schema. Refer to /// schema/crdb/README.adoc in the root of this repository for details. -pub const SCHEMA_VERSION: Version = Version::new(256, 0, 0); +pub const SCHEMA_VERSION: Version = Version::new(257, 0, 0); /// List of all past database schema versions, in *reverse* order /// @@ -28,6 +28,7 @@ pub static KNOWN_VERSIONS: LazyLock> = LazyLock::new(|| { // | leaving the first copy as an example for the next person. 
// v // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"), + KnownVersion::new(257, "support-bundle-time-range"), KnownVersion::new(256, "bgp-unnumbered-peer-cleanup"), KnownVersion::new(255, "blueprint-add-external-networking-generation"), KnownVersion::new( diff --git a/nexus/db-model/src/support_bundle.rs b/nexus/db-model/src/support_bundle.rs index d1f65b5c2e2..70b7925a2b7 100644 --- a/nexus/db-model/src/support_bundle.rs +++ b/nexus/db-model/src/support_bundle.rs @@ -9,13 +9,15 @@ use nexus_db_schema::schema::{ support_bundle_data_selection_ereports, support_bundle_data_selection_flags, support_bundle_data_selection_host_info, + support_bundle_data_selection_time_range, }; use chrono::{DateTime, Utc}; use nexus_types::external_api::support_bundle as support_bundle_types; -use nexus_types::fm::ereport::{EreportFilters, EreportFiltersParams}; +use nexus_types::fm::ereport::EreportFilters; use nexus_types::internal_api::views as internal_views; use nexus_types::support_bundle::BundleData; +use nexus_types::support_bundle::BundleTimeRange; use nexus_types::support_bundle::SledSelection; use omicron_uuid_kinds::CaseKind; use omicron_uuid_kinds::CaseUuid; @@ -201,14 +203,14 @@ impl HostInfo { impl From for BundleData { fn from(row: HostInfo) -> Self { let HostInfo { bundle_id: _, all_sleds, sled_ids } = row; - let selection = if all_sleds { + let sleds = if all_sleds { SledSelection::All } else { SledSelection::Specific( sled_ids.into_iter().map(SledUuid::from_untyped_uuid).collect(), ) }; - BundleData::HostInfo(selection) + BundleData::HostInfo(sleds) } } @@ -216,8 +218,6 @@ impl From for BundleData { #[diesel(table_name = support_bundle_data_selection_ereports)] pub struct Ereports { pub bundle_id: DbTypedUuid, - pub start_time: Option>, - pub end_time: Option>, pub only_serials: Vec, pub only_classes: Vec, } @@ -229,38 +229,56 @@ impl Ereports { ) -> Self { Ereports { bundle_id: bundle_id.into(), - start_time: filters.start_time(), - end_time: 
filters.end_time(), only_serials: filters.only_serials().to_vec(), only_classes: filters.only_classes().to_vec(), } } } -impl TryFrom for BundleData { - type Error = omicron_common::api::external::Error; +impl From for BundleData { + fn from(row: Ereports) -> Self { + let Ereports { bundle_id: _, only_serials, only_classes } = row; + BundleData::Ereports( + EreportFilters::new() + .with_serials(only_serials) + .with_classes(only_classes), + ) + } +} + +/// Bundle-wide time bound persisted alongside the per-category data +/// selection rows. Reads are merged into +/// [`nexus_types::support_bundle::BundleDataSelection::time_range`]. +#[derive(Queryable, Insertable, Clone, Debug, Selectable)] +#[diesel(table_name = support_bundle_data_selection_time_range)] +pub struct TimeRange { + pub bundle_id: DbTypedUuid, + pub start_time: Option>, + pub end_time: Option>, +} - fn try_from(row: Ereports) -> Result { - let Ereports { - bundle_id: _, - start_time, - end_time, - only_serials, - only_classes, - } = row; - EreportFiltersParams { - start_time, - end_time, - only_serials, - only_classes, +impl TimeRange { + pub fn new( + bundle_id: impl Into>, + range: &BundleTimeRange, + ) -> Self { + TimeRange { + bundle_id: bundle_id.into(), + start_time: range.start, + end_time: range.end, } - .try_into() - .map(BundleData::Ereports) } } -/// Joined query result: flags + optional host_info + optional ereports. -/// All fields use `#[diesel(embed)]` so no `table_name` is needed. +impl From for BundleTimeRange { + fn from(row: TimeRange) -> Self { + BundleTimeRange { start: row.start_time, end: row.end_time } + } +} + +/// Joined query result: flags + optional host_info + optional ereports + +/// optional bundle-wide time range. All fields use `#[diesel(embed)]` so +/// no `table_name` is needed. 
#[derive(Queryable, Selectable)] pub struct BundleDataSelection { #[diesel(embed)] @@ -269,14 +287,14 @@ pub struct BundleDataSelection { pub host_info: Option, #[diesel(embed)] pub ereports: Option, + #[diesel(embed)] + pub time_range: Option, } -impl TryFrom +impl From for nexus_types::support_bundle::BundleDataSelection { - type Error = omicron_common::api::external::Error; - - fn try_from(row: BundleDataSelection) -> Result { + fn from(row: BundleDataSelection) -> Self { let mut selection = nexus_types::support_bundle::BundleDataSelection::new(); if row.flags.include_reconfigurator { @@ -292,8 +310,9 @@ impl TryFrom selection.insert(host_info.into()); } if let Some(ereports) = row.ereports { - selection.insert(ereports.try_into()?); + selection.insert(ereports.into()); } - Ok(selection) + selection.set_time_range(row.time_range.map(BundleTimeRange::from)); + selection } } diff --git a/nexus/db-queries/src/db/datastore/ereport.rs b/nexus/db-queries/src/db/datastore/ereport.rs index 54aa6f6418d..147c5e13063 100644 --- a/nexus/db-queries/src/db/datastore/ereport.rs +++ b/nexus/db-queries/src/db/datastore/ereport.rs @@ -32,6 +32,7 @@ use nexus_db_schema::schema::ereport::dsl; use nexus_types::fm::ereport as fm; use nexus_types::fm::ereport::EreportFilters; use nexus_types::fm::ereport::EreportId; +use nexus_types::support_bundle::BundleTimeRange; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::Error; @@ -96,11 +97,13 @@ impl DataStore { &self, opctx: &OpContext, filters: &EreportFilters, + time_range: Option<&BundleTimeRange>, pagparams: &DataPageParams<'_, (Uuid, DbEna)>, ) -> ListResultVec { opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; - let query = Self::ereport_fetch_matching_query(filters, pagparams); + let query = + Self::ereport_fetch_matching_query(filters, time_range, pagparams); query .load_async(&*self.pool_connection_authorized(opctx).await?) 
.await @@ -109,6 +112,7 @@ impl DataStore { fn ereport_fetch_matching_query( filters: &EreportFilters, + time_range: Option<&BundleTimeRange>, pagparams: &DataPageParams<'_, (Uuid, DbEna)>, ) -> impl RunnableQuery + use<> { let mut query = paginated_multicolumn( @@ -119,12 +123,13 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .select(Ereport::as_select()); - if let Some(start) = filters.start_time() { - query = query.filter(dsl::time_collected.ge(start)); - } - - if let Some(end) = filters.end_time() { - query = query.filter(dsl::time_collected.le(end)); + if let Some(range) = time_range { + if let Some(start) = range.start { + query = query.filter(dsl::time_collected.ge(start)); + } + if let Some(end) = range.end { + query = query.filter(dsl::time_collected.le(end)); + } } if !filters.only_serials().is_empty() { @@ -553,23 +558,20 @@ mod tests { #[tokio::test] async fn explain_ereport_fetch_matching_only_time() { - explain_fetch_matching_query( + explain_fetch_matching_query_with_time( "explain_ereport_fetch_matching_only_time", - EreportFilters::new() - .with_end_time(chrono::Utc::now()) - .expect("no start time set"), + EreportFilters::new(), + BundleTimeRange { start: None, end: Some(chrono::Utc::now()) }, ) .await } #[tokio::test] async fn explain_ereport_fetch_matching_time_and_serials() { - explain_fetch_matching_query( + explain_fetch_matching_query_with_time( "explain_ereport_fetch_matching_only_time", - EreportFilters::new() - .with_serials(["BRM6900420", "BRM5555555"]) - .with_end_time(chrono::Utc::now()) - .expect("no start time set"), + EreportFilters::new().with_serials(["BRM6900420", "BRM5555555"]), + BundleTimeRange { start: None, end: Some(chrono::Utc::now()) }, ) .await } @@ -577,6 +579,23 @@ mod tests { async fn explain_fetch_matching_query( test_name: &str, filters: EreportFilters, + ) { + explain_fetch_matching_query_inner(test_name, filters, None).await + } + + async fn explain_fetch_matching_query_with_time( + test_name: &str, + 
filters: EreportFilters, + time_range: BundleTimeRange, + ) { + explain_fetch_matching_query_inner(test_name, filters, Some(time_range)) + .await + } + + async fn explain_fetch_matching_query_inner( + test_name: &str, + filters: EreportFilters, + time_range: Option, ) { let logctx = dev::test_setup_log(test_name); let db = TestDatabase::new_with_pool(&logctx.log).await; @@ -590,8 +609,11 @@ mod tests { }; eprintln!("--- filters: {filters:#?}\n"); - let query = - DataStore::ereport_fetch_matching_query(&filters, &pagparams); + let query = DataStore::ereport_fetch_matching_query( + &filters, + time_range.as_ref(), + &pagparams, + ); let explanation = query .explain_async(&conn) @@ -795,19 +817,25 @@ mod tests { }; let found_default = datastore - .ereport_fetch_matching(opctx, &Default::default(), &pagparams) + .ereport_fetch_matching( + opctx, + &Default::default(), + None, + &pagparams, + ) .await .expect("fetch matching with default filters should succeed"); check_results(dbg!(found_default), &id, &ereport); + let time_range = BundleTimeRange { + start: Some(ereport.time_collected - Duration::from_secs(600)), + end: None, + }; let found_by_time_range = datastore .ereport_fetch_matching( opctx, - &EreportFilters::new() - .with_start_time( - ereport.time_collected - Duration::from_secs(600), - ) - .expect("no end time set"), + &EreportFilters::new(), + Some(&time_range), &pagparams, ) .await @@ -818,6 +846,7 @@ mod tests { .ereport_fetch_matching( opctx, &EreportFilters::new().with_serials(["my cool serial"]), + None, &pagparams, ) .await @@ -828,6 +857,7 @@ mod tests { .ereport_fetch_matching( opctx, &EreportFilters::new().with_classes(["my cool ereport"]), + None, &pagparams, ) .await diff --git a/nexus/db-queries/src/db/datastore/fm.rs b/nexus/db-queries/src/db/datastore/fm.rs index 1552d463dfb..a3ff3121fdc 100644 --- a/nexus/db-queries/src/db/datastore/fm.rs +++ b/nexus/db-queries/src/db/datastore/fm.rs @@ -119,6 +119,7 @@ sitrep_child_tables! 
{ SupportBundleRequestDataSelectionFlags => { table: "fm_support_bundle_request_data_selection_flags" }, SupportBundleRequestDataSelectionHostInfo => { table: "fm_support_bundle_request_data_selection_host_info" }, SupportBundleRequestDataSelectionEreports => { table: "fm_support_bundle_request_data_selection_ereports" }, + SupportBundleRequestDataSelectionTimeRange => { table: "fm_support_bundle_request_data_selection_time_range" }, SupportBundleRequest => { table: "fm_support_bundle_request" }, Case => { table: "fm_case" }, } @@ -576,6 +577,7 @@ impl DataStore { use nexus_db_schema::schema::fm_support_bundle_request_data_selection_ereports::dsl as ereports_dsl; use nexus_db_schema::schema::fm_support_bundle_request_data_selection_flags::dsl as flags_dsl; use nexus_db_schema::schema::fm_support_bundle_request_data_selection_host_info::dsl as host_info_dsl; + use nexus_db_schema::schema::fm_support_bundle_request_data_selection_time_range::dsl as time_range_dsl; let sitrep_uuid = id.into_untyped_uuid(); let mut selections = @@ -600,6 +602,11 @@ impl DataStore { .on(ereports_dsl::sitrep_id.eq(flags_dsl::sitrep_id) .and(ereports_dsl::request_id.eq(flags_dsl::request_id))), ) + .left_join( + time_range_dsl::fm_support_bundle_request_data_selection_time_range + .on(time_range_dsl::sitrep_id.eq(flags_dsl::sitrep_id) + .and(time_range_dsl::request_id.eq(flags_dsl::request_id))), + ) .select(DbBundleDataSelection::as_select()) .load_async(conn) .await @@ -611,11 +618,7 @@ impl DataStore { paginator = p.found_batch(&batch, &|row| row.flags.request_id); for row in batch { let request_id: SupportBundleUuid = row.flags.request_id.into(); - let domain_selection: BundleDataSelection = - row.try_into().map_err(|e: Error| { - e.internal_context("failed to convert data selection") - })?; - selections.insert(request_id, domain_selection); + selections.insert(request_id, row.into()); } } @@ -847,11 +850,12 @@ impl DataStore { requests: Vec, data_selections: Vec<(SupportBundleUuid, 
BundleDataSelection)>, ) -> Result<(), InsertSitrepError> { - use model::fm::{DataSelectionFlags, Ereports, HostInfo}; + use model::fm::{DataSelectionFlags, Ereports, HostInfo, TimeRange}; use nexus_db_schema::schema::fm_support_bundle_request::dsl as support_bundle_req_dsl; use nexus_db_schema::schema::fm_support_bundle_request_data_selection_ereports::dsl as ereports_dsl; use nexus_db_schema::schema::fm_support_bundle_request_data_selection_flags::dsl as flags_dsl; use nexus_db_schema::schema::fm_support_bundle_request_data_selection_host_info::dsl as host_info_dsl; + use nexus_db_schema::schema::fm_support_bundle_request_data_selection_time_range::dsl as time_range_dsl; if !requests.is_empty() { diesel::insert_into( @@ -870,6 +874,7 @@ impl DataStore { let mut flags_rows = Vec::new(); let mut host_info_rows = Vec::new(); let mut ereports_rows = Vec::new(); + let mut time_range_rows = Vec::new(); for (req_id, data_selection) in data_selections { flags_rows.push(DataSelectionFlags::from_sitrep( @@ -877,6 +882,10 @@ impl DataStore { req_id, &data_selection, )); + if let Some(range) = data_selection.time_range() { + time_range_rows + .push(TimeRange::from_sitrep(sitrep_id, req_id, range)); + } for data in data_selection { match data { BundleData::Reconfigurator @@ -932,6 +941,20 @@ impl DataStore { .internal_context("failed to insert sb_req_ereports rows") })?; } + if !time_range_rows.is_empty() { + diesel::insert_into( + time_range_dsl::fm_support_bundle_request_data_selection_time_range, + ) + .values(time_range_rows) + .execute_async(conn) + .await + .map_err(|e| { + public_error_from_diesel(e, ErrorHandler::Server) + .internal_context( + "failed to insert sb_req_time_range rows", + ) + })?; + } Ok(()) } @@ -1484,6 +1507,7 @@ impl DataStore { use nexus_db_schema::schema::fm_support_bundle_request_data_selection_ereports::dsl as ereports_dsl; use nexus_db_schema::schema::fm_support_bundle_request_data_selection_flags::dsl as flags_dsl; use 
nexus_db_schema::schema::fm_support_bundle_request_data_selection_host_info::dsl as host_info_dsl; + use nexus_db_schema::schema::fm_support_bundle_request_data_selection_time_range::dsl as time_range_dsl; diesel::delete( flags_dsl::fm_support_bundle_request_data_selection_flags @@ -1503,6 +1527,12 @@ impl DataStore { ) .execute_async(conn) .await?; + diesel::delete( + time_range_dsl::fm_support_bundle_request_data_selection_time_range + .filter(time_range_dsl::sitrep_id.eq_any(sitrep_ids.clone())), + ) + .execute_async(conn) + .await?; diesel::delete( support_bundle_req_dsl::fm_support_bundle_request .filter(support_bundle_req_dsl::sitrep_id.eq_any(sitrep_ids)), @@ -2213,7 +2243,7 @@ mod tests { }) .unwrap(); // A request with a nontrivial data_selection, including - // HostInfo(Specific) and ereport time filters. + // HostInfo(Specific) and a bundle-wide time range. { use nexus_types::fm::ereport::EreportFilters; use nexus_types::support_bundle::*; @@ -2224,13 +2254,11 @@ mod tests { .with_specific_sleds([ omicron_uuid_kinds::SledUuid::new_v4(), ]) - .with_ereports( - EreportFilters::new() - .with_start_time(now - chrono::Duration::hours(1)) - .unwrap() - .with_end_time(now) - .unwrap(), - ); + .with_ereports(EreportFilters::new()) + .with_time_range(BundleTimeRange { + start: Some(now - chrono::Duration::hours(1)), + end: Some(now), + }); support_bundles_requested .insert_unique(fm::case::SupportBundleRequest { id: SupportBundleUuid::new_v4(), @@ -2378,13 +2406,13 @@ mod tests { .with_specific_sleds([sled1, sled2]) .with_ereports( EreportFilters::new() - .with_start_time(now - chrono::Duration::hours(2)) - .unwrap() - .with_end_time(now) - .unwrap() .with_serials(["BRM42"]) .with_classes(["ereport.io"]), - ); + ) + .with_time_range(BundleTimeRange { + start: Some(now - chrono::Duration::hours(2)), + end: Some(now), + }); support_bundles_requested .insert_unique(fm::case::SupportBundleRequest { id: SupportBundleUuid::new_v4(), diff --git 
a/nexus/db-queries/src/db/datastore/support_bundle.rs b/nexus/db-queries/src/db/datastore/support_bundle.rs index 71b2ca5e727..742bbcd94a7 100644 --- a/nexus/db-queries/src/db/datastore/support_bundle.rs +++ b/nexus/db-queries/src/db/datastore/support_bundle.rs @@ -261,33 +261,36 @@ impl DataStore { use nexus_db_schema::schema::support_bundle_data_selection_ereports::dsl as ereports_dsl; use nexus_db_schema::schema::support_bundle_data_selection_flags::dsl as flags_dsl; use nexus_db_schema::schema::support_bundle_data_selection_host_info::dsl as host_info_dsl; + use nexus_db_schema::schema::support_bundle_data_selection_time_range::dsl as time_range_dsl; let conn = self.pool_connection_authorized(opctx).await?; let bundle_uuid = bundle_id.into_untyped_uuid(); - flags_dsl::support_bundle_data_selection_flags - .filter(flags_dsl::bundle_id.eq(bundle_uuid)) - .left_join( - host_info_dsl::support_bundle_data_selection_host_info - .on(host_info_dsl::bundle_id.eq(flags_dsl::bundle_id)), - ) - .left_join( - ereports_dsl::support_bundle_data_selection_ereports - .on(ereports_dsl::bundle_id.eq(flags_dsl::bundle_id)), - ) - .select(DbBundleDataSelection::as_select()) - .first_async(&*conn) - .await - .map_err(|e| { - public_error_from_diesel(e, ErrorHandler::Server) - .internal_context( - "failed to query data selection for bundle", - ) - })? 
- .try_into() - .map_err(|e: Error| { - e.internal_context("failed to convert data selection") - }) + let row: DbBundleDataSelection = + flags_dsl::support_bundle_data_selection_flags + .filter(flags_dsl::bundle_id.eq(bundle_uuid)) + .left_join( + host_info_dsl::support_bundle_data_selection_host_info + .on(host_info_dsl::bundle_id.eq(flags_dsl::bundle_id)), + ) + .left_join( + ereports_dsl::support_bundle_data_selection_ereports + .on(ereports_dsl::bundle_id.eq(flags_dsl::bundle_id)), + ) + .left_join( + time_range_dsl::support_bundle_data_selection_time_range + .on(time_range_dsl::bundle_id.eq(flags_dsl::bundle_id)), + ) + .select(DbBundleDataSelection::as_select()) + .first_async(&*conn) + .await + .map_err(|e| { + public_error_from_diesel(e, ErrorHandler::Server) + .internal_context( + "failed to query data selection for bundle", + ) + })?; + Ok(row.into()) } /// Looks up a single support bundle @@ -645,10 +648,13 @@ impl DataStore { bundle_id: SupportBundleUuid, data_selection: BundleDataSelection, ) -> Result<(), diesel::result::Error> { - use crate::db::model::{DataSelectionFlags, Ereports, HostInfo}; + use crate::db::model::{ + DataSelectionFlags, Ereports, HostInfo, TimeRange, + }; use nexus_db_schema::schema::support_bundle_data_selection_ereports::dsl as ereports_dsl; use nexus_db_schema::schema::support_bundle_data_selection_flags::dsl as flags_dsl; use nexus_db_schema::schema::support_bundle_data_selection_host_info::dsl as host_info_dsl; + use nexus_db_schema::schema::support_bundle_data_selection_time_range::dsl as time_range_dsl; // Always insert a flags row. diesel::insert_into(flags_dsl::support_bundle_data_selection_flags) @@ -663,6 +669,17 @@ impl DataStore { .execute_async(conn) .await?; + // Insert the bundle-wide time-range row (if set), independent + // of any per-category rows. 
+ if let Some(range) = data_selection.time_range() { + diesel::insert_into( + time_range_dsl::support_bundle_data_selection_time_range, + ) + .values(TimeRange::new(bundle_id, range)) + .execute_async(conn) + .await?; + } + // Insert payload tables for variants that carry data. for data in data_selection { match data { @@ -704,6 +721,7 @@ impl DataStore { use nexus_db_schema::schema::support_bundle_data_selection_ereports::dsl as ereports_dsl; use nexus_db_schema::schema::support_bundle_data_selection_flags::dsl as flags_dsl; use nexus_db_schema::schema::support_bundle_data_selection_host_info::dsl as host_info_dsl; + use nexus_db_schema::schema::support_bundle_data_selection_time_range::dsl as time_range_dsl; diesel::delete(flags_dsl::support_bundle_data_selection_flags) .filter(flags_dsl::bundle_id.eq_any(bundle_ids.clone())) @@ -714,9 +732,15 @@ impl DataStore { .execute_async(conn) .await?; diesel::delete(ereports_dsl::support_bundle_data_selection_ereports) - .filter(ereports_dsl::bundle_id.eq_any(bundle_ids)) + .filter(ereports_dsl::bundle_id.eq_any(bundle_ids.clone())) .execute_async(conn) .await?; + diesel::delete( + time_range_dsl::support_bundle_data_selection_time_range, + ) + .filter(time_range_dsl::bundle_id.eq_any(bundle_ids)) + .execute_async(conn) + .await?; Ok(()) } diff --git a/nexus/db-schema/src/schema.rs b/nexus/db-schema/src/schema.rs index 85383eac200..dc42d9ba15c 100644 --- a/nexus/db-schema/src/schema.rs +++ b/nexus/db-schema/src/schema.rs @@ -1620,17 +1620,24 @@ table! { table! { support_bundle_data_selection_ereports (bundle_id) { bundle_id -> Uuid, - start_time -> Nullable, - end_time -> Nullable, only_serials -> Array, only_classes -> Array, } } +table! 
{ + support_bundle_data_selection_time_range (bundle_id) { + bundle_id -> Uuid, + start_time -> Nullable, + end_time -> Nullable, + } +} + allow_tables_to_appear_in_same_query!( support_bundle_data_selection_flags, support_bundle_data_selection_host_info, support_bundle_data_selection_ereports, + support_bundle_data_selection_time_range, ); /* hardware inventory */ @@ -3297,17 +3304,25 @@ table! { fm_support_bundle_request_data_selection_ereports (sitrep_id, request_id) { sitrep_id -> Uuid, request_id -> Uuid, - start_time -> Nullable, - end_time -> Nullable, only_serials -> Array, only_classes -> Array, } } +table! { + fm_support_bundle_request_data_selection_time_range (sitrep_id, request_id) { + sitrep_id -> Uuid, + request_id -> Uuid, + start_time -> Nullable, + end_time -> Nullable, + } +} + allow_tables_to_appear_in_same_query!( fm_support_bundle_request_data_selection_flags, fm_support_bundle_request_data_selection_host_info, fm_support_bundle_request_data_selection_ereports, + fm_support_bundle_request_data_selection_time_range, ); table! 
{ diff --git a/nexus/mgs-updates/src/test_util/host_phase_2_test_state.rs b/nexus/mgs-updates/src/test_util/host_phase_2_test_state.rs index f228f68d961..1299a32faed 100644 --- a/nexus/mgs-updates/src/test_util/host_phase_2_test_state.rs +++ b/nexus/mgs-updates/src/test_util/host_phase_2_test_state.rs @@ -941,6 +941,18 @@ mod api_impl { unimplemented!() } + async fn support_logs_download_v1( + _request_context: RequestContext, + _path_params: Path< + sled_agent_types_versions::v1::diagnostics::SledDiagnosticsLogsDownloadPathParam, + >, + _query_params: dropshot::Query< + sled_agent_types_versions::v1::diagnostics::SledDiagnosticsLogsDownloadQueryParam, + >, + ) -> Result, HttpError> { + unimplemented!() + } + async fn chicken_switch_destroy_orphaned_datasets_get_v1( _request_context: RequestContext, ) -> Result< diff --git a/nexus/tests/integration_tests/support_bundles.rs b/nexus/tests/integration_tests/support_bundles.rs index 4ddd5365e16..4b296a50a26 100644 --- a/nexus/tests/integration_tests/support_bundles.rs +++ b/nexus/tests/integration_tests/support_bundles.rs @@ -6,14 +6,20 @@ use anyhow::Context; use anyhow::Result; +use camino::Utf8Path; +use camino::Utf8PathBuf; +use chrono::DateTime; +use chrono::Utc; use dropshot::HttpErrorResponseBody; use dropshot::test_util::ClientTestContext; use futures::TryStreamExt; use http::StatusCode; use http::method::Method; +use internal_dns_resolver::Resolver; use nexus_db_model::SupportBundleState as DbSupportBundleState; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; +use nexus_db_queries::db::DataStore; use nexus_lockstep_client::types::LastResult; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; @@ -25,10 +31,15 @@ use nexus_types::internal_api::background::SupportBundleCleanupReport; use nexus_types::internal_api::background::SupportBundleCollectionReport; use nexus_types::internal_api::background::SupportBundleCollectionStep; use 
nexus_types::internal_api::background::SupportBundleEreportStatus; +use nexus_types::support_bundle::BundleDataSelection; +use nexus_types::support_bundle::BundleTimeRange; use omicron_common::api::external::LookupType; use omicron_uuid_kinds::SupportBundleUuid; use serde::Deserialize; use std::io::Cursor; +use std::sync::Arc; +use support_bundle_collection::BundleCollection; +use support_bundle_collection::BundleInfo; use zip::read::ZipArchive; type ControlPlaneTestContext = @@ -1013,3 +1024,158 @@ async fn test_support_bundle_fm_case_id(cptestctx: &ControlPlaneTestContext) { .into_inner(); assert_eq!(fetched_fm.fm_case_id, Some(case_id)); } + +/// The collector writes per-sled logs at +/// `/rack//sled//logs//`. Find the +/// first matching directory; tests that use this only inject data +/// on one sled. +fn find_first_zone_log_dir(root: &Utf8Path, zone: &str) -> Utf8PathBuf { + let rack_root = root.join("rack"); + let mut entries = rack_root + .read_dir_utf8() + .expect("read rack dir") + .filter_map(Result::ok); + let rack = entries.next().expect("at least one rack").path().to_owned(); + let mut sled_iter = rack + .join("sled") + .read_dir_utf8() + .expect("read sled dir") + .filter_map(Result::ok); + loop { + let sled = + sled_iter.next().expect("at least one sled with injected logs"); + let candidate = sled.path().join("logs").join(zone); + if candidate.exists() { + return candidate; + } + } +} + +/// Build a `BundleCollection` with `start` as the bundle-wide lower +/// bound and run `collect_bundle_locally` into a fresh temp dir. +/// Bypasses the Nexus state machine and `support_bundle` row; +/// useful only for exercising the collection mechanism directly. 
+async fn collect_with_window( + cptestctx: &ControlPlaneTestContext, + opctx: &OpContext, + datastore: &Arc<DataStore>, + resolver: Resolver, + start: DateTime<Utc>, +) -> camino_tempfile::Utf8TempDir { + let bundle_id = SupportBundleUuid::new_v4(); + let log = cptestctx.logctx.log.new(slog::o!( + "component" => "test_support_bundle_log_time_range", + "bundle" => bundle_id.to_string(), + )); + let selection = BundleDataSelection::new() + .with_all_sleds() + .with_time_range(BundleTimeRange { start: Some(start), end: None }); + let collection = Arc::new(BundleCollection::new( + datastore.clone(), + resolver, + log, + opctx.child(std::collections::BTreeMap::new()), + selection, + BundleInfo { + id: bundle_id, + reason_for_creation: "log time-range test".into(), + }, + )); + let dir = camino_tempfile::tempdir().expect("creating output tempdir"); + collection + .collect_bundle_locally(&dir) + .await + .expect("collect_bundle_locally"); + dir +} + +// Exercise the host-info time-range filter end-to-end: +// inject synthetic log entries on the sim sled-agent with crafted mtimes, +// then run the bundle collector and confirm that only entries within the +// requested window land in the output directory. +#[nexus_test] +async fn test_support_bundle_log_time_range( + cptestctx: &ControlPlaneTestContext, +) { + use chrono::TimeDelta; + use omicron_sled_agent::sim::SimLogEntry; + + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); + let resolver = nexus.resolver(); + let opctx = + OpContext::for_tests(cptestctx.logctx.log.clone(), datastore.clone()); + + // Inject four synthetic entries with mtimes that bracket the two + // windows we test below: 30 m and 6 h ago land inside both, 3 d + // ago lands only inside the 7 d window, and 30 d ago is outside + // both.
+ let now = Utc::now(); + let recent = now - TimeDelta::minutes(30); + let middle = now - TimeDelta::hours(6); + let between = now - TimeDelta::days(3); + let ancient = now - TimeDelta::days(30); + let zone = "test_zone"; + let sled_agent = cptestctx.first_sled_agent(); + for (filename, mtime) in [ + ("recent.log", recent), + ("middle.log", middle), + ("between.log", between), + ("ancient.log", ancient), + ] { + sled_agent.insert_support_log( + zone, + SimLogEntry { + filename: filename.into(), + contents: filename.as_bytes().to_vec(), + mtime, + }, + ); + } + + // 24h window: recent + middle inside; between (3 d) and ancient + // (30 d) excluded. + let dir_24h = collect_with_window( + cptestctx, + &opctx, + datastore, + resolver.clone(), + now - TimeDelta::hours(24), + ) + .await; + let log_dir = find_first_zone_log_dir(dir_24h.path(), zone); + assert!( + log_dir.join("recent.log").exists(), + "24h window: recent.log should be present at {log_dir}", + ); + assert!( + log_dir.join("middle.log").exists(), + "24h window: middle.log should be present at {log_dir}", + ); + assert!( + !log_dir.join("between.log").exists(), + "24h window: between.log (3 d) should NOT be present at {log_dir}", + ); + assert!( + !log_dir.join("ancient.log").exists(), + "24h window: ancient.log should NOT be present at {log_dir}", + ); + + // 7d window: between (3 d) now in scope; ancient (30 d) still out. 
+ let dir_7d = collect_with_window( + cptestctx, + &opctx, + datastore, + resolver.clone(), + now - TimeDelta::days(7), + ) + .await; + let log_dir = find_first_zone_log_dir(dir_7d.path(), zone); + assert!(log_dir.join("recent.log").exists()); + assert!(log_dir.join("middle.log").exists()); + assert!( + log_dir.join("between.log").exists(), + "7d window: between.log (3 d) should be present at {log_dir}", + ); + assert!(!log_dir.join("ancient.log").exists()); +} diff --git a/nexus/types/src/fm/ereport.rs b/nexus/types/src/fm/ereport.rs index 2cc219392c8..672e97fa267 100644 --- a/nexus/types/src/fm/ereport.rs +++ b/nexus/types/src/fm/ereport.rs @@ -6,7 +6,6 @@ use crate::inventory::SpType; use chrono::{DateTime, Utc}; -use omicron_common::api::external::Error; use omicron_uuid_kinds::EreporterRestartUuid; use omicron_uuid_kinds::OmicronZoneUuid; use omicron_uuid_kinds::SitrepUuid; @@ -239,23 +238,8 @@ fn get_sp_metadata_string( /// .with_classes(["hw.pwr.*"]); /// ``` /// -/// Note: JSON deserialization validates the start_time/end_time constraints -/// using `TryFrom`. #[derive(Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)] -#[serde(try_from = "EreportFiltersParams")] pub struct EreportFilters { - /// If present, include only ereports that were collected at the specified - /// timestamp or later. - /// - /// If `end_time` is also present, this value *must* be at or before - /// `end_time`. This invariant is enforced by [`Self::with_start_time`]. - start_time: Option>, - /// If present, include only ereports that were collected at the specified - /// timestamp or before. - /// - /// If `start_time` is also present, this value *must* be at or after - /// `start_time`. This invariant is enforced by [`Self::with_end_time`]. - end_time: Option>, /// If this list is non-empty, include only ereports that were reported by /// systems with the provided serial numbers. 
only_serials: Vec, @@ -265,40 +249,6 @@ pub struct EreportFilters { only_classes: Vec, } -/// Helper for [`EreportFilters`] that validates the `start_time <= end_time` -/// invariant. Used by serde deserialization above, as well as in conversions -/// from Diesel `Ereports` model types. -#[derive(Deserialize)] -#[must_use = "this struct does nothing unless converted to EreportFilters"] -pub struct EreportFiltersParams { - pub start_time: Option>, - pub end_time: Option>, - pub only_serials: Vec, - pub only_classes: Vec, -} - -impl TryFrom for EreportFilters { - type Error = Error; - - fn try_from(params: EreportFiltersParams) -> Result { - let EreportFiltersParams { - start_time, - end_time, - only_serials, - only_classes, - } = params; - let mut f = Self::new(); - if let Some(t) = start_time { - f = f.with_start_time(t)?; - } - if let Some(t) = end_time { - f = f.with_end_time(t)?; - } - f = f.with_serials(only_serials).with_classes(only_classes); - Ok(f) - } -} - /// Displayer for pretty-printing [`EreportFilters`]. #[must_use = "this struct does nothing unless displayed"] pub struct DisplayEreportFilters<'a> { @@ -311,39 +261,6 @@ impl EreportFilters { Self::default() } - /// Includes only ereports collected at or after `time`. - /// - /// Returns an error if `time` is after a previously set end time. - pub fn with_start_time( - mut self, - time: DateTime, - ) -> Result { - if let Some(end) = self.end_time { - if time > end { - return Err(Error::invalid_request( - "start time must be before end time", - )); - } - } - self.start_time = Some(time); - Ok(self) - } - - /// Includes only ereports collected at or before `time`. - /// - /// Returns an error if `time` is before a previously set start time. 
- pub fn with_end_time(mut self, time: DateTime) -> Result { - if let Some(start) = self.start_time { - if start > time { - return Err(Error::invalid_request( - "start time must be before end time", - )); - } - } - self.end_time = Some(time); - Ok(self) - } - /// Adds serial numbers to the inclusion filter. /// /// When one or more serials are present, only ereports reported by @@ -368,16 +285,6 @@ impl EreportFilters { self } - /// Returns the start time filter, if set. - pub fn start_time(&self) -> Option> { - self.start_time - } - - /// Returns the end time filter, if set. - pub fn end_time(&self) -> Option> { - self.end_time - } - /// Returns the serial number inclusion filter. pub fn only_serials(&self) -> &[String] { &self.only_serials @@ -411,12 +318,6 @@ impl fmt::Display for DisplayEreportFilters<'_> { f.write_fmt(args) }; - if let Some(start) = filters.start_time() { - fmt_part(f, format_args!("start: {start}"))?; - } - if let Some(end) = filters.end_time() { - fmt_part(f, format_args!("end: {end}"))?; - } if !filters.only_serials().is_empty() { fmt_part( f, @@ -449,41 +350,17 @@ pub(crate) mod test_utils { use super::*; use proptest::prelude::*; - fn arb_datetime() -> impl Strategy> { - // Generate timestamps in a reasonable range (2020-2030). - (1577836800i64..1893456000i64) - .prop_map(|secs| DateTime::from_timestamp(secs, 0).unwrap()) - } - impl Arbitrary for EreportFilters { type Parameters = (); type Strategy = BoxedStrategy; fn arbitrary_with(_: Self::Parameters) -> Self::Strategy { ( - prop::option::of(arb_datetime()), - prop::option::of(arb_datetime()), prop::collection::vec(".*", 0..=3), prop::collection::vec(".*", 0..=3), ) - .prop_map(|(time_a, time_b, only_serials, only_classes)| { - // Ensure start <= end when both are present. 
- let (start_time, end_time) = match (time_a, time_b) { - (Some(a), Some(b)) if a > b => (Some(b), Some(a)), - other => other, - }; - let mut filters = EreportFilters::new(); - if let Some(t) = start_time { - filters = filters - .with_start_time(t) - .expect("no end time set yet"); - } - if let Some(t) = end_time { - filters = filters - .with_end_time(t) - .expect("start <= end by construction"); - } - filters + .prop_map(|(only_serials, only_classes)| { + EreportFilters::new() .with_serials(only_serials) .with_classes(only_classes) }) diff --git a/nexus/types/src/support_bundle.rs b/nexus/types/src/support_bundle.rs index 4c95dcb3233..c4dd07d5ef3 100644 --- a/nexus/types/src/support_bundle.rs +++ b/nexus/types/src/support_bundle.rs @@ -8,6 +8,8 @@ //! They are shared between the support bundle collector and FM case types. use crate::fm::ereport::EreportFilters; +use chrono::DateTime; +use chrono::Utc; use itertools::Itertools; use omicron_uuid_kinds::SledUuid; use serde::{Deserialize, Serialize}; @@ -49,6 +51,19 @@ pub enum BundleData { Ereports(EreportFilters), } +/// Inclusive time bound applied bundle-wide to time-bounded categories +/// (currently host-info logs and ereports). +/// +/// `None` on either side means unbounded on that side. Stored as a +/// single field on [`BundleDataSelection`] rather than smeared across +/// per-category filters; the `time_range()` accessor surfaces it to +/// consumers. 
+#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize)] +pub struct BundleTimeRange { + pub start: Option>, + pub end: Option>, +} + impl BundleData { fn category(&self) -> BundleDataCategory { match self { @@ -77,8 +92,8 @@ impl fmt::Display for DisplayBundleData<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.data { BundleData::Reconfigurator => write!(f, "reconfigurator"), - BundleData::HostInfo(selection) => { - write!(f, "host_info({})", selection.display()) + BundleData::HostInfo(sleds) => { + write!(f, "host_info({})", sleds.display()) } BundleData::SledCubbyInfo => write!(f, "sled_cubby_info"), BundleData::SpDumps => write!(f, "sp_dumps"), @@ -94,33 +109,38 @@ impl fmt::Display for DisplayBundleData<'_> { /// This wrapper ensures that categories and data always match - you can't /// insert (BundleDataCategory::Reconfigurator, BundleData::SpDumps) /// because each BundleData determines its own category. -#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] +/// +/// `time_range`, when set, bounds every time-bounded category's +/// collection (host-info logs and ereports). Stored as one field +/// here rather than copied into per-category filters. +#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize)] pub struct BundleDataSelection { data: HashMap, + time_range: Option, } impl BundleDataSelection { /// Creates an empty selection with no data categories. pub fn new() -> Self { - Self { data: HashMap::new() } + Self::default() } /// Returns a selection containing all default data categories - /// (i.e. "collect everything"). + /// (i.e. "collect everything") with a bundle-wide 7-day time + /// window applied to host-info logs and ereports. 
pub fn all() -> Self { Self::new() .with_reconfigurator() .with_all_sleds() .with_sled_cubby_info() .with_sp_dumps() - .with_ereports( - EreportFilters::new() - .with_start_time( - omicron_common::now_db_precision() - - chrono::Days::new(7), - ) - .expect("no end time set, cannot fail"), - ) + .with_ereports(EreportFilters::new()) + .with_time_range(BundleTimeRange { + start: Some( + omicron_common::now_db_precision() - chrono::Days::new(7), + ), + end: None, + }) } /// Adds reconfigurator state collection. @@ -164,12 +184,26 @@ impl BundleDataSelection { self } + /// Sets the bundle-wide time range. Affects every time-bounded + /// category (host-info logs and ereports) at collection time. + pub fn with_time_range(mut self, range: BundleTimeRange) -> Self { + self.time_range = Some(range); + self + } + /// Inserts a [`BundleData`] value. If a value with the same category /// already exists, the last write wins. pub fn insert(&mut self, bundle_data: BundleData) { self.data.insert(bundle_data.category(), bundle_data); } + /// Sets the bundle-wide time range in place (used by code paths + /// that build the selection incrementally, e.g. database read + /// paths). + pub fn set_time_range(&mut self, range: Option) { + self.time_range = range; + } + /// Returns `true` if reconfigurator state should be collected. pub fn contains_reconfigurator(&self) -> bool { self.data.contains_key(&BundleDataCategory::Reconfigurator) @@ -202,6 +236,12 @@ impl BundleDataSelection { _ => None, } } + + /// Returns the bundle-wide time range, if any was set. Applies + /// to every time-bounded category at collection time. + pub fn time_range(&self) -> Option<&BundleTimeRange> { + self.time_range.as_ref() + } } impl IntoIterator for BundleDataSelection { @@ -260,12 +300,6 @@ impl fmt::Display for DisplayBundleDataSelection<'_> { } } -impl Default for BundleDataSelection { - fn default() -> Self { - Self::new() - } -} - /// The set of sleds to include. 
This can either be all sleds, or a set of /// specific sleds. #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] @@ -311,13 +345,41 @@ pub(crate) mod test_utils { use super::*; use proptest::prelude::*; + fn arb_datetime() -> impl Strategy> { + // Span the full representable range of `DateTime` so + // round-trip tests exercise far-past and far-future times, + // not just a hand-picked window that drifts out of date. + let min = DateTime::::MIN_UTC.timestamp(); + let max = DateTime::::MAX_UTC.timestamp(); + (min..=max).prop_map(|secs| DateTime::from_timestamp(secs, 0).unwrap()) + } + + impl Arbitrary for BundleTimeRange { + type Parameters = (); + type Strategy = BoxedStrategy; + + fn arbitrary_with(_: Self::Parameters) -> Self::Strategy { + (prop::option::of(arb_datetime()), prop::option::of(arb_datetime())) + .prop_map(|(start, end)| BundleTimeRange { start, end }) + .boxed() + } + } + impl Arbitrary for BundleDataSelection { type Parameters = (); type Strategy = BoxedStrategy; fn arbitrary_with(_: Self::Parameters) -> Self::Strategy { - prop::collection::vec(any::(), 0..=5) - .prop_map(|data| data.into_iter().collect()) + ( + prop::collection::vec(any::(), 0..=5), + prop::option::of(any::()), + ) + .prop_map(|(data, time_range)| { + let mut sel: BundleDataSelection = + data.into_iter().collect(); + sel.set_time_range(time_range); + sel + }) .boxed() } } diff --git a/openapi/sled-agent/sled-agent-37.0.0-a1f825.json.gitstub b/openapi/sled-agent/sled-agent-37.0.0-a1f825.json.gitstub new file mode 100644 index 00000000000..8352e4453ff --- /dev/null +++ b/openapi/sled-agent/sled-agent-37.0.0-a1f825.json.gitstub @@ -0,0 +1 @@ +71da5e7d49981b8b676a4737e0d93f5765450b80:openapi/sled-agent/sled-agent-37.0.0-a1f825.json diff --git a/openapi/sled-agent/sled-agent-37.0.0-a1f825.json b/openapi/sled-agent/sled-agent-38.0.0-1db2eb.json similarity index 99% rename from openapi/sled-agent/sled-agent-37.0.0-a1f825.json rename to 
openapi/sled-agent/sled-agent-38.0.0-1db2eb.json index d479abd971d..4f5b6777cbf 100644 --- a/openapi/sled-agent/sled-agent-37.0.0-a1f825.json +++ b/openapi/sled-agent/sled-agent-38.0.0-1db2eb.json @@ -7,7 +7,7 @@ "url": "https://oxide.computer", "email": "api@oxide.computer" }, - "version": "37.0.0" + "version": "38.0.0" }, "paths": { "/artifacts": { @@ -704,16 +704,36 @@ "type": "string" } }, + { + "in": "query", + "name": "end_time", + "description": "Upper bound (inclusive) on log file `mtime`. If absent, no upper bound is applied.", + "schema": { + "nullable": true, + "type": "string", + "format": "date-time" + } + }, { "in": "query", "name": "max_rotated", - "description": "The max number of rotated logs to include in the final support bundle", - "required": true, + "description": "The max number of rotated logs to include in the final support bundle. If absent, no count cap is applied.", "schema": { + "nullable": true, "type": "integer", "format": "uint", "minimum": 0 } + }, + { + "in": "query", + "name": "start_time", + "description": "Lower bound (inclusive) on log file `mtime`. 
If absent, no lower bound is applied.", + "schema": { + "nullable": true, + "type": "string", + "format": "date-time" + } } ], "responses": { diff --git a/openapi/sled-agent/sled-agent-latest.json b/openapi/sled-agent/sled-agent-latest.json index ccb5f6634a4..d78b9ad95bf 120000 --- a/openapi/sled-agent/sled-agent-latest.json +++ b/openapi/sled-agent/sled-agent-latest.json @@ -1 +1 @@ -sled-agent-37.0.0-a1f825.json \ No newline at end of file +sled-agent-38.0.0-1db2eb.json \ No newline at end of file diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index caa7558304a..79f3634c237 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -3201,13 +3201,26 @@ CREATE TABLE IF NOT EXISTS omicron.public.support_bundle_data_selection_host_inf CREATE TABLE IF NOT EXISTS omicron.public.support_bundle_data_selection_ereports ( bundle_id UUID NOT NULL, - start_time TIMESTAMPTZ, - end_time TIMESTAMPTZ, only_serials TEXT[] NOT NULL DEFAULT ARRAY[], only_classes TEXT[] NOT NULL DEFAULT ARRAY[], + PRIMARY KEY (bundle_id) +); + +/* + * Bundle-wide time bound applied to time-bounded categories + * (host-info logs and ereports). Persisted at the bundle level + * rather than per-category; absent rows imply no time bound. 
+ */ +CREATE TABLE IF NOT EXISTS omicron.public.support_bundle_data_selection_time_range ( + bundle_id UUID NOT NULL, + start_time TIMESTAMPTZ, + end_time TIMESTAMPTZ, + PRIMARY KEY (bundle_id), - CHECK (start_time IS NULL OR end_time IS NULL OR start_time <= end_time) + CONSTRAINT start_before_end CHECK ( + start_time IS NULL OR end_time IS NULL OR start_time <= end_time + ) ); /*******************************************************************/ @@ -7718,13 +7731,27 @@ CREATE TABLE IF NOT EXISTS omicron.public.fm_support_bundle_request_data_selecti CREATE TABLE IF NOT EXISTS omicron.public.fm_support_bundle_request_data_selection_ereports ( sitrep_id UUID NOT NULL, request_id UUID NOT NULL, - start_time TIMESTAMPTZ, - end_time TIMESTAMPTZ, only_serials TEXT[] NOT NULL DEFAULT ARRAY[], only_classes TEXT[] NOT NULL DEFAULT ARRAY[], + PRIMARY KEY (sitrep_id, request_id) +); + +/* + * Bundle-wide time bound for FM-driven support bundle requests. + * Mirrors `support_bundle_data_selection_time_range` but keyed by + * the FM (sitrep_id, request_id) tuple. 
+ */ +CREATE TABLE IF NOT EXISTS omicron.public.fm_support_bundle_request_data_selection_time_range ( + sitrep_id UUID NOT NULL, + request_id UUID NOT NULL, + start_time TIMESTAMPTZ, + end_time TIMESTAMPTZ, + PRIMARY KEY (sitrep_id, request_id), - CHECK (start_time IS NULL OR end_time IS NULL OR start_time <= end_time) + CONSTRAINT start_before_end CHECK ( + start_time IS NULL OR end_time IS NULL OR start_time <= end_time + ) ); /* @@ -8557,7 +8584,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - (TRUE, NOW(), NOW(), '256.0.0', NULL) + (TRUE, NOW(), NOW(), '257.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/schema/crdb/support-bundle-time-range/up1.sql b/schema/crdb/support-bundle-time-range/up1.sql new file mode 100644 index 00000000000..d07032ebc58 --- /dev/null +++ b/schema/crdb/support-bundle-time-range/up1.sql @@ -0,0 +1,10 @@ +CREATE TABLE IF NOT EXISTS omicron.public.support_bundle_data_selection_time_range ( + bundle_id UUID NOT NULL, + start_time TIMESTAMPTZ, + end_time TIMESTAMPTZ, + + PRIMARY KEY (bundle_id), + CONSTRAINT start_before_end CHECK ( + start_time IS NULL OR end_time IS NULL OR start_time <= end_time + ) +); diff --git a/schema/crdb/support-bundle-time-range/up2.sql b/schema/crdb/support-bundle-time-range/up2.sql new file mode 100644 index 00000000000..880a8516666 --- /dev/null +++ b/schema/crdb/support-bundle-time-range/up2.sql @@ -0,0 +1,11 @@ +CREATE TABLE IF NOT EXISTS omicron.public.fm_support_bundle_request_data_selection_time_range ( + sitrep_id UUID NOT NULL, + request_id UUID NOT NULL, + start_time TIMESTAMPTZ, + end_time TIMESTAMPTZ, + + PRIMARY KEY (sitrep_id, request_id), + CONSTRAINT start_before_end CHECK ( + start_time IS NULL OR end_time IS NULL OR start_time <= end_time + ) +); diff --git a/schema/crdb/support-bundle-time-range/up3.sql b/schema/crdb/support-bundle-time-range/up3.sql new file mode 100644 index 00000000000..149a7383a9c --- /dev/null +++ 
b/schema/crdb/support-bundle-time-range/up3.sql @@ -0,0 +1,23 @@ +-- Migrate any pre-existing time bounds from the per-category ereports +-- tables into the new bundle-level time_range tables. Idempotent via +-- ON CONFLICT DO NOTHING (the new tables are keyed by the same id(s) +-- as the source rows). +-- +-- Reading every row of the ereports tables is the point here, so +-- override CockroachDB's full-table-scan guardrail for this txn. + +SET LOCAL disallow_full_table_scans = off; + +INSERT INTO omicron.public.support_bundle_data_selection_time_range + (bundle_id, start_time, end_time) +SELECT bundle_id, start_time, end_time +FROM omicron.public.support_bundle_data_selection_ereports +WHERE start_time IS NOT NULL OR end_time IS NOT NULL +ON CONFLICT (bundle_id) DO NOTHING; + +INSERT INTO omicron.public.fm_support_bundle_request_data_selection_time_range + (sitrep_id, request_id, start_time, end_time) +SELECT sitrep_id, request_id, start_time, end_time +FROM omicron.public.fm_support_bundle_request_data_selection_ereports +WHERE start_time IS NOT NULL OR end_time IS NOT NULL +ON CONFLICT (sitrep_id, request_id) DO NOTHING; diff --git a/schema/crdb/support-bundle-time-range/up4.sql b/schema/crdb/support-bundle-time-range/up4.sql new file mode 100644 index 00000000000..ac370e165eb --- /dev/null +++ b/schema/crdb/support-bundle-time-range/up4.sql @@ -0,0 +1,7 @@ +-- Drop start_time from the support_bundle ereports table. CASCADE +-- removes the dependent (anonymous) CHECK constraint on +-- start_time/end_time, which is no longer relevant once the time +-- columns live in support_bundle_data_selection_time_range. 
+ +ALTER TABLE omicron.public.support_bundle_data_selection_ereports + DROP COLUMN IF EXISTS start_time CASCADE; diff --git a/schema/crdb/support-bundle-time-range/up5.sql b/schema/crdb/support-bundle-time-range/up5.sql new file mode 100644 index 00000000000..f9e594245ce --- /dev/null +++ b/schema/crdb/support-bundle-time-range/up5.sql @@ -0,0 +1,2 @@ +ALTER TABLE omicron.public.support_bundle_data_selection_ereports + DROP COLUMN IF EXISTS end_time; diff --git a/schema/crdb/support-bundle-time-range/up6.sql b/schema/crdb/support-bundle-time-range/up6.sql new file mode 100644 index 00000000000..e12b991eaf3 --- /dev/null +++ b/schema/crdb/support-bundle-time-range/up6.sql @@ -0,0 +1,5 @@ +-- Drop start_time from the FM ereports table. CASCADE removes the +-- dependent (anonymous) CHECK constraint. + +ALTER TABLE omicron.public.fm_support_bundle_request_data_selection_ereports + DROP COLUMN IF EXISTS start_time CASCADE; diff --git a/schema/crdb/support-bundle-time-range/up7.sql b/schema/crdb/support-bundle-time-range/up7.sql new file mode 100644 index 00000000000..a7a0940bfda --- /dev/null +++ b/schema/crdb/support-bundle-time-range/up7.sql @@ -0,0 +1,2 @@ +ALTER TABLE omicron.public.fm_support_bundle_request_data_selection_ereports + DROP COLUMN IF EXISTS end_time; diff --git a/sled-agent/api/src/lib.rs b/sled-agent/api/src/lib.rs index 501ddfc322d..d5986fb876e 100644 --- a/sled-agent/api/src/lib.rs +++ b/sled-agent/api/src/lib.rs @@ -38,6 +38,7 @@ api_versions!([ // | example for the next person. 
// v // (next_int, IDENT), + (38, ADD_LOG_TIME_RANGE), (37, MODIFY_SVC_ENABLED_NOT_ONLINE_STATE), (36, DROPSHOT_FREEFORM_BODY_DESC), (35, INLINE_ROUTER_PEER_IP_ADDR), @@ -1332,6 +1333,7 @@ pub trait SledAgentApi { #[endpoint { method = GET, path = "/support/logs/download/{zone}", + versions = VERSION_ADD_LOG_TIME_RANGE.., }] async fn support_logs_download( request_context: RequestContext, @@ -1343,6 +1345,25 @@ pub trait SledAgentApi { >, ) -> Result, HttpError>; + /// Pre-`VERSION_ADD_LOG_TIME_RANGE` shape of the zone-logs download + /// endpoint: takes only `max_rotated`, with no time-range query + /// parameters. Newer clients use the entry above. + #[endpoint { + operation_id = "support_logs_download", + method = GET, + path = "/support/logs/download/{zone}", + versions = ..VERSION_ADD_LOG_TIME_RANGE, + }] + async fn support_logs_download_v1( + request_context: RequestContext, + path_params: Path< + v1::diagnostics::SledDiagnosticsLogsDownloadPathParam, + >, + query_params: Query< + v1::diagnostics::SledDiagnosticsLogsDownloadQueryParam, + >, + ) -> Result, HttpError>; + /// This endpoint reports the status of the `destroy_orphaned_datasets` /// chicken switch. It will be removed with omicron#6177. 
#[endpoint { diff --git a/sled-agent/src/http_entrypoints.rs b/sled-agent/src/http_entrypoints.rs index 2eb525d3e53..6b92548ae9a 100644 --- a/sled-agent/src/http_entrypoints.rs +++ b/sled-agent/src/http_entrypoints.rs @@ -1430,12 +1430,48 @@ impl SledAgentApi for SledAgentImpl { let sa = request_context.context(); let SledDiagnosticsLogsDownloadPathParam { zone } = path_params.into_inner(); - let SledDiagnosticsLogsDownloadQueryParam { max_rotated } = - query_params.into_inner(); + let SledDiagnosticsLogsDownloadQueryParam { + max_rotated, + start_time, + end_time, + } = query_params.into_inner(); + let window = sled_diagnostics::LogTimeWindow { + start: start_time, + end: end_time, + }; sa.latencies() .instrument_dropshot_handler(&request_context, async { sa.as_support_bundle_logs() - .get_logs_for_zone(zone, max_rotated) + .get_logs_for_zone(zone, max_rotated, window) + .await + .map_err(HttpError::from) + }) + .await + } + + async fn support_logs_download_v1( + request_context: RequestContext, + path_params: Path< + v1::diagnostics::SledDiagnosticsLogsDownloadPathParam, + >, + query_params: Query< + v1::diagnostics::SledDiagnosticsLogsDownloadQueryParam, + >, + ) -> Result, HttpError> { + let sa = request_context.context(); + let v1::diagnostics::SledDiagnosticsLogsDownloadPathParam { zone } = + path_params.into_inner(); + let v1::diagnostics::SledDiagnosticsLogsDownloadQueryParam { + max_rotated, + } = query_params.into_inner(); + sa.latencies() + .instrument_dropshot_handler(&request_context, async { + sa.as_support_bundle_logs() + .get_logs_for_zone( + zone, + Some(max_rotated), + sled_diagnostics::LogTimeWindow::default(), + ) .await .map_err(HttpError::from) }) diff --git a/sled-agent/src/sim/http_entrypoints.rs b/sled-agent/src/sim/http_entrypoints.rs index 8e65a9a4574..2ed7fc647d7 100644 --- a/sled-agent/src/sim/http_entrypoints.rs +++ b/sled-agent/src/sim/http_entrypoints.rs @@ -997,18 +997,61 @@ impl SledAgentApi for SledAgentSimImpl { } async fn 
support_logs( - _request_context: RequestContext, + request_context: RequestContext, ) -> Result>, HttpError> { - // Return an empty zone list for testing. - Ok(HttpResponseOk(Default::default())) + let sa = request_context.context(); + // Return whatever zones tests have injected entries for. By + // default this is empty. + let zones = + sa.support_logs.lock().unwrap().keys().cloned().collect::>(); + Ok(HttpResponseOk(zones)) } async fn support_logs_download( - _request_context: RequestContext, - _path_params: Path, - _query_params: Query, + request_context: RequestContext, + path_params: Path, + query_params: Query, ) -> Result, HttpError> { - method_unimplemented() + let sa = request_context.context(); + let SledDiagnosticsLogsDownloadPathParam { zone } = + path_params.into_inner(); + let SledDiagnosticsLogsDownloadQueryParam { + max_rotated, + start_time, + end_time, + } = query_params.into_inner(); + super::sim_support_logs::serve_zip( + sa, + &zone, + max_rotated, + sled_diagnostics::LogTimeWindow { + start: start_time, + end: end_time, + }, + ) + } + + async fn support_logs_download_v1( + request_context: RequestContext, + path_params: Path< + v1::diagnostics::SledDiagnosticsLogsDownloadPathParam, + >, + query_params: Query< + v1::diagnostics::SledDiagnosticsLogsDownloadQueryParam, + >, + ) -> Result, HttpError> { + let sa = request_context.context(); + let v1::diagnostics::SledDiagnosticsLogsDownloadPathParam { zone } = + path_params.into_inner(); + let v1::diagnostics::SledDiagnosticsLogsDownloadQueryParam { + max_rotated, + } = query_params.into_inner(); + super::sim_support_logs::serve_zip( + sa, + &zone, + Some(max_rotated), + sled_diagnostics::LogTimeWindow::default(), + ) } async fn chicken_switch_destroy_orphaned_datasets_get_v1( diff --git a/sled-agent/src/sim/mod.rs b/sled-agent/src/sim/mod.rs index e00e964e600..aee92a7533f 100644 --- a/sled-agent/src/sim/mod.rs +++ b/sled-agent/src/sim/mod.rs @@ -12,6 +12,7 @@ mod http_entrypoints_pantry; mod 
http_entrypoints_storage; mod instance; mod server; +mod sim_support_logs; mod simulatable; mod sled_agent; mod storage; @@ -23,6 +24,7 @@ pub use config::{ TEST_HARDWARE_THREADS, TEST_RESERVOIR_RAM, ZpoolConfig, }; pub use server::{RssArgs, Server, run_standalone_server}; +pub use sled_agent::SimLogEntry; pub use sled_agent::SledAgent; pub use storage::PantryServer; pub(crate) use storage::Storage; diff --git a/sled-agent/src/sim/sim_support_logs.rs b/sled-agent/src/sim/sim_support_logs.rs new file mode 100644 index 00000000000..55f97bf492e --- /dev/null +++ b/sled-agent/src/sim/sim_support_logs.rs @@ -0,0 +1,78 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Simulated implementation of the sled-agent's `support_logs_download` +//! endpoint. +//! +//! Tests inject [`super::SimLogEntry`] entries via +//! `SledAgent::insert_support_log`. This module renders the entries +//! for a given zone into an in-memory zip, applying the same +//! `start_time`/`end_time`/`max_rotated` filters as the real +//! handler. + +use super::sled_agent::SledAgent; +use dropshot::HttpError; +use sled_diagnostics::LogTimeWindow; +use std::io::Write; + +/// Build a zip of synthetic log entries for `zone` and return it as +/// an `application/zip` HTTP response. Entries with mtime outside the +/// requested `window` are excluded; if `max_rotated` is `Some(n)`, no +/// more than `n` entries are returned (after time filtering, sorted +/// newest-first). 
+pub(super) fn serve_zip( + sa: &SledAgent, + zone: &str, + max_rotated: Option, + window: LogTimeWindow, +) -> Result, HttpError> { + let mut entries = { + let logs = sa.support_logs.lock().unwrap(); + logs.get(zone).cloned().unwrap_or_default() + }; + + entries.retain(|e| { + if let Some(start) = window.start { + if e.mtime < start { + return false; + } + } + if let Some(end) = window.end { + if e.mtime > end { + return false; + } + } + true + }); + + // Sort newest-first so the count cap takes the most recent. + entries.sort_by(|a, b| b.mtime.cmp(&a.mtime)); + if let Some(n) = max_rotated { + entries.truncate(n); + } + + let mut buf = Vec::new(); + { + let mut zip = zip::ZipWriter::new(std::io::Cursor::new(&mut buf)); + for entry in &entries { + let opts: zip::write::FileOptions<'_, ()> = + zip::write::FileOptions::default() + .compression_method(zip::CompressionMethod::Stored); + zip.start_file(&entry.filename, opts).map_err(|err| { + HttpError::for_internal_error(err.to_string()) + })?; + zip.write_all(&entry.contents).map_err(|err| { + HttpError::for_internal_error(err.to_string()) + })?; + } + zip.finish() + .map_err(|err| HttpError::for_internal_error(err.to_string()))?; + } + + Ok(http::Response::builder() + .status(http::StatusCode::OK) + .header(http::header::CONTENT_TYPE, "application/zip") + .body(dropshot::Body::from(buf)) + .unwrap()) +} diff --git a/sled-agent/src/sim/sled_agent.rs b/sled-agent/src/sim/sled_agent.rs index 384a1ed6ea7..75452f2b33b 100644 --- a/sled-agent/src/sim/sled_agent.rs +++ b/sled-agent/src/sim/sled_agent.rs @@ -20,6 +20,7 @@ use anyhow::Context; use anyhow::bail; use bootstore::schemes::v0 as bootstore; use bytes::Bytes; +use chrono::DateTime; use chrono::Utc; use dropshot::Body; use dropshot::HttpError; @@ -79,6 +80,21 @@ use std::time::Duration; use tufaceous_artifact::ArtifactHash; use uuid::Uuid; +/// A synthetic log file the simulated sled-agent will return from +/// `support_logs_download`. 
+/// +/// Tests construct these with controlled mtimes to exercise the +/// support-bundle log time-range filtering end-to-end. +#[derive(Clone, Debug)] +pub struct SimLogEntry { + /// File name to write inside the per-zone zip. + pub filename: String, + /// File contents. + pub contents: Vec, + /// Modification time used for the time-range filter. + pub mtime: DateTime, +} + /// Simulates management of the control plane on a sled /// /// The current implementation simulates a server directly in this program. @@ -117,6 +133,12 @@ pub struct SledAgent { /// counter and return 503 Service Unavailable. local_storage_error_count: AtomicU32, pub bootstore_network_config: Mutex, + /// Test-only log entries keyed by zone name. Populated via + /// [`SledAgent::insert_support_log`]; consulted by the + /// `support_logs` / `support_logs_download` HTTP handlers. + /// Empty by default. + pub(crate) support_logs: + Mutex>>, pub(super) repo_depot: dropshot::HttpServer>, pub log: Logger, @@ -203,6 +225,7 @@ impl SledAgent { repo_depot, log, bootstore_network_config, + support_logs: Mutex::new(std::collections::BTreeMap::new()), health_monitor, }) } @@ -1094,6 +1117,26 @@ impl SledAgent { .map_err(|err| err.into()) } + /// Test helper: append a synthetic log entry that the sim + /// `support_logs_download` handler will return for `zone`. + pub fn insert_support_log( + &self, + zone: impl Into, + entry: SimLogEntry, + ) { + self.support_logs + .lock() + .unwrap() + .entry(zone.into()) + .or_default() + .push(entry); + } + + /// Test helper: drop all injected synthetic log entries. 
+ pub fn clear_support_logs(&self) { + self.support_logs.lock().unwrap().clear(); + } + pub fn datasets_ensure( &self, config: DatasetsConfig, diff --git a/sled-agent/src/support_bundle/logs.rs b/sled-agent/src/support_bundle/logs.rs index bd158920f63..37de74d9f3b 100644 --- a/sled-agent/src/support_bundle/logs.rs +++ b/sled-agent/src/support_bundle/logs.rs @@ -10,6 +10,7 @@ use futures::StreamExt; use futures::stream::FuturesUnordered; use range_requests::make_get_response; use sled_agent_config_reconciler::AvailableDatasetsReceiver; +use sled_diagnostics::LogTimeWindow; use slog::Logger; use slog_error_chain::InlineErrorChain; use tokio::io::AsyncSeekExt; @@ -72,10 +73,15 @@ impl<'a> SupportBundleLogs<'a> { /// For a given zone and its services create a zip file of all logs /// found in that zone and stream it out via an `HttpResponse`. + /// + /// `max_rotated` caps the number of rotated files per service when + /// `Some(_)`. `window` bounds files by `mtime`; either side may be + /// `None` for "unbounded on that side." pub async fn get_logs_for_zone( &self, zone: Z, - max_rotated: usize, + max_rotated: Option, + window: LogTimeWindow, ) -> Result, Error> where Z: Into, @@ -88,7 +94,9 @@ impl<'a> SupportBundleLogs<'a> { let zip_file = { let handle = sled_diagnostics::LogsHandle::new(log); - match handle.get_zone_logs(&zone, max_rotated, &mut tempfile).await + match handle + .get_zone_logs(&zone, max_rotated, window, &mut tempfile) + .await { Ok(_) => Ok(tempfile), Err(e) => Err(e), diff --git a/sled-agent/types/versions/src/add_log_time_range/diagnostics.rs b/sled-agent/types/versions/src/add_log_time_range/diagnostics.rs new file mode 100644 index 00000000000..d7ea2c0b3b2 --- /dev/null +++ b/sled-agent/types/versions/src/add_log_time_range/diagnostics.rs @@ -0,0 +1,48 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. 
If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Diagnostics types for Sled Agent API `ADD_LOG_TIME_RANGE`. + +use chrono::DateTime; +use chrono::Utc; +use schemars::JsonSchema; +use serde::Deserialize; + +use crate::v1; + +/// Query parameters for sled-diagnostics log download requests. +/// +/// `max_rotated` becomes optional in this version: callers using a +/// time-range bound typically don't want a count cap on top. +#[derive(Deserialize, JsonSchema)] +pub struct SledDiagnosticsLogsDownloadQueryParam { + /// The max number of rotated logs to include in the final support + /// bundle. If absent, no count cap is applied. + #[serde(default)] + pub max_rotated: Option, + + /// Lower bound (inclusive) on log file `mtime`. If absent, no + /// lower bound is applied. + #[serde(default)] + pub start_time: Option>, + + /// Upper bound (inclusive) on log file `mtime`. If absent, no + /// upper bound is applied. + #[serde(default)] + pub end_time: Option>, +} + +impl From + for SledDiagnosticsLogsDownloadQueryParam +{ + fn from( + old: v1::diagnostics::SledDiagnosticsLogsDownloadQueryParam, + ) -> Self { + Self { + max_rotated: Some(old.max_rotated), + start_time: None, + end_time: None, + } + } +} diff --git a/sled-agent/types/versions/src/add_log_time_range/mod.rs b/sled-agent/types/versions/src/add_log_time_range/mod.rs new file mode 100644 index 00000000000..e9035775756 --- /dev/null +++ b/sled-agent/types/versions/src/add_log_time_range/mod.rs @@ -0,0 +1,13 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Version `ADD_LOG_TIME_RANGE` of the Sled Agent API. +//! +//! This version reshapes the `support_logs_download` endpoint's query +//! parameters to support time-bounded log collection: `max_rotated` +//! 
becomes optional, and `start_time`/`end_time` are added so callers +//! can request only logs whose `mtime` falls within the supplied +//! window. + +pub mod diagnostics; diff --git a/sled-agent/types/versions/src/latest.rs b/sled-agent/types/versions/src/latest.rs index 7ceb9d668c9..97dbd50f909 100644 --- a/sled-agent/types/versions/src/latest.rs +++ b/sled-agent/types/versions/src/latest.rs @@ -41,7 +41,7 @@ pub mod debug { pub mod diagnostics { pub use crate::v1::diagnostics::SledDiagnosticsLogsDownloadPathParam; - pub use crate::v1::diagnostics::SledDiagnosticsLogsDownloadQueryParam; + pub use crate::v38::diagnostics::SledDiagnosticsLogsDownloadQueryParam; } pub mod disk { diff --git a/sled-agent/types/versions/src/lib.rs b/sled-agent/types/versions/src/lib.rs index 8b8fc8071c0..40ce26f39aa 100644 --- a/sled-agent/types/versions/src/lib.rs +++ b/sled-agent/types/versions/src/lib.rs @@ -81,6 +81,8 @@ pub mod v33; pub mod v34; #[path = "modify_svc_enabled_not_online_state/mod.rs"] pub mod v37; +#[path = "add_log_time_range/mod.rs"] +pub mod v38; #[path = "add_nexus_lockstep_port_to_inventory/mod.rs"] pub mod v4; #[path = "add_probe_put_endpoint/mod.rs"] diff --git a/sled-diagnostics/src/lib.rs b/sled-diagnostics/src/lib.rs index 659468051a5..420c8fba238 100644 --- a/sled-diagnostics/src/lib.rs +++ b/sled-diagnostics/src/lib.rs @@ -22,7 +22,7 @@ cfg_if::cfg_if!
{ } pub mod logs; -pub use logs::{LogError, LogsHandle}; +pub use logs::{LogError, LogTimeWindow, LogsHandle}; mod queries; pub use crate::queries::{ diff --git a/sled-diagnostics/src/logs.rs b/sled-diagnostics/src/logs.rs index f2d4b1b36d4..5e399f1d47c 100644 --- a/sled-diagnostics/src/logs.rs +++ b/sled-diagnostics/src/logs.rs @@ -11,6 +11,8 @@ use std::{ }; use camino::{Utf8Path, Utf8PathBuf}; +use chrono::DateTime; +use chrono::Utc; use fs_err::File; use illumos_utils::zfs::{ CreateSnapshotError, DestroySnapshotError, GetValueError, @@ -566,7 +568,8 @@ impl LogsHandle { pub async fn get_zone_logs( &self, zone: &str, - max_rotated: usize, + max_rotated: Option, + window: LogTimeWindow, writer: &mut W, ) -> Result<(), LogError> { // We are opting to use oxlog to find logs rather than using a similar @@ -575,6 +578,14 @@ impl LogsHandle { // internal structures like a list of running zones. Instead we operate // on all of the log paths that oxlog is capable of discovering via the // filesystem directly. + // + // NOTE: We deliberately pass `date_range: None` to oxlog and apply + // the time-range filter ourselves below. oxlog's filter would + // exclude "current" SMF log files whose mtime falls outside the + // window — but the active log file is exactly the place fresh + // entries land, so we always want it included regardless of the + // requested window. Filtering here lets us keep current logs + // unconditionally and gate only the archived/extra files. let zones = oxlog::Zones::load().map_err(|e| LogError::OxLog(e))?; let zone_logs = zones.zone_logs( zone, @@ -599,10 +610,13 @@ impl LogsHandle { // we'll leak snapshots. 
let mut log_snapshots = LogSnapshots::new(); + let mtime_range = window.to_mtime_range(); + let result = self .get_zone_logs_inner( zone_logs, max_rotated, + mtime_range, zip, &mut log_snapshots, ) @@ -616,12 +630,19 @@ impl LogsHandle { async fn get_zone_logs_inner( &self, zone_logs: BTreeMap, - max_rotated: usize, + max_rotated: Option, + mtime_range: MtimeRange, mut zip: zip::ZipWriter, mut log_snapshots: &mut LogSnapshots, ) -> Result<(), LogError> { for (service, service_logs) in zone_logs { // - Grab all of the service's SMF logs - + + // The "current" log is always included, regardless of mtime + // bounds — the file is where fresh entries land, and a + // long-quiet but still-active service shouldn't lose its + // current log just because its last write predates the + // window. if let Some(current) = service_logs.current { self.process_logs( &service, @@ -643,6 +664,7 @@ impl LogsHandle { .archived .into_iter() .filter(|log| log.path.as_str().contains("crypt/debug")) + .filter(|log| mtime_range.contains(log.modified)) .collect(); // Since these logs can be spread out across multiple U.2 devices @@ -655,12 +677,19 @@ impl LogsHandle { .unwrap_or(0) }); - for file in archived.iter().rev().take(max_rotated) { + // Apply the count cap (`max_rotated`) only when the caller + // specified one. With a time range and no count cap, take + // everything in the window. + let mut to_process: Vec<&LogFile> = archived.iter().rev().collect(); + if let Some(n) = max_rotated { + to_process.truncate(n); + } + for file in to_process { self.process_logs( &service, &mut zip, &mut log_snapshots, - &file, + file, LogType::Archive, ) .await?; @@ -692,8 +721,18 @@ impl LogsHandle { .await?; } - // We clamp the number of rotated logs we grab to 5. - for log in logs.rotated.iter().rev().take(max_rotated) { + // Apply mtime + count cap to rotated extras. 
+ let mut rotated: Vec<&LogFile> = logs + .rotated + .iter() + .rev() + .filter(|log| mtime_range.contains(log.modified)) + .collect(); + if let Some(n) = max_rotated { + rotated.truncate(n); + } + for log in rotated { self.process_logs( &service, &mut zip, @@ -712,6 +751,60 @@ } } } +/// Inclusive log-collection time window expressed in chrono terms. +/// +/// Bundles `start_time`/`end_time` into one type so callers can't +/// accidentally swap them at the call site — the two fields have the +/// same type and same `Option`-ness, and would otherwise be adjacent +/// positional arguments. +#[derive(Clone, Copy, Debug, Default)] +pub struct LogTimeWindow { + pub start: Option<DateTime<Utc>>, + pub end: Option<DateTime<Utc>>, +} + +impl LogTimeWindow { + fn to_mtime_range(self) -> MtimeRange { + MtimeRange { + start: self.start.map(chrono_to_jiff), + end: self.end.map(chrono_to_jiff), + } + } +} + +/// Inclusive `mtime` window applied to archived and extra log files. +#[derive(Clone, Copy, Debug, Default)] +struct MtimeRange { + start: Option<jiff::Timestamp>, + end: Option<jiff::Timestamp>, +} + +impl MtimeRange { + /// Returns true if a file with the given `modified` time should be + /// included. Files with unknown mtime (`None`) are included — over- + /// inclusive matches the calling pattern.
+ fn contains(&self, modified: Option) -> bool { + let Some(mtime) = modified else { + return true; + }; + if let Some(start) = self.start { + if mtime < start { + return false; + } + } + if let Some(end) = self.end { + if mtime > end { + return false; + } + } + true + } +} + +fn chrono_to_jiff(ts: DateTime) -> jiff::Timestamp { + jiff::Timestamp::from_second(ts.timestamp()).unwrap_or(jiff::Timestamp::MIN) +} + fn write_log_to_zip( logger: &Logger, service: &str, diff --git a/support-bundle-collection/src/steps/ereports.rs b/support-bundle-collection/src/steps/ereports.rs index d8f4aa69b4d..e306f1f351f 100644 --- a/support-bundle-collection/src/steps/ereports.rs +++ b/support-bundle-collection/src/steps/ereports.rs @@ -35,6 +35,7 @@ pub async fn collect( debug!(log, "Support bundle: ereports not requested"); return Ok(CollectionStepOutput::Skipped); }; + let time_range = collection.data_selection().time_range().cloned(); let ereports_dir = dir.join("ereports"); let mut status = SupportBundleEreportStatus::default(); if let Err(err) = save_ereports( @@ -43,6 +44,7 @@ pub async fn collect( opctx, datastore, ereport_filters.clone(), + time_range, ereports_dir, &mut status, ) @@ -67,12 +69,14 @@ pub async fn collect( // // Cancel-**unsafe**: writes to the filesystem via `tokio::fs`. // DB queries are eagerly cancelled via `select!`. +#[allow(clippy::too_many_arguments)] async fn save_ereports( collection: &BundleCollection, log: &Logger, opctx: &OpContext, datastore: &Arc, filters: EreportFilters, + time_range: Option, dir: Utf8PathBuf, status: &mut SupportBundleEreportStatus, ) -> anyhow::Result<()> { @@ -83,7 +87,7 @@ async fn save_ereports( let ereports = tokio::select! 
{ _ = collection.cancelled() => return Ok(()), result = datastore.ereport_fetch_matching( - &opctx, &filters, &pagparams + &opctx, &filters, time_range.as_ref(), &pagparams ) => { result.map_err(|e| { e.internal_context("failed to query for ereports") diff --git a/support-bundle-collection/src/steps/host_info.rs b/support-bundle-collection/src/steps/host_info.rs index 41cfadc58ad..cf60749209c 100644 --- a/support-bundle-collection/src/steps/host_info.rs +++ b/support-bundle-collection/src/steps/host_info.rs @@ -31,6 +31,8 @@ pub async fn spawn_query_all_sleds( else { return Ok(CollectionStepOutput::Skipped); }; + let sled_selection = sled_selection.clone(); + let time_range = collection.data_selection().time_range().cloned(); let all_sleds = tokio::select! { _ = collection.cancelled() => return Ok(CollectionStepOutput::None), @@ -48,12 +50,16 @@ pub async fn spawn_query_all_sleds( } let sled = sled.clone(); + let time_range = time_range.clone(); extra_steps.push(CollectionStep::new( format!("sled data for sled {}", sled.id()), Box::new({ move |collection, dir| { async move { - collect_data_from_sled(collection, sled, dir).await + collect_data_from_sled( + collection, sled, time_range, dir, + ) + .await } .boxed() } @@ -79,6 +85,7 @@ pub async fn spawn_query_all_sleds( async fn collect_data_from_sled( collection: &BundleCollection, sled: Sled, + time_range: Option, dir: &Utf8Path, ) -> anyhow::Result { let (log, opctx, datastore) = @@ -240,6 +247,7 @@ async fn collect_data_from_sled( &sled_client, zone, &sled_path, + time_range.as_ref(), cancellation_token, ) }) @@ -317,15 +325,23 @@ async fn save_zone_log_zip_or_error( client: &sled_agent_client::Client, zone: &str, path: &Utf8Path, + time_range: Option<&nexus_types::support_bundle::BundleTimeRange>, cancellation_token: &CancellationToken, ) -> anyhow::Result<()> { - // In the future when support bundle collection exposes tuning parameters - // this can turn into a collection parameter. 
- const DEFAULT_MAX_ROTATED_LOGS: u32 = 5; + // Destructure with field names so the positional Progenitor call + // below can't accidentally swap start_time and end_time (both have + // the same `Option<&DateTime>` type). + let nexus_types::support_bundle::BundleTimeRange { start, end } = + time_range.cloned().unwrap_or_default(); let download_result = tokio::select! { _ = cancellation_token.cancelled() => return Ok(()), - result = client.support_logs_download(zone, DEFAULT_MAX_ROTATED_LOGS) => result, + result = client.support_logs_download( + zone, + /* end_time */ end.as_ref(), + /* max_rotated */ None, + /* start_time */ start.as_ref(), + ) => result, }; match download_result {