diff --git a/aggregator/src/aggregator.rs b/aggregator/src/aggregator.rs index d24caee50..41f3b371d 100644 --- a/aggregator/src/aggregator.rs +++ b/aggregator/src/aggregator.rs @@ -55,8 +55,8 @@ use janus_messages::{ AggregationJobContinueReq, AggregationJobId, AggregationJobInitializeReq, AggregationJobResp, AggregationJobStep, BatchSelector, CollectionJobId, CollectionJobReq, CollectionJobResp, Duration, ExtensionType, HpkeConfig, HpkeConfigList, InputShareAad, Interval, - PartialBatchSelector, PlaintextInputShare, PrepareResp, Report, ReportError, - ReportUploadStatus, Role, TaskId, UploadErrors, + PartialBatchSelector, PlaintextInputShare, Report, ReportError, ReportUploadStatus, Role, + TaskId, UploadErrors, VerifyResp, batch_mode::{LeaderSelected, TimeInterval}, taskprov::TaskConfig, }; @@ -1045,20 +1045,20 @@ impl TaskAggregator { VdafInstance::Fake { rounds } => VdafOps::Fake(Arc::new(dummy::Vdaf::new(*rounds))), #[cfg(feature = "test-util")] - VdafInstance::FakeFailsPrepInit => VdafOps::Fake(Arc::new( + VdafInstance::FakeFailsVerifyInit => VdafOps::Fake(Arc::new( dummy::Vdaf::new(1).with_verify_init_fn(|_| -> Result<(), VdafError> { Err(VdafError::Uncategorized( - "FakeFailsPrepInit failed at prep_init".to_string(), + "FakeFailsVerifyInit failed at verify_init".to_string(), )) }), )), #[cfg(feature = "test-util")] - VdafInstance::FakeFailsPrepStep => { + VdafInstance::FakeFailsVerifyStep => { VdafOps::Fake(Arc::new(dummy::Vdaf::new(1).with_verify_next_fn( |_| -> Result, VdafError> { Err(VdafError::Uncategorized( - "FakeFailsPrepStep failed at prep_step".to_string(), + "FakeFailsVerifyStep failed at verify_step".to_string(), )) }, ))) @@ -1976,7 +1976,7 @@ impl VdafOps { mutating_aggregation_job: &AggregationJob, mutating_report_aggregations: impl IntoIterator>, log_forbidden_mutations: Option, - ) -> Result>, datastore::Error> + ) -> Result>, datastore::Error> where B: AccumulableBatchMode, A: AsyncAggregator, @@ -2068,7 +2068,7 @@ impl VdafOps { )); } - 
// This is a repeated request. Send the preparation responses we computed last time. + // This is a repeated request. Send the verification responses we computed last time. return Ok(Some( tx.get_report_aggregations_for_aggregation_job( vdaf, @@ -2078,7 +2078,7 @@ impl VdafOps { ) .await? .iter() - .filter_map(ReportAggregation::last_prep_resp) + .filter_map(ReportAggregation::last_verify_resp) .cloned() .collect(), )); @@ -2112,9 +2112,9 @@ impl VdafOps { // If two ReportShare messages have the same report ID, then the helper MUST abort with // error "invalidMessage". (ยง4.5.1.2) - let mut seen_report_ids = HashSet::with_capacity(req.prepare_inits().len()); - for prepare_init in req.prepare_inits() { - if !seen_report_ids.insert(*prepare_init.report_share().metadata().id()) { + let mut seen_report_ids = HashSet::with_capacity(req.verify_inits().len()); + for verify_init in req.verify_inits() { + if !seen_report_ids.insert(*verify_init.report_share().metadata().id()) { return Err(Error::InvalidMessage( Some(*task.id()), "aggregate request contains duplicate report IDs", @@ -2124,9 +2124,9 @@ impl VdafOps { // Build initial aggregation job & report aggregations. 
let client_timestamp_interval = req - .prepare_inits() + .verify_inits() .iter() - .map(|prepare_init| *prepare_init.report_share().metadata().time()) + .map(|verify_init| *verify_init.report_share().metadata().time()) .fold(Interval::EMPTY, |interval, timestamp| { interval.merged_with(×tamp).unwrap() }); @@ -2143,19 +2143,19 @@ impl VdafOps { .with_last_request_hash(request_hash); let report_aggregations = req - .prepare_inits() + .verify_inits() .iter() .enumerate() - .map(|(ord, prepare_init)| { + .map(|(ord, verify_init)| { Ok(ReportAggregation::::new( *task.id(), *aggregation_job_id, - *prepare_init.report_share().metadata().id(), - *prepare_init.report_share().metadata().time(), + *verify_init.report_share().metadata().id(), + *verify_init.report_share().metadata().time(), u64::try_from(ord)?, None, ReportAggregationState::HelperInitProcessing { - prepare_init: prepare_init.clone(), + verify_init: verify_init.clone(), require_taskbind_extension, }, )) @@ -2223,7 +2223,7 @@ impl VdafOps { // the same response as last time. let aggregation_job = Arc::new(aggregation_job); let report_aggregations = Arc::new(report_aggregations); - if let Some(prepare_resps) = datastore + if let Some(verify_resps) = datastore .run_tx("aggregate_init_idempotecy", |tx| { let vdaf = Arc::clone(&vdaf); let task = Arc::clone(&task); @@ -2246,7 +2246,7 @@ impl VdafOps { }) .await? { - return Ok(Some(AggregationJobResp { prepare_resps })); + return Ok(Some(AggregationJobResp { verify_resps })); } // Compute the next aggregation step. @@ -2262,7 +2262,7 @@ impl VdafOps { .await?; // Store data to datastore. - let prepare_resps = Self::handle_aggregate_init_generic_write( + let verify_resps = Self::handle_aggregate_init_generic_write( datastore, vdaf, metrics, @@ -2276,7 +2276,7 @@ impl VdafOps { ) .await?; - Ok(Some(AggregationJobResp { prepare_resps })) + Ok(Some(AggregationJobResp { verify_resps })) } // All report aggregations must be in the HelperInitProcessing state. 
@@ -2330,14 +2330,14 @@ impl VdafOps { request_hash: [u8; 32], aggregation_job: Arc>, report_aggregations: Arc>>, - ) -> Result, Error> + ) -> Result, Error> where B: AccumulableBatchMode, A: AsyncAggregator, C: Clock, { let task_aggregation_counters = TaskAggregationCounter::default(); - let prepare_resps = datastore + let verify_resps = datastore .run_tx("aggregate_init_aggregator_write", |tx| { let vdaf = Arc::clone(&vdaf); let task = Arc::clone(&task); @@ -2352,7 +2352,7 @@ impl VdafOps { // the same response as last time. We check during the write transaction, even // if we have checked before, to avoid the possibility of races for concurrent // requests. - if let Some(prepare_resps) = Self::check_aggregate_init_idempotency( + if let Some(verify_resps) = Self::check_aggregate_init_idempotency( tx, vdaf.as_ref(), task.id(), @@ -2363,7 +2363,7 @@ impl VdafOps { ) .await? { - return Ok(prepare_resps); + return Ok(verify_resps); } // Write report shares, and ensure this isn't a repeated report aggregation. 
@@ -2405,8 +2405,9 @@ impl VdafOps { ); aggregation_job_writer .put(aggregation_job.as_ref().clone(), report_aggregations)?; - let mut prep_resps_by_agg_job = aggregation_job_writer.write(tx, vdaf).await?; - Ok(prep_resps_by_agg_job + let mut verify_resps_by_agg_job = + aggregation_job_writer.write(tx, vdaf).await?; + Ok(verify_resps_by_agg_job .remove(aggregation_job.id()) .unwrap_or_default()) }) @@ -2420,7 +2421,7 @@ impl VdafOps { task_aggregation_counters, ); - Ok(prepare_resps) + Ok(verify_resps) } async fn handle_aggregate_continue_generic< @@ -2518,9 +2519,9 @@ impl VdafOps { let resp = match task.aggregation_mode() { Some(AggregationMode::Synchronous) => { AggregationJobContinueResult::Sync(AggregationJobResp { - prepare_resps: report_aggregations + verify_resps: report_aggregations .iter() - .filter_map(ReportAggregation::last_prep_resp) + .filter_map(ReportAggregation::last_verify_resp) .cloned() .collect(), }) @@ -2552,23 +2553,23 @@ impl VdafOps { )); } - // Pair incoming preparation continuation messages with existing report + // Pair incoming verification continuation messages with existing report // aggregations. 
let mut report_aggregations_to_write = Vec::with_capacity(report_aggregations.len()); let mut report_aggregations_iter = report_aggregations.into_iter(); - let mut report_aggregations = Vec::with_capacity(req.prepare_continues().len()); - for prepare_continue in req.prepare_continues() { + let mut report_aggregations = Vec::with_capacity(req.verify_continues().len()); + for verify_continue in req.verify_continues() { let report_aggregation = loop { let report_aggregation = report_aggregations_iter.next().ok_or_else(|| { datastore::Error::User( Error::InvalidMessage( Some(*task.id()), - "leader sent unexpected, duplicate, or out-of-order prepare steps", + "leader sent unexpected, duplicate, or out-of-order verify steps", ) .into(), ) })?; - if report_aggregation.report_id() != prepare_continue.report_id() { + if report_aggregation.report_id() != verify_continue.report_id() { // This report was omitted by the leader because of a prior failure. // Note that the report was dropped (if it's not already in an error // state) and continue. 
@@ -2581,7 +2582,7 @@ impl VdafOps { .with_state(ReportAggregationState::Failed { report_error: ReportError::ReportDropped, }) - .with_last_prep_resp(None), + .with_last_verify_resp(None), None, )); } @@ -2590,13 +2591,13 @@ impl VdafOps { break report_aggregation; }; - let prepare_state = if let ReportAggregationState::HelperContinue{ prepare_state } = report_aggregation.state() { - prepare_state.clone() + let verify_state = if let ReportAggregationState::HelperContinue{ verify_state } = report_aggregation.state() { + verify_state.clone() } else { return Err(datastore::Error::User( Error::InvalidMessage( Some(*task.id()), - "leader sent prepare step for non-CONTINUE report aggregation", + "leader sent verify step for non-CONTINUE report aggregation", ) .into(), )) @@ -2604,10 +2605,10 @@ impl VdafOps { report_aggregations.push(report_aggregation .with_state(ReportAggregationState::HelperContinueProcessing { - prepare_state, - prepare_continue: prepare_continue.clone(), + verify_state, + verify_continue: verify_continue.clone(), }) - .with_last_prep_resp(None) + .with_last_verify_resp(None) ); } @@ -2624,7 +2625,7 @@ impl VdafOps { .with_state(ReportAggregationState::Failed { report_error: ReportError::ReportDropped, }) - .with_last_prep_resp(None), + .with_last_verify_resp(None), None, )); } @@ -2710,7 +2711,7 @@ impl VdafOps { ); // Store data to datastore. - let prepare_resps = Self::handle_aggregate_continue_generic_write( + let verify_resps = Self::handle_aggregate_continue_generic_write( tx, task, vdaf, @@ -2722,7 +2723,7 @@ impl VdafOps { ) .await?; Ok(AggregationJobContinueResult::Sync(AggregationJobResp { - prepare_resps, + verify_resps, })) } @@ -2779,7 +2780,7 @@ impl VdafOps { task_aggregation_counters: TaskAggregationCounter, aggregation_job: AggregationJob, report_aggregations: Vec>, - ) -> Result, Error> { + ) -> Result, Error> { // Sanity-check that we have the correct number of report aggregations. 
assert_eq!(report_aggregations.len(), report_aggregations.capacity()); @@ -2794,8 +2795,8 @@ impl VdafOps { task_aggregation_counters, ); aggregation_job_writer.put(aggregation_job, report_aggregations)?; - let mut prep_resps_by_agg_job = aggregation_job_writer.write(tx, vdaf).await?; - Ok(prep_resps_by_agg_job + let mut verify_resps_by_agg_job = aggregation_job_writer.write(tx, vdaf).await?; + Ok(verify_resps_by_agg_job .remove(&aggregation_job_id) .unwrap_or_default()) } @@ -2858,9 +2859,9 @@ impl VdafOps { AggregationJobState::AwaitingRequest | AggregationJobState::Finished => { Some(AggregationJobResp { - prepare_resps: report_aggregations + verify_resps: report_aggregations .into_iter() - .filter_map(|ra| ra.last_prep_resp().cloned()) + .filter_map(|ra| ra.last_verify_resp().cloned()) .collect(), }) } diff --git a/aggregator/src/aggregator/aggregation_job_continue.rs b/aggregator/src/aggregator/aggregation_job_continue.rs index e19f1d413..dcb344266 100644 --- a/aggregator/src/aggregator/aggregation_job_continue.rs +++ b/aggregator/src/aggregator/aggregation_job_continue.rs @@ -13,7 +13,7 @@ use janus_aggregator_core::{ task::AggregatorTask, }; use janus_core::vdaf::vdaf_application_context; -use janus_messages::{PrepareResp, PrepareStepResult, Role}; +use janus_messages::{Role, VerifyResp, VerifyStepResult}; use opentelemetry::metrics::Counter; use prio::topology::ping_pong::{Continued, PingPongState, PingPongTopology as _}; use rayon::iter::{IntoParallelIterator as _, ParallelIterator as _}; @@ -93,37 +93,37 @@ where // Assert safety: this function should only be called with report // aggregations in the HelperContinueProcessing state. 
- let (prepare_state, prepare_continue) = assert_matches!( + let (verify_state, verify_continue) = assert_matches!( report_aggregation.state(), ReportAggregationState::HelperContinueProcessing{ - prepare_state, - prepare_continue - } => (prepare_state, prepare_continue) + verify_state, + verify_continue + } => (verify_state, verify_continue) ); - let (report_aggregation_state, prepare_step_result, output_share) = - trace_span!("VDAF preparation (helper continuation)") + let (report_aggregation_state, verify_step_result, output_share) = + trace_span!("VDAF verification (helper continuation)") .in_scope(|| { // Continue with the incoming message. vdaf.helper_continued( &ctx, aggregation_job.aggregation_parameter(), - prepare_state.clone(), - prepare_continue.message(), + verify_state.clone(), + verify_continue.message(), ) .and_then(|continuation| { Ok(match continuation.evaluate(&ctx, vdaf.as_ref())? { // Helper did not finish. Store the new state, respond to // the leader with the message, and await the next message - // from the Leader to advance preparation. + // from the Leader to advance verification. PingPongState::Continued(Continued { verifier_state, message, }) => ( ReportAggregationState::HelperContinue { - prepare_state: verifier_state, + verify_state: verifier_state, }, - PrepareStepResult::Continue { message }, + VerifyStepResult::Continue { message }, None, ), @@ -135,7 +135,7 @@ where message, } => ( ReportAggregationState::Finished, - PrepareStepResult::Continue { message }, + VerifyStepResult::Continue { message }, Some(output_share), ), @@ -143,7 +143,7 @@ where // the leader with a finished message. 
PingPongState::Finished { output_share } => ( ReportAggregationState::Finished, - PrepareStepResult::Finished, + VerifyStepResult::Finished, Some(output_share), ), }) @@ -153,7 +153,7 @@ where handle_ping_pong_error( task.id(), Role::Leader, - prepare_continue.report_id(), + verify_continue.report_id(), error, &metrics.aggregate_step_failure_counter, ) @@ -164,7 +164,7 @@ where .increment_with_report_error(report_error); ( ReportAggregationState::Failed { report_error }, - PrepareStepResult::Reject(report_error), + VerifyStepResult::Reject(report_error), None, ) }); @@ -174,9 +174,9 @@ where .send(WritableReportAggregation::new( report_aggregation .with_state(report_aggregation_state) - .with_last_prep_resp(Some(PrepareResp::new( + .with_last_verify_resp(Some(VerifyResp::new( report_id, - prepare_step_result, + verify_step_result, ))), output_share, )) @@ -381,8 +381,8 @@ mod tests { }; use janus_messages::{ AggregationJobContinueReq, AggregationJobId, AggregationJobInitializeReq, - AggregationJobResp, AggregationJobStep, Interval, PartialBatchSelector, PrepareContinue, - PrepareResp, PrepareStepResult, Role, batch_mode::TimeInterval, + AggregationJobResp, AggregationJobStep, Interval, PartialBatchSelector, Role, + VerifyContinue, VerifyResp, VerifyStepResult, batch_mode::TimeInterval, }; use prio::{ codec::Encode as _, @@ -397,7 +397,7 @@ mod tests { post_aggregation_job_and_decode, post_aggregation_job_expecting_error, post_aggregation_job_expecting_status, }, - aggregation_job_init::test_util::{PrepareInitGenerator, put_aggregation_job}, + aggregation_job_init::test_util::{VerifyInitGenerator, put_aggregation_job}, http_handlers::{ AggregatorHandlerBuilder, test_util::{HttpHandlerTest, take_problem_details}, @@ -411,7 +411,7 @@ mod tests { > { task: Task, datastore: Arc>, - prepare_init_generator: PrepareInitGenerator, + verify_init_generator: VerifyInitGenerator, aggregation_job_id: AggregationJobId, aggregation_parameter: V::AggregationParam, 
first_continue_request: AggregationJobContinueReq, @@ -424,7 +424,7 @@ mod tests { #[allow(clippy::unit_arg)] async fn setup_aggregation_job_continue_test() -> AggregationJobContinueTestCase<0, dummy::Vdaf> { - // Prepare datastore & request. + // Set up datastore & request. install_test_trace_subscriber(); let aggregation_job_id = random(); @@ -442,7 +442,7 @@ mod tests { let keypair = datastore.put_hpke_key().await.unwrap(); let aggregation_parameter = dummy::AggregationParam(7); - let prepare_init_generator = PrepareInitGenerator::new( + let verify_init_generator = VerifyInitGenerator::new( clock.clone(), helper_task.clone(), keypair.config().clone(), @@ -450,20 +450,20 @@ mod tests { aggregation_parameter, ); - let (prepare_init, transcript) = prepare_init_generator.next(&13); + let (verify_init, transcript) = verify_init_generator.next(&13); datastore .run_unnamed_tx(|tx| { let helper_task = helper_task.clone(); - let prepare_init = prepare_init.clone(); + let verify_init = verify_init.clone(); let transcript = transcript.clone(); Box::pin(async move { tx.put_aggregator_task(&helper_task).await.unwrap(); tx.put_scrubbed_report( helper_task.id(), - prepare_init.report_share().metadata().id(), - prepare_init.report_share().metadata().time(), + verify_init.report_share().metadata().id(), + verify_init.report_share().metadata().time(), ) .await .unwrap(); @@ -474,7 +474,7 @@ mod tests { aggregation_parameter, (), Interval::new( - *prepare_init.report_share().metadata().time(), + *verify_init.report_share().metadata().time(), janus_messages::Duration::ONE, ) .unwrap(), @@ -487,13 +487,12 @@ mod tests { tx.put_report_aggregation::<0, dummy::Vdaf>(&ReportAggregation::new( *helper_task.id(), aggregation_job_id, - *prepare_init.report_share().metadata().id(), - *prepare_init.report_share().metadata().time(), + *verify_init.report_share().metadata().id(), + *verify_init.report_share().metadata().time(), 0, None, ReportAggregationState::HelperContinue { - prepare_state: 
*transcript.helper_prepare_transitions[0] - .prepare_state(), + verify_state: *transcript.helper_verify_transitions[0].verify_state(), }, )) .await @@ -507,9 +506,9 @@ mod tests { let first_continue_request = AggregationJobContinueReq::new( AggregationJobStep::from(1), - Vec::from([PrepareContinue::new( - *prepare_init.report_share().metadata().id(), - transcript.leader_prepare_transitions[1] + Vec::from([VerifyContinue::new( + *verify_init.report_share().metadata().id(), + transcript.leader_verify_transitions[1] .message() .unwrap() .clone(), @@ -531,7 +530,7 @@ mod tests { AggregationJobContinueTestCase { task, datastore, - prepare_init_generator, + verify_init_generator, aggregation_job_id, aggregation_parameter, first_continue_request, @@ -559,11 +558,11 @@ mod tests { assert_eq!( first_continue_response, Some(AggregationJobResp { - prepare_resps: test_case + verify_resps: test_case .first_continue_request - .prepare_continues() + .verify_continues() .iter() - .map(|step| PrepareResp::new(*step.report_id(), PrepareStepResult::Finished)) + .map(|step| VerifyResp::new(*step.report_id(), VerifyStepResult::Finished)) .collect() }) ); @@ -660,10 +659,7 @@ mod tests { // to advance to step 0. Should be rejected because that is an illegal transition. 
let step_zero_request = AggregationJobContinueReq::new( AggregationJobStep::from(0), - test_case - .first_continue_request - .prepare_continues() - .to_vec(), + test_case.first_continue_request.verify_continues().to_vec(), ); post_aggregation_job_expecting_error( @@ -704,21 +700,21 @@ mod tests { async fn aggregation_job_continue_step_recovery_mutate_continue_request() { let test_case = setup_aggregation_job_continue_step_recovery_test().await; - let (unrelated_prepare_init, unrelated_transcript) = - test_case.prepare_init_generator.next(&13); + let (unrelated_verify_init, unrelated_transcript) = + test_case.verify_init_generator.next(&13); let (before_aggregation_job, before_report_aggregations) = test_case .datastore .run_unnamed_tx(|tx| { let task_id = *test_case.task.id(); - let unrelated_prepare_init = unrelated_prepare_init.clone(); + let unrelated_verify_init = unrelated_verify_init.clone(); let aggregation_job_id = test_case.aggregation_job_id; Box::pin(async move { tx.put_scrubbed_report( &task_id, - unrelated_prepare_init.report_share().metadata().id(), - unrelated_prepare_init.report_share().metadata().time(), + unrelated_verify_init.report_share().metadata().id(), + unrelated_verify_init.report_share().metadata().time(), ) .await .unwrap(); @@ -751,9 +747,9 @@ mod tests { // ID. let modified_request = AggregationJobContinueReq::new( test_case.first_continue_request.step(), - Vec::from([PrepareContinue::new( - *unrelated_prepare_init.report_share().metadata().id(), - unrelated_transcript.leader_prepare_transitions[1] + Vec::from([VerifyContinue::new( + *unrelated_verify_init.report_share().metadata().id(), + unrelated_transcript.leader_verify_transitions[1] .message() .unwrap() .clone(), @@ -835,10 +831,7 @@ mod tests { // Send another request for a step that the helper is past. Should fail. 
let past_step_request = AggregationJobContinueReq::new( AggregationJobStep::from(1), - test_case - .first_continue_request - .prepare_continues() - .to_vec(), + test_case.first_continue_request.verify_continues().to_vec(), ); post_aggregation_job_expecting_error( @@ -848,7 +841,7 @@ mod tests { &test_case.router, StatusCode::BAD_REQUEST, "urn:ietf:params:ppm:dap:error:stepMismatch", - "The leader and helper are not on the same step of VDAF preparation.", + "The leader and helper are not on the same step of VDAF verification.", None, None, ) @@ -863,10 +856,7 @@ mod tests { // helper isn't on that step. let future_step_request = AggregationJobContinueReq::new( AggregationJobStep::from(17), - test_case - .first_continue_request - .prepare_continues() - .to_vec(), + test_case.first_continue_request.verify_continues().to_vec(), ); post_aggregation_job_expecting_error( @@ -876,7 +866,7 @@ mod tests { &test_case.router, StatusCode::BAD_REQUEST, "urn:ietf:params:ppm:dap:error:stepMismatch", - "The leader and helper are not on the same step of VDAF preparation.", + "The leader and helper are not on the same step of VDAF verification.", None, None, ) @@ -928,11 +918,11 @@ mod tests { .unwrap(); // Subsequent attempts to initialize the job should fail. 
- let (prep_init, _) = test_case.prepare_init_generator.next(&13); + let (verify_init_msg, _) = test_case.verify_init_generator.next(&13); let init_req = AggregationJobInitializeReq::new( test_case.aggregation_parameter.get_encoded().unwrap(), PartialBatchSelector::new_time_interval(), - Vec::from([prep_init]), + Vec::from([verify_init_msg]), ); let mut response = put_aggregation_job( &test_case.task, diff --git a/aggregator/src/aggregator/aggregation_job_driver.rs b/aggregator/src/aggregator/aggregation_job_driver.rs index 2104b07e5..10f9f5a27 100644 --- a/aggregator/src/aggregator/aggregation_job_driver.rs +++ b/aggregator/src/aggregator/aggregation_job_driver.rs @@ -34,8 +34,8 @@ use janus_core::{ }; use janus_messages::{ AggregationJobContinueReq, AggregationJobInitializeReq, AggregationJobResp, MediaType, - PartialBatchSelector, PrepareContinue, PrepareInit, PrepareResp, PrepareStepResult, - ReportError, ReportMetadata, ReportShare, Role, + PartialBatchSelector, ReportError, ReportMetadata, ReportShare, Role, VerifyContinue, + VerifyInit, VerifyResp, VerifyStepResult, batch_mode::{LeaderSelected, TimeInterval}, }; use opentelemetry::{ @@ -449,7 +449,7 @@ where let ctx = vdaf_application_context(&task_id); // Compute report shares to send to helper, and decrypt our input shares & - // initialize preparation state. + // initialize verification state. report_aggregations.into_par_iter().try_for_each( |report_aggregation| { let _entered = span.enter(); @@ -505,7 +505,7 @@ where )).map_err(|_| ()); } - // Initialize the leader's preparation state from the input share. + // Initialize the leader's verification state from the input share. 
let public_share_bytes = match public_share.get_encoded() { Ok(public_share_bytes) => public_share_bytes, Err(err) => { @@ -521,7 +521,7 @@ where } }; - match trace_span!("VDAF preparation (leader initialization)").in_scope(|| { + match trace_span!("VDAF verification (leader initialization)").in_scope(|| { vdaf.leader_initialized( verify_key.as_bytes(), &ctx, @@ -546,7 +546,7 @@ where Ok(Continued { message, verifier_state }) => { pi_and_sa_sender.send(( report_aggregation.ord(), - PrepareInit::new( + VerifyInit::new( ReportShare::new( ReportMetadata::new( *report_aggregation.report_id(), @@ -560,7 +560,7 @@ where ), SteppedAggregation::new( report_aggregation, - Either::PrepareState(verifier_state), + Either::VerifyState(verifier_state), ), )).map_err(|_| ()) } @@ -576,7 +576,7 @@ where } }); - let (report_aggregations_to_write, (prepare_inits, stepped_aggregations)) = join!( + let (report_aggregations_to_write, (verify_inits, stepped_aggregations)) = join!( async move { let mut report_aggregations_to_write = Vec::with_capacity(report_aggregation_count); while ra_receiver @@ -590,9 +590,9 @@ where let mut pis_and_sas = Vec::with_capacity(report_aggregation_count); while pi_and_sa_receiver.recv_many(&mut pis_and_sas, 10).await > 0 {} pis_and_sas.sort_unstable_by_key(|(ord, _, _)| *ord); - let (prepare_inits, stepped_aggregations): (Vec<_>, Vec<_>) = + let (verify_inits, stepped_aggregations): (Vec<_>, Vec<_>) = pis_and_sas.into_iter().map(|(_, pi, sa)| (pi, sa)).unzip(); - (prepare_inits, stepped_aggregations) + (verify_inits, stepped_aggregations) }, ); @@ -606,12 +606,12 @@ where } }); assert_eq!( - report_aggregations_to_write.len() + prepare_inits.len(), + report_aggregations_to_write.len() + verify_inits.len(), report_aggregation_count ); - assert_eq!(prepare_inits.len(), stepped_aggregations.len()); + assert_eq!(verify_inits.len(), stepped_aggregations.len()); - let (resp, retry_after) = if !prepare_inits.is_empty() { + let (resp, retry_after) = if 
!verify_inits.is_empty() { // Construct request, send it to the helper, and process the response. let request = AggregationJobInitializeReq::::new( aggregation_job @@ -619,7 +619,7 @@ where .get_encoded() .map_err(Error::MessageEncode)?, PartialBatchSelector::new(aggregation_job.partial_batch_identifier().clone()), - prepare_inits, + verify_inits, ); let http_response = send_request_to_helper( @@ -666,7 +666,7 @@ where // artificial aggregation job response instead, which will finish the aggregation job. ( Some(AggregationJobResp { - prepare_resps: Vec::new(), + verify_resps: Vec::new(), }), None, ) @@ -752,7 +752,7 @@ where }; let (message, either) = - match trace_span!("VDAF preparation (leader continuation evaluation)") + match trace_span!("VDAF verification (leader continuation evaluation)") .in_scope(|| continuation.evaluate(&ctx, vdaf.as_ref())) { // If we are continuing, then the state can only be Continued or @@ -760,7 +760,7 @@ where Ok(PingPongState::Continued(Continued { message, verifier_state, - })) => (message, Either::PrepareState(verifier_state)), + })) => (message, Either::VerifyState(verifier_state)), Ok(PingPongState::FinishedWithOutbound { message, output_share, @@ -788,7 +788,7 @@ where pc_and_sa_sender .send(( report_aggregation.ord(), - PrepareContinue::new( + VerifyContinue::new( *report_aggregation.report_id(), message.clone(), ), @@ -799,7 +799,7 @@ where } }); - let (report_aggregations_to_write, (prepare_continues, stepped_aggregations)) = join!( + let (report_aggregations_to_write, (verify_continues, stepped_aggregations)) = join!( async move { let mut report_aggregations_to_write = Vec::with_capacity(report_aggregation_count); while ra_receiver @@ -813,9 +813,9 @@ where let mut pcs_and_sas = Vec::with_capacity(report_aggregation_count); while pc_and_sa_receiver.recv_many(&mut pcs_and_sas, 10).await > 0 {} pcs_and_sas.sort_unstable_by_key(|(ord, _, _)| *ord); - let (prepare_continues, stepped_aggregations): (Vec<_>, Vec<_>) = + let 
(verify_continues, stepped_aggregations): (Vec<_>, Vec<_>) = pcs_and_sas.into_iter().map(|(_, pc, sa)| (pc, sa)).unzip(); - (prepare_continues, stepped_aggregations) + (verify_continues, stepped_aggregations) } ); @@ -829,13 +829,13 @@ where } }); assert_eq!( - report_aggregations_to_write.len() + prepare_continues.len(), + report_aggregations_to_write.len() + verify_continues.len(), report_aggregation_count ); - assert_eq!(prepare_continues.len(), stepped_aggregations.len()); + assert_eq!(verify_continues.len(), stepped_aggregations.len()); // Construct request, send it to the helper, and process the response. - let request = AggregationJobContinueReq::new(aggregation_job.step(), prepare_continues); + let request = AggregationJobContinueReq::new(aggregation_job.step(), verify_continues); let http_response = send_request_to_helper( &self.http_client, @@ -908,10 +908,10 @@ where .into_iter() .filter_map(|report_aggregation| { let leader_state_or_output_share = match report_aggregation.state() { - // Leader was in the init state, so re-hydrate the prepare state into + // Leader was in the init state, so re-hydrate the verify state into // PingPongState::Continued. - ReportAggregationState::LeaderPollInit { prepare_state } => { - Ok(Either::PrepareState(prepare_state.clone())) + ReportAggregationState::LeaderPollInit { verify_state } => { + Ok(Either::VerifyState(verify_state.clone())) } // Leader was in the continue state, so re-evaluate the transition into either // PingPongState::Continued or ::Finished. @@ -922,7 +922,7 @@ where .map_err(|e| Error::Internal(e.into())) .map(|ping_pong_state| match ping_pong_state { PingPongState::Continued(Continued { verifier_state, .. }) => { - Either::PrepareState(verifier_state) + Either::VerifyState(verifier_state) } PingPongState::Finished { output_share } | PingPongState::FinishedWithOutbound { output_share, .. 
} => { @@ -1020,7 +1020,7 @@ where .await } - Some(AggregationJobResp { prepare_resps }) => { + Some(AggregationJobResp { verify_resps }) => { self.step_aggregation_job_leader_process_response_finished( datastore, vdaf, @@ -1029,7 +1029,7 @@ where aggregation_job, stepped_aggregations, report_aggregations_to_write, - prepare_resps, + verify_resps, ) .await } @@ -1065,8 +1065,8 @@ where // Transition from init state to polling init state ( ReportAggregationState::LeaderInit { .. }, - Either::PrepareState(prepare_state), - ) => ReportAggregationState::LeaderPollInit { prepare_state }, + Either::VerifyState(verify_state), + ) => ReportAggregationState::LeaderPollInit { verify_state }, // Transition from continue state to polling continue state (ReportAggregationState::LeaderContinue { continuation }, _) => { ReportAggregationState::LeaderPollContinue { @@ -1156,23 +1156,24 @@ where aggregation_job: AggregationJob, stepped_aggregations: Vec>, mut report_aggregations_to_write: Vec>, - prepare_resps: Vec, + verify_resps: Vec, ) -> Result<(), Error> { // Handle response, computing the new report aggregations to be stored. 
let task_aggregation_counters = TaskAggregationCounter::default(); let expected_report_aggregation_count = report_aggregations_to_write.len() + stepped_aggregations.len(); - if stepped_aggregations.len() != prepare_resps.len() { + if stepped_aggregations.len() != verify_resps.len() { return Err(Error::Internal( - "missing, duplicate, out-of-order, or unexpected prepare steps in response".into(), + "missing, duplicate, out-of-order, or unexpected verify steps in response".into(), )); } - for (stepped_aggregation, helper_prep_resp) in - stepped_aggregations.iter().zip(&prepare_resps) + for (stepped_aggregation, helper_verify_resp) in + stepped_aggregations.iter().zip(&verify_resps) { - if stepped_aggregation.report_aggregation.report_id() != helper_prep_resp.report_id() { + if stepped_aggregation.report_aggregation.report_id() != helper_verify_resp.report_id() + { return Err(Error::Internal( - "missing, duplicate, out-of-order, or unexpected prepare steps in response" + "missing, duplicate, out-of-order, or unexpected verify steps in response" .into(), )); } @@ -1201,51 +1202,52 @@ where stepped_aggregations .into_par_iter() - .zip(prepare_resps) - .try_for_each(|(stepped_aggregation, helper_prep_resp)| { + .zip(verify_resps) + .try_for_each(|(stepped_aggregation, helper_verify_resp)| { let _entered = span.enter(); let (new_state, output_share) = match ( stepped_aggregation.leader_state_or_output_share, - helper_prep_resp.result(), + helper_verify_resp.result(), ) { // Leader is in state continued, incoming helper message is continue. // Leader continues. // This can happen while handling a response to AggregationJobInitReq or // AggregationJobContinueReq. 
( - Either::PrepareState(leader_prepare_state), - PrepareStepResult::Continue { - message: helper_prep_msg, + Either::VerifyState(leader_verify_state), + VerifyStepResult::Continue { + message: helper_verify_msg, }, ) => { - let continuation_and_state = trace_span!( - "VDAF preparation (leader continuation)" - ) - .in_scope(|| { - vdaf.leader_continued( - &ctx, - aggregation_job.aggregation_parameter(), - leader_prepare_state.clone(), - helper_prep_msg, - ) - .and_then(|c| Ok((c.clone(), c.evaluate(&ctx, &vdaf)?))) - .map_err( - |ping_pong_error| { - let report_error = handle_ping_pong_error( - &task_id, - Role::Leader, - stepped_aggregation.report_aggregation.report_id(), - ping_pong_error, - &aggregate_step_failure_counter, - ); - - task_aggregation_counters - .increment_with_report_error(report_error); - report_error - }, - ) - }); + let continuation_and_state = + trace_span!("VDAF verification (leader continuation)") + .in_scope(|| { + vdaf.leader_continued( + &ctx, + aggregation_job.aggregation_parameter(), + leader_verify_state.clone(), + helper_verify_msg, + ) + .and_then(|c| Ok((c.clone(), c.evaluate(&ctx, &vdaf)?))) + .map_err( + |ping_pong_error| { + let report_error = handle_ping_pong_error( + &task_id, + Role::Leader, + stepped_aggregation + .report_aggregation + .report_id(), + ping_pong_error, + &aggregate_step_failure_counter, + ); + + task_aggregation_counters + .increment_with_report_error(report_error); + report_error + }, + ) + }); match continuation_and_state { // Leader has an outbound message: continue. @@ -1274,7 +1276,7 @@ where } // If helper continued but leader is in any state but continue, that's // illegal. - (_, PrepareStepResult::Continue { .. }) => { + (_, VerifyStepResult::Continue { .. 
}) => { warn!( report_id = %stepped_aggregation.report_aggregation.report_id(), "Helper continued but Leader did not", @@ -1282,10 +1284,10 @@ where aggregate_step_failure_counter .add(1, &[KeyValue::new("type", "continue_mismatch")]); task_aggregation_counters - .increment_with_report_error(ReportError::VdafPrepError); + .increment_with_report_error(ReportError::VdafVerifyError); ( ReportAggregationState::Failed { - report_error: ReportError::VdafPrepError, + report_error: ReportError::VdafVerifyError, }, None, ) @@ -1294,12 +1296,12 @@ where // finished. Leader commits output share. // This can only happen while handling a response to // AggregationJobContinueReq. - (Either::OutputShare(output_share), PrepareStepResult::Finished) => { + (Either::OutputShare(output_share), VerifyStepResult::Finished) => { (ReportAggregationState::Finished, Some(output_share.clone())) } // If helper finished but leader is in any state but finished, that's // illegal. - (_, PrepareStepResult::Finished) => { + (_, VerifyStepResult::Finished) => { warn!( report_id = %stepped_aggregation.report_aggregation.report_id(), "Helper finished but Leader did not", @@ -1307,10 +1309,10 @@ where aggregate_step_failure_counter .add(1, &[KeyValue::new("type", "finish_mismatch")]); task_aggregation_counters - .increment_with_report_error(ReportError::VdafPrepError); + .increment_with_report_error(ReportError::VdafVerifyError); ( ReportAggregationState::Failed { - report_error: ReportError::VdafPrepError, + report_error: ReportError::VdafVerifyError, }, None, ) @@ -1319,7 +1321,7 @@ where // helper message is rejected. Leader drops this report. // This can happen while handling a response to AggregationJobInitReq or // AggregationJobContinueReq. - (_, PrepareStepResult::Reject(err)) => { + (_, VerifyStepResult::Reject(err)) => { // TODO(#236): is it correct to just record the transition error // that the helper reports? 
info!( @@ -1967,7 +1969,7 @@ where } } -/// SteppedAggregation represents a report aggregation along with the associated preparation-state. +/// SteppedAggregation represents a report aggregation along with the associated verification-state. struct SteppedAggregation> { report_aggregation: ReportAggregation, leader_state_or_output_share: Either, @@ -1987,7 +1989,7 @@ impl> SteppedAggregation { - PrepareState(PS), + VerifyState(PS), OutputShare(OS), } diff --git a/aggregator/src/aggregator/aggregation_job_driver/tests.rs b/aggregator/src/aggregator/aggregation_job_driver/tests.rs index b74f586d4..5e0d57e60 100644 --- a/aggregator/src/aggregator/aggregation_job_driver/tests.rs +++ b/aggregator/src/aggregator/aggregation_job_driver/tests.rs @@ -35,9 +35,9 @@ use janus_core::{ }; use janus_messages::{ AggregationJobContinueReq, AggregationJobInitializeReq, AggregationJobResp, AggregationJobStep, - Duration, Extension, ExtensionType, Interval, MediaType, PartialBatchSelector, PrepareContinue, - PrepareInit, PrepareResp, PrepareStepResult, ReportError, ReportIdChecksum, ReportMetadata, - ReportShare, Role, Time, TimePrecision, + Duration, Extension, ExtensionType, Interval, MediaType, PartialBatchSelector, ReportError, + ReportIdChecksum, ReportMetadata, ReportShare, Role, Time, TimePrecision, VerifyContinue, + VerifyInit, VerifyResp, VerifyStepResult, batch_mode::{LeaderSelected, TimeInterval}, problem_type::DapProblemType, }; @@ -125,7 +125,7 @@ async fn aggregation_job_driver() { ReportError::ReportDropped, ReportError::HpkeUnknownConfigId, ReportError::HpkeDecryptError, - ReportError::VdafPrepError, + ReportError::VdafVerifyError, ReportError::TaskNotStarted, ReportError::TaskExpired, ReportError::InvalidMessage, @@ -133,7 +133,7 @@ async fn aggregation_job_driver() { ]; let mut rejected_reports: Vec<_> = report_errors .into_iter() - .map(|prepare_error| { + .map(|verify_error| { let rejected_report_metadata = ReportMetadata::new(random(), time, Vec::new()); let 
rejected_transcript = run_vdaf( vdaf.as_ref(), @@ -151,7 +151,7 @@ async fn aggregation_job_driver() { &rejected_transcript, ); - (rejected_report, prepare_error) + (rejected_report, verify_error) }) .collect(); rejected_reports.sort_by_key(|(report, _)| *report.metadata().id()); @@ -243,10 +243,10 @@ async fn aggregation_job_driver() { AggregationJobInitializeReq::::MEDIA_TYPE, AggregationJobResp::MEDIA_TYPE, AggregationJobResp { - prepare_resps: Vec::from([PrepareResp::new( + verify_resps: Vec::from([VerifyResp::new( *accepted_report.metadata().id(), - PrepareStepResult::Continue { - message: accepted_transcript.helper_prepare_transitions[0] + VerifyStepResult::Continue { + message: accepted_transcript.helper_verify_transitions[0] .message() .unwrap() .clone(), @@ -258,9 +258,9 @@ async fn aggregation_job_driver() { rejected_reports .iter() .map(|(rejected_report, report_error)| { - PrepareResp::new( + VerifyResp::new( *rejected_report.metadata().id(), - PrepareStepResult::Reject(*report_error), + VerifyStepResult::Reject(*report_error), ) }), ) @@ -274,9 +274,9 @@ async fn aggregation_job_driver() { AggregationJobContinueReq::MEDIA_TYPE, AggregationJobResp::MEDIA_TYPE, AggregationJobResp { - prepare_resps: Vec::from([PrepareResp::new( + verify_resps: Vec::from([VerifyResp::new( *accepted_report.metadata().id(), - PrepareStepResult::Finished, + VerifyStepResult::Finished, )]), } .get_encoded() @@ -676,23 +676,23 @@ async fn leader_sync_time_interval_aggregation_job_init_single_step() { let leader_request = AggregationJobInitializeReq::new( ().get_encoded().unwrap(), PartialBatchSelector::new_time_interval(), - Vec::from([PrepareInit::new( + Vec::from([VerifyInit::new( ReportShare::new( report.metadata().clone(), report.public_share().get_encoded().unwrap(), report.helper_encrypted_input_share().clone(), ), - transcript.leader_prepare_transitions[0] + transcript.leader_verify_transitions[0] .message() .unwrap() .clone(), )]), ); let helper_response = 
AggregationJobResp { - prepare_resps: Vec::from([PrepareResp::new( + verify_resps: Vec::from([VerifyResp::new( *report.metadata().id(), - PrepareStepResult::Continue { - message: transcript.helper_prepare_transitions[0] + VerifyStepResult::Continue { + message: transcript.helper_verify_transitions[0] .message() .unwrap() .clone(), @@ -1062,23 +1062,23 @@ async fn leader_sync_time_interval_aggregation_job_init_two_steps() { let leader_request = AggregationJobInitializeReq::new( aggregation_param.get_encoded().unwrap(), PartialBatchSelector::new_time_interval(), - Vec::from([PrepareInit::new( + Vec::from([VerifyInit::new( ReportShare::new( report.metadata().clone(), report.public_share().get_encoded().unwrap(), report.helper_encrypted_input_share().clone(), ), - transcript.leader_prepare_transitions[0] + transcript.leader_verify_transitions[0] .message() .unwrap() .clone(), )]), ); let helper_response = AggregationJobResp { - prepare_resps: Vec::from([PrepareResp::new( + verify_resps: Vec::from([VerifyResp::new( *report.metadata().id(), - PrepareStepResult::Continue { - message: transcript.helper_prepare_transitions[0] + VerifyStepResult::Continue { + message: transcript.helper_verify_transitions[0] .message() .unwrap() .clone(), @@ -1151,7 +1151,7 @@ async fn leader_sync_time_interval_aggregation_job_init_two_steps() { 0, None, ReportAggregationState::LeaderContinue { - continuation: transcript.leader_prepare_transitions[1] + continuation: transcript.leader_verify_transitions[1] .continuation .clone() .unwrap(), @@ -1435,24 +1435,24 @@ async fn leader_sync_time_interval_aggregation_job_init_partially_garbage_collec ().get_encoded().unwrap(), PartialBatchSelector::new_time_interval(), Vec::from([ - PrepareInit::new( + VerifyInit::new( ReportShare::new( gc_eligible_report.metadata().clone(), gc_eligible_report.public_share().get_encoded().unwrap(), gc_eligible_report.helper_encrypted_input_share().clone(), ), - gc_eligible_transcript.leader_prepare_transitions[0] + 
gc_eligible_transcript.leader_verify_transitions[0] .message() .unwrap() .clone(), ), - PrepareInit::new( + VerifyInit::new( ReportShare::new( gc_ineligible_report.metadata().clone(), gc_ineligible_report.public_share().get_encoded().unwrap(), gc_ineligible_report.helper_encrypted_input_share().clone(), ), - gc_ineligible_transcript.leader_prepare_transitions[0] + gc_ineligible_transcript.leader_verify_transitions[0] .message() .unwrap() .clone(), @@ -1460,20 +1460,20 @@ async fn leader_sync_time_interval_aggregation_job_init_partially_garbage_collec ]), ); let helper_response = AggregationJobResp { - prepare_resps: Vec::from([ - PrepareResp::new( + verify_resps: Vec::from([ + VerifyResp::new( *gc_eligible_report.metadata().id(), - PrepareStepResult::Continue { - message: gc_eligible_transcript.helper_prepare_transitions[0] + VerifyStepResult::Continue { + message: gc_eligible_transcript.helper_verify_transitions[0] .message() .unwrap() .clone(), }, ), - PrepareResp::new( + VerifyResp::new( *gc_ineligible_report.metadata().id(), - PrepareStepResult::Continue { - message: gc_ineligible_transcript.helper_prepare_transitions[0] + VerifyStepResult::Continue { + message: gc_ineligible_transcript.helper_verify_transitions[0] .message() .unwrap() .clone(), @@ -1763,23 +1763,23 @@ async fn leader_sync_leader_selected_aggregation_job_init_single_step() { let leader_request = AggregationJobInitializeReq::new( ().get_encoded().unwrap(), PartialBatchSelector::new_leader_selected(batch_id), - Vec::from([PrepareInit::new( + Vec::from([VerifyInit::new( ReportShare::new( report.metadata().clone(), report.public_share().get_encoded().unwrap(), report.helper_encrypted_input_share().clone(), ), - transcript.leader_prepare_transitions[0] + transcript.leader_verify_transitions[0] .message() .unwrap() .clone(), )]), ); let helper_response = AggregationJobResp { - prepare_resps: Vec::from([PrepareResp::new( + verify_resps: Vec::from([VerifyResp::new( *report.metadata().id(), - 
PrepareStepResult::Continue { - message: transcript.helper_prepare_transitions[0] + VerifyStepResult::Continue { + message: transcript.helper_verify_transitions[0] .message() .unwrap() .clone(), @@ -2081,23 +2081,23 @@ async fn leader_sync_leader_selected_aggregation_job_init_two_steps() { let leader_request = AggregationJobInitializeReq::new( aggregation_param.get_encoded().unwrap(), PartialBatchSelector::new_leader_selected(batch_id), - Vec::from([PrepareInit::new( + Vec::from([VerifyInit::new( ReportShare::new( report.metadata().clone(), report.public_share().get_encoded().unwrap(), report.helper_encrypted_input_share().clone(), ), - transcript.leader_prepare_transitions[0] + transcript.leader_verify_transitions[0] .message() .unwrap() .clone(), )]), ); let helper_response = AggregationJobResp { - prepare_resps: Vec::from([PrepareResp::new( + verify_resps: Vec::from([VerifyResp::new( *report.metadata().id(), - PrepareStepResult::Continue { - message: transcript.helper_prepare_transitions[0] + VerifyStepResult::Continue { + message: transcript.helper_verify_transitions[0] .message() .unwrap() .clone(), @@ -2170,7 +2170,7 @@ async fn leader_sync_leader_selected_aggregation_job_init_two_steps() { 0, None, ReportAggregationState::LeaderContinue { - continuation: transcript.leader_prepare_transitions[1] + continuation: transcript.leader_verify_transitions[1] .continuation .clone() .unwrap(), @@ -2329,7 +2329,7 @@ async fn leader_sync_time_interval_aggregation_job_continue() { 0, None, ReportAggregationState::LeaderContinue { - continuation: transcript.leader_prepare_transitions[1] + continuation: transcript.leader_verify_transitions[1] .continuation .clone() .unwrap(), @@ -2391,18 +2391,18 @@ async fn leader_sync_time_interval_aggregation_job_continue() { // verification -- but mockito does not expose this functionality at time of writing.) 
let leader_request = AggregationJobContinueReq::new( AggregationJobStep::from(1), - Vec::from([PrepareContinue::new( + Vec::from([VerifyContinue::new( *report.metadata().id(), - transcript.leader_prepare_transitions[1] + transcript.leader_verify_transitions[1] .message() .unwrap() .clone(), )]), ); let helper_response = AggregationJobResp { - prepare_resps: Vec::from([PrepareResp::new( + verify_resps: Vec::from([VerifyResp::new( *report.metadata().id(), - PrepareStepResult::Finished, + VerifyStepResult::Finished, )]), }; let mocked_aggregate_failure = server @@ -2681,7 +2681,7 @@ async fn leader_sync_leader_selected_aggregation_job_continue() { 0, None, ReportAggregationState::LeaderContinue { - continuation: transcript.leader_prepare_transitions[1] + continuation: transcript.leader_verify_transitions[1] .continuation .clone() .unwrap(), @@ -2727,18 +2727,18 @@ async fn leader_sync_leader_selected_aggregation_job_continue() { // verification -- but mockito does not expose this functionality at time of writing.) 
let leader_request = AggregationJobContinueReq::new( AggregationJobStep::from(1), - Vec::from([PrepareContinue::new( + Vec::from([VerifyContinue::new( *report.metadata().id(), - transcript.leader_prepare_transitions[1] + transcript.leader_verify_transitions[1] .message() .unwrap() .clone(), )]), ); let helper_response = AggregationJobResp { - prepare_resps: Vec::from([PrepareResp::new( + verify_resps: Vec::from([VerifyResp::new( *report.metadata().id(), - PrepareStepResult::Finished, + VerifyStepResult::Finished, )]), }; let mocked_aggregate_failure = server @@ -3000,13 +3000,13 @@ async fn leader_async_aggregation_job_init_to_pending() { let leader_request = AggregationJobInitializeReq::new( aggregation_param.get_encoded().unwrap(), PartialBatchSelector::new_time_interval(), - Vec::from([PrepareInit::new( + Vec::from([VerifyInit::new( ReportShare::new( report.metadata().clone(), report.public_share().get_encoded().unwrap(), report.helper_encrypted_input_share().clone(), ), - transcript.leader_prepare_transitions[0] + transcript.leader_verify_transitions[0] .message() .unwrap() .clone(), @@ -3077,7 +3077,7 @@ async fn leader_async_aggregation_job_init_to_pending() { 0, None, ReportAggregationState::LeaderPollInit { - prepare_state: *transcript.leader_prepare_transitions[0].prepare_state(), + verify_state: *transcript.leader_verify_transitions[0].verify_state(), }, ); @@ -3256,13 +3256,13 @@ async fn leader_async_aggregation_job_init_to_pending_two_step() { let leader_request = AggregationJobInitializeReq::new( aggregation_param.get_encoded().unwrap(), PartialBatchSelector::new_time_interval(), - Vec::from([PrepareInit::new( + Vec::from([VerifyInit::new( ReportShare::new( report.metadata().clone(), report.public_share().get_encoded().unwrap(), report.helper_encrypted_input_share().clone(), ), - transcript.leader_prepare_transitions[0] + transcript.leader_verify_transitions[0] .message() .unwrap() .clone(), @@ -3333,7 +3333,7 @@ async fn 
leader_async_aggregation_job_init_to_pending_two_step() { 0, None, ReportAggregationState::LeaderPollInit { - prepare_state: *transcript.leader_prepare_transitions[0].prepare_state(), + verify_state: *transcript.leader_verify_transitions[0].verify_state(), }, ); @@ -3453,7 +3453,7 @@ async fn leader_async_aggregation_job_continue_to_pending() { .run_unnamed_tx(|tx| { let task = leader_task.clone(); let report = report.clone(); - let continuation = transcript.leader_prepare_transitions[1] + let continuation = transcript.leader_verify_transitions[1] .continuation .clone() .unwrap(); @@ -3522,9 +3522,9 @@ async fn leader_async_aggregation_job_continue_to_pending() { // Setup: prepare mocked HTTP response. let leader_request = AggregationJobContinueReq::new( AggregationJobStep::from(1), - Vec::from([PrepareContinue::new( + Vec::from([VerifyContinue::new( *report.metadata().id(), - transcript.leader_prepare_transitions[1] + transcript.leader_verify_transitions[1] .message() .unwrap() .clone(), @@ -3592,7 +3592,7 @@ async fn leader_async_aggregation_job_continue_to_pending() { 0, None, ReportAggregationState::LeaderPollContinue { - continuation: transcript.leader_prepare_transitions[1] + continuation: transcript.leader_verify_transitions[1] .continuation .clone() .unwrap(), @@ -3714,7 +3714,7 @@ async fn leader_async_aggregation_job_init_poll_to_pending() { .run_unnamed_tx(|tx| { let task = leader_task.clone(); let report = report.clone(); - let prepare_state = *transcript.leader_prepare_transitions[0].prepare_state(); + let verify_state = *transcript.leader_verify_transitions[0].verify_state(); Box::pin(async move { tx.put_aggregator_task(&task).await.unwrap(); @@ -3743,7 +3743,7 @@ async fn leader_async_aggregation_job_init_poll_to_pending() { *report.metadata().time(), 0, None, - ReportAggregationState::LeaderPollInit { prepare_state }, + ReportAggregationState::LeaderPollInit { verify_state }, )) .await .unwrap(); @@ -3839,7 +3839,7 @@ async fn 
leader_async_aggregation_job_init_poll_to_pending() { 0, None, ReportAggregationState::LeaderPollInit { - prepare_state: *transcript.leader_prepare_transitions[0].prepare_state(), + verify_state: *transcript.leader_verify_transitions[0].verify_state(), }, ); @@ -3958,7 +3958,7 @@ async fn leader_async_aggregation_job_init_poll_to_pending_two_step() { .run_unnamed_tx(|tx| { let task = leader_task.clone(); let report = report.clone(); - let prepare_state = *transcript.leader_prepare_transitions[0].prepare_state(); + let verify_state = *transcript.leader_verify_transitions[0].verify_state(); Box::pin(async move { tx.put_aggregator_task(&task).await.unwrap(); @@ -3987,7 +3987,7 @@ async fn leader_async_aggregation_job_init_poll_to_pending_two_step() { *report.metadata().time(), 0, None, - ReportAggregationState::LeaderPollInit { prepare_state }, + ReportAggregationState::LeaderPollInit { verify_state }, )) .await .unwrap(); @@ -4083,7 +4083,7 @@ async fn leader_async_aggregation_job_init_poll_to_pending_two_step() { 0, None, ReportAggregationState::LeaderPollInit { - prepare_state: *transcript.leader_prepare_transitions[0].prepare_state(), + verify_state: *transcript.leader_verify_transitions[0].verify_state(), }, ); @@ -4202,7 +4202,7 @@ async fn leader_async_aggregation_job_init_poll_to_finished() { .run_unnamed_tx(|tx| { let task = leader_task.clone(); let report = report.clone(); - let prepare_state = *transcript.leader_prepare_transitions[0].prepare_state(); + let verify_state = *transcript.leader_verify_transitions[0].verify_state(); Box::pin(async move { tx.put_aggregator_task(&task).await.unwrap(); @@ -4231,7 +4231,7 @@ async fn leader_async_aggregation_job_init_poll_to_finished() { *report.metadata().time(), 0, None, - ReportAggregationState::LeaderPollInit { prepare_state }, + ReportAggregationState::LeaderPollInit { verify_state }, )) .await .unwrap(); @@ -4267,10 +4267,10 @@ async fn leader_async_aggregation_job_init_poll_to_finished() { // Setup: prepare 
mocked HTTP response. let helper_response = AggregationJobResp { - prepare_resps: Vec::from([PrepareResp::new( + verify_resps: Vec::from([VerifyResp::new( *report.metadata().id(), - PrepareStepResult::Continue { - message: transcript.helper_prepare_transitions[0] + VerifyStepResult::Continue { + message: transcript.helper_verify_transitions[0] .message() .unwrap() .clone(), @@ -4463,7 +4463,7 @@ async fn leader_async_aggregation_job_init_poll_to_continue() { .run_unnamed_tx(|tx| { let task = leader_task.clone(); let report = report.clone(); - let prepare_state = *transcript.leader_prepare_transitions[0].prepare_state(); + let verify_state = *transcript.leader_verify_transitions[0].verify_state(); Box::pin(async move { tx.put_aggregator_task(&task).await.unwrap(); @@ -4492,7 +4492,7 @@ async fn leader_async_aggregation_job_init_poll_to_continue() { *report.metadata().time(), 0, None, - ReportAggregationState::LeaderPollInit { prepare_state }, + ReportAggregationState::LeaderPollInit { verify_state }, )) .await .unwrap(); @@ -4528,10 +4528,10 @@ async fn leader_async_aggregation_job_init_poll_to_continue() { // Setup: prepare mocked HTTP response. 
let helper_response = AggregationJobResp { - prepare_resps: Vec::from([PrepareResp::new( + verify_resps: Vec::from([VerifyResp::new( *report.metadata().id(), - PrepareStepResult::Continue { - message: transcript.helper_prepare_transitions[0] + VerifyStepResult::Continue { + message: transcript.helper_verify_transitions[0] .message() .unwrap() .clone(), @@ -4602,7 +4602,7 @@ async fn leader_async_aggregation_job_init_poll_to_continue() { 0, None, ReportAggregationState::LeaderContinue { - continuation: transcript.leader_prepare_transitions[1] + continuation: transcript.leader_verify_transitions[1] .continuation .clone() .unwrap(), @@ -4724,7 +4724,7 @@ async fn leader_async_aggregation_job_continue_poll_to_pending() { .run_unnamed_tx(|tx| { let task = leader_task.clone(); let report = report.clone(); - let continuation = transcript.leader_prepare_transitions[1] + let continuation = transcript.leader_verify_transitions[1] .continuation .clone() .unwrap(); @@ -4855,7 +4855,7 @@ async fn leader_async_aggregation_job_continue_poll_to_pending() { 0, None, ReportAggregationState::LeaderPollContinue { - continuation: transcript.leader_prepare_transitions[1] + continuation: transcript.leader_verify_transitions[1] .continuation .clone() .unwrap(), @@ -4977,7 +4977,7 @@ async fn leader_async_aggregation_job_continue_poll_to_finished() { .run_unnamed_tx(|tx| { let task = leader_task.clone(); let report = report.clone(); - let continuation = transcript.leader_prepare_transitions[1] + let continuation = transcript.leader_verify_transitions[1] .continuation .clone() .unwrap(); @@ -5049,9 +5049,9 @@ async fn leader_async_aggregation_job_continue_poll_to_finished() { // Setup: prepare mocked HTTP responses. 
let helper_response = AggregationJobResp { - prepare_resps: Vec::from([PrepareResp::new( + verify_resps: Vec::from([VerifyResp::new( *report.metadata().id(), - PrepareStepResult::Finished, + VerifyStepResult::Finished, )]), }; let mocked_aggregate_success = server @@ -5234,7 +5234,7 @@ async fn helper_async_init_processing_to_finished() { .run_unnamed_tx(|tx| { let helper_task = helper_task.clone(); let report_share = report_share.clone(); - let message = transcript.leader_prepare_transitions[0] + let message = transcript.leader_verify_transitions[0] .message() .unwrap() .clone(); @@ -5268,7 +5268,7 @@ async fn helper_async_init_processing_to_finished() { 0, None, ReportAggregationState::HelperInitProcessing { - prepare_init: PrepareInit::new(report_share, message), + verify_init: VerifyInit::new(report_share, message), require_taskbind_extension: false, }, )) @@ -5349,10 +5349,10 @@ async fn helper_async_init_processing_to_finished() { *report_share.metadata().id(), *report_share.metadata().time(), 0, - Some(PrepareResp::new( + Some(VerifyResp::new( *report_share.metadata().id(), - PrepareStepResult::Continue { - message: transcript.helper_prepare_transitions[0] + VerifyStepResult::Continue { + message: transcript.helper_verify_transitions[0] .message() .unwrap() .clone(), @@ -5477,7 +5477,7 @@ async fn helper_async_init_processing_to_continue() { .run_unnamed_tx(|tx| { let helper_task = helper_task.clone(); let report_share = report_share.clone(); - let message = transcript.leader_prepare_transitions[0] + let message = transcript.leader_verify_transitions[0] .message() .unwrap() .clone(); @@ -5511,7 +5511,7 @@ async fn helper_async_init_processing_to_continue() { 0, None, ReportAggregationState::HelperInitProcessing { - prepare_init: PrepareInit::new(report_share, message), + verify_init: VerifyInit::new(report_share, message), require_taskbind_extension: false, }, )) @@ -5592,17 +5592,17 @@ async fn helper_async_init_processing_to_continue() { 
*report_share.metadata().id(), *report_share.metadata().time(), 0, - Some(PrepareResp::new( + Some(VerifyResp::new( *report_share.metadata().id(), - PrepareStepResult::Continue { - message: transcript.helper_prepare_transitions[0] + VerifyStepResult::Continue { + message: transcript.helper_verify_transitions[0] .message() .unwrap() .clone(), }, )), ReportAggregationState::HelperContinue { - prepare_state: *transcript.helper_prepare_transitions[0].prepare_state(), + verify_state: *transcript.helper_verify_transitions[0].verify_state(), }, ); @@ -5717,8 +5717,8 @@ async fn helper_async_continue_processing_to_finished() { .run_unnamed_tx(|tx| { let helper_task = helper_task.clone(); let report_share = report_share.clone(); - let prepare_state = *transcript.helper_prepare_transitions[0].prepare_state(); - let message = transcript.leader_prepare_transitions[1] + let verify_state = *transcript.helper_verify_transitions[0].verify_state(); + let message = transcript.leader_verify_transitions[1] .message() .unwrap() .clone(); @@ -5752,8 +5752,8 @@ async fn helper_async_continue_processing_to_finished() { 0, None, ReportAggregationState::HelperContinueProcessing { - prepare_state, - prepare_continue: PrepareContinue::new(report_id, message), + verify_state, + verify_continue: VerifyContinue::new(report_id, message), }, )) .await @@ -5833,9 +5833,9 @@ async fn helper_async_continue_processing_to_finished() { *report_share.metadata().id(), *report_share.metadata().time(), 0, - Some(PrepareResp::new( + Some(VerifyResp::new( *report_share.metadata().id(), - PrepareStepResult::Finished, + VerifyStepResult::Finished, )), ReportAggregationState::Finished, ); diff --git a/aggregator/src/aggregator/aggregation_job_init.rs b/aggregator/src/aggregator/aggregation_job_init.rs index 53b41e397..88bc5df58 100644 --- a/aggregator/src/aggregator/aggregation_job_init.rs +++ b/aggregator/src/aggregator/aggregation_job_init.rs @@ -15,8 +15,8 @@ use janus_core::{ vdaf::vdaf_application_context, 
}; use janus_messages::{ - ExtensionType, InputShareAad, PlaintextInputShare, PrepareResp, PrepareStepResult, ReportError, - Role, + ExtensionType, InputShareAad, PlaintextInputShare, ReportError, Role, VerifyResp, + VerifyStepResult, }; use opentelemetry::{ KeyValue, @@ -135,24 +135,24 @@ where // Assert safety: this function should only be called with report // aggregations in the HelperInitProcessing state. - let (prepare_init, require_taskbind_extension) = assert_matches!( + let (verify_init, require_taskbind_extension) = assert_matches!( report_aggregation.state(), ReportAggregationState::HelperInitProcessing { - prepare_init, + verify_init, require_taskbind_extension, - } => (prepare_init, *require_taskbind_extension) + } => (verify_init, *require_taskbind_extension) ); // If decryption fails, then the aggregator MUST fail with error // `hpke-decrypt-error`. (ยง4.6.2.3 [dap-16]) let hpke_keypair = hpke_keypairs.keypair( - prepare_init + verify_init .report_share() .encrypted_input_share() .config_id(), ).ok_or_else(|| { debug!( - config_id = %prepare_init.report_share().encrypted_input_share().config_id(), + config_id = %verify_init.report_share().encrypted_input_share().config_id(), "Helper encrypted input share references unknown HPKE config ID" ); metrics @@ -164,14 +164,14 @@ where let plaintext = hpke_keypair.and_then(|hpke_keypair| { let input_share_aad = InputShareAad::new( *task.id(), - prepare_init.report_share().metadata().clone(), - prepare_init.report_share().public_share().to_vec(), + verify_init.report_share().metadata().clone(), + verify_init.report_share().public_share().to_vec(), ) .get_encoded() .map_err(|err| { debug!( task_id = %task.id(), - report_id = ?prepare_init.report_share().metadata().id(), + report_id = ?verify_init.report_share().metadata().id(), ?err, "Couldn't encode input share AAD" ); @@ -192,7 +192,7 @@ where &Role::Client, &Role::Helper, ), - prepare_init.report_share().encrypted_input_share(), + 
verify_init.report_share().encrypted_input_share(), &input_share_aad, ) .inspect(|_| { @@ -201,7 +201,7 @@ where &[KeyValue::new( "hpke_config_id", i64::from(u8::from( - *prepare_init + *verify_init .report_share() .encrypted_input_share() .config_id(), @@ -212,7 +212,7 @@ where .map_err(|error| { debug!( task_id = %task.id(), - report_id = ?prepare_init.report_share().metadata().id(), + report_id = ?verify_init.report_share().metadata().id(), ?error, "Couldn't decrypt helper's report share" ); @@ -228,7 +228,7 @@ where .map_err(|error| { debug!( task_id = %task.id(), - report_id = ?prepare_init.report_share().metadata().id(), + report_id = ?verify_init.report_share().metadata().id(), ?error, "Couldn't decode helper's plaintext input share", ); metrics.aggregate_step_failure_counter.add( @@ -247,12 +247,12 @@ where for extension in plaintext_input_share .private_extensions() .iter() - .chain(prepare_init.report_share().metadata().public_extensions()) + .chain(verify_init.report_share().metadata().public_extensions()) { if matches!(extension.extension_type(), ExtensionType::Unknown(_)) { debug!( task_id = %task.id(), - report_id = ?prepare_init.report_share().metadata().id(), + report_id = ?verify_init.report_share().metadata().id(), unrecognized_extension_type = ?extension.extension_type(), "Received report share with unrecognized extension type", ); @@ -267,7 +267,7 @@ where { debug!( task_id = %task.id(), - report_id = ?prepare_init.report_share().metadata().id(), + report_id = ?verify_init.report_share().metadata().id(), "Received report share with duplicate extensions", ); metrics @@ -285,7 +285,7 @@ where if !valid_taskbind_extension_present { debug!( task_id = %task.id(), - report_id = ?prepare_init.report_share().metadata().id(), + report_id = ?verify_init.report_share().metadata().id(), "Taskprov task received report with missing or malformed \ taskbind extension", ); @@ -302,7 +302,7 @@ where // taskprov not enabled, but the taskbind extension is present. 
debug!( task_id = %task.id(), - report_id = ?prepare_init.report_share().metadata().id(), + report_id = ?verify_init.report_share().metadata().id(), "Non-taskprov task received report with unexpected taskbind \ extension", ); @@ -324,7 +324,7 @@ where .map_err(|error| { debug!( task_id = %task.id(), - report_id = ?prepare_init.report_share().metadata().id(), + report_id = ?verify_init.report_share().metadata().id(), ?error, "Couldn't decode helper's input share", ); metrics @@ -336,12 +336,12 @@ where let public_share = A::PublicShare::get_decoded_with_param( &vdaf, - prepare_init.report_share().public_share(), + verify_init.report_share().public_share(), ) .map_err(|error| { debug!( task_id = %task.id(), - report_id = ?prepare_init.report_share().metadata().id(), + report_id = ?verify_init.report_share().metadata().id(), ?error, "Couldn't decode public share", ); metrics @@ -353,7 +353,7 @@ where let shares = input_share.and_then(|input_share| Ok((public_share?, input_share))); - if let Ok(report_time_dt) = prepare_init + if let Ok(report_time_dt) = verify_init .report_share() .metadata() .time() @@ -377,7 +377,7 @@ where // Reject reports from too far in the future. (ยง4.6.2.4 step 2 [dap-16]) let shares = shares.and_then(|shares| { - if prepare_init + if verify_init .report_share() .metadata() .time() @@ -391,7 +391,7 @@ where // Reject reports before the task has started. (ยง4.6.2.4 step 3 [dap-16]) let shares = shares.and_then(|shares| { if let Some(task_start) = task.task_start() { - if prepare_init + if verify_init .report_share() .metadata() .time() @@ -407,7 +407,7 @@ where // of the task_end time. 
(ยง4.6.2.4 step 4 [dap-16]) let shares = shares.and_then(|shares| { if let Some(task_end) = task.task_end() { - if prepare_init + if verify_init .report_share() .metadata() .time() @@ -419,28 +419,28 @@ where Ok(shares) }); - // Next, the aggregator runs the preparation-state initialization algorithm + // Next, the aggregator runs the verification-state initialization algorithm // for the VDAF associated with the task and computes the first state // transition. [...] If either step fails, then the aggregator MUST fail // with error `vdaf-prep-error`. (ยง4.6.2 [dap-16]) let init_rslt = shares.and_then(|(public_share, input_share)| { - trace_span!("VDAF preparation (helper initialization)").in_scope(|| { + trace_span!("VDAF verification (helper initialization)").in_scope(|| { vdaf.helper_initialized( verify_key.as_bytes(), &ctx, aggregation_job.aggregation_parameter(), /* report ID is used as VDAF nonce */ - prepare_init.report_share().metadata().id().as_ref(), + verify_init.report_share().metadata().id().as_ref(), &public_share, &input_share, - prepare_init.message(), + verify_init.message(), ) .and_then(|continuation| continuation.evaluate(&ctx, &vdaf)) .map_err(|error| { handle_ping_pong_error( task.id(), Role::Helper, - prepare_init.report_share().metadata().id(), + verify_init.report_share().metadata().id(), error, &metrics.aggregate_step_failure_counter, ) @@ -448,17 +448,17 @@ where }) }); - let (report_aggregation_state, prepare_step_result, output_share) = + let (report_aggregation_state, verify_step_result, output_share) = match init_rslt { - // Helper is not finished. Store the new prepare state, respond to + // Helper is not finished. Store the new verify state, respond to // the leader with the outgoing message and await the next message // from the Leader to advance to the next step. 
Ok(PingPongState::Continued(Continued{verifier_state, message})) => { ( ReportAggregationState::HelperContinue { - prepare_state: verifier_state + verify_state: verifier_state }, - PrepareStepResult::Continue { message }, + VerifyStepResult::Continue { message }, None, ) } @@ -467,16 +467,16 @@ where // share and respond to the leader with the outgoing message. Ok(PingPongState::FinishedWithOutbound{output_share, message}) => ( ReportAggregationState::Finished, - PrepareStepResult::Continue { message }, + VerifyStepResult::Continue { message }, Some(output_share), ), // Helper cannot finish at this stage Ok(PingPongState::Finished{ .. }) => ( ReportAggregationState::Failed { - report_error: ReportError::VdafPrepError, + report_error: ReportError::VdafVerifyError, }, - PrepareStepResult::Reject(ReportError::VdafPrepError), + VerifyStepResult::Reject(ReportError::VdafVerifyError), None, ), @@ -484,18 +484,18 @@ where // abort further processing. Err(report_error) => ( ReportAggregationState::Failed { report_error }, - PrepareStepResult::Reject(report_error), + VerifyStepResult::Reject(report_error), None, ), }; - let report_id = *prepare_init.report_share().metadata().id(); + let report_id = *verify_init.report_share().metadata().id(); sender.send(WritableReportAggregation::new( report_aggregation - .with_last_prep_resp( - Some(PrepareResp::new( + .with_last_verify_resp( + Some(VerifyResp::new( report_id, - prepare_step_result, + verify_step_result, )) ) .with_state(report_aggregation_state), @@ -540,7 +540,7 @@ pub mod test_util { }; use janus_messages::{ AggregationJobId, AggregationJobInitializeReq, Extension, HpkeConfig, MediaType, - PrepareInit, ReportMetadata, ReportShare, + ReportMetadata, ReportShare, VerifyInit, batch_mode::{self}, }; use prio::{ @@ -553,7 +553,7 @@ pub mod test_util { use crate::aggregator::test_util::generate_helper_report_share; #[derive(Clone)] - pub struct PrepareInitGenerator + pub struct VerifyInitGenerator where V: vdaf::Vdaf, { @@ 
-565,7 +565,7 @@ pub mod test_util { private_extensions: Vec, } - impl PrepareInitGenerator + impl VerifyInitGenerator where V: AsyncAggregator + vdaf::Client<16>, { @@ -594,7 +594,7 @@ pub mod test_util { pub fn next( &self, measurement: &V::Measurement, - ) -> (PrepareInit, VdafTranscript) { + ) -> (VerifyInit, VdafTranscript) { self.next_with_metadata( ReportMetadata::new( random(), @@ -609,13 +609,13 @@ pub mod test_util { &self, report_metadata: ReportMetadata, measurement: &V::Measurement, - ) -> (PrepareInit, VdafTranscript) { + ) -> (VerifyInit, VdafTranscript) { let (report_share, transcript) = self.next_report_share_with_metadata(report_metadata, measurement); ( - PrepareInit::new( + VerifyInit::new( report_share, - transcript.leader_prepare_transitions[0] + transcript.leader_verify_transitions[0] .message() .unwrap() .clone(), @@ -711,8 +711,8 @@ mod tests { }; use janus_messages::{ AggregationJobId, AggregationJobInitializeReq, AggregationJobResp, Duration, Extension, - ExtensionType, MediaType, PartialBatchSelector, PrepareResp, PrepareStepResult, - ReportError, ReportMetadata, TimePrecision, batch_mode::TimeInterval, + ExtensionType, MediaType, PartialBatchSelector, ReportError, ReportMetadata, TimePrecision, + VerifyResp, VerifyStepResult, batch_mode::TimeInterval, }; use prio::{ codec::Encode, @@ -724,7 +724,7 @@ mod tests { use crate::aggregator::{ Config, - aggregation_job_init::test_util::{PrepareInitGenerator, put_aggregation_job}, + aggregation_job_init::test_util::{VerifyInitGenerator, put_aggregation_job}, http_handlers::{ AggregatorHandlerBuilder, test_util::{decode_response_body, take_problem_details}, @@ -737,7 +737,7 @@ mod tests { > { pub(super) clock: MockClock, pub(super) task: Task, - pub(super) prepare_init_generator: PrepareInitGenerator, + pub(super) verify_init_generator: VerifyInitGenerator, pub(super) aggregation_job_id: AggregationJobId, pub(super) aggregation_job_init_req: AggregationJobInitializeReq, 
aggregation_job_init_resp: Option, @@ -794,18 +794,15 @@ mod tests { assert_eq!(response.status(), StatusCode::CREATED); let aggregation_job_resp: AggregationJobResp = decode_response_body(&mut response).await; - let prepare_resps = assert_matches!( + let verify_resps = assert_matches!( &aggregation_job_resp, - AggregationJobResp { prepare_resps } => prepare_resps + AggregationJobResp { verify_resps } => verify_resps ); assert_eq!( - prepare_resps.len(), - test_case.aggregation_job_init_req.prepare_inits().len(), - ); - assert_matches!( - prepare_resps[0].result(), - &PrepareStepResult::Continue { .. } + verify_resps.len(), + test_case.aggregation_job_init_req.verify_inits().len(), ); + assert_matches!(verify_resps[0].result(), &VerifyStepResult::Continue { .. }); test_case.aggregation_job_init_resp = Some(aggregation_job_resp); test_case @@ -853,7 +850,7 @@ mod tests { .unwrap(); let router = builder.build_axum_router(None); - let prepare_init_generator = PrepareInitGenerator::new( + let verify_init_generator = VerifyInitGenerator::new( clock.clone(), helper_task.clone(), keypair.config().clone(), @@ -861,22 +858,22 @@ mod tests { aggregation_param.clone(), ); - let prepare_inits = Vec::from([ - prepare_init_generator.next(&measurement).0, - prepare_init_generator.next(&measurement).0, + let verify_inits = Vec::from([ + verify_init_generator.next(&measurement).0, + verify_init_generator.next(&measurement).0, ]); let aggregation_job_id = random(); let aggregation_job_init_req = AggregationJobInitializeReq::new( aggregation_param.get_encoded().unwrap(), PartialBatchSelector::new_time_interval(), - prepare_inits.clone(), + verify_inits.clone(), ); AggregationJobInitTestCase { clock, task, - prepare_init_generator, + verify_init_generator, aggregation_job_id, aggregation_job_init_req, aggregation_job_init_resp: None, @@ -983,8 +980,8 @@ mod tests { ) .await; - let prepare_init = test_case - .prepare_init_generator + let verify_init = test_case + .verify_init_generator 
.clone() .with_private_extensions(Vec::from([Extension::new( ExtensionType::Taskbind, @@ -992,11 +989,11 @@ mod tests { )])) .next(&0) .0; - let report_id = *prepare_init.report_share().metadata().id(); + let report_id = *verify_init.report_share().metadata().id(); let aggregation_job_init_req = AggregationJobInitializeReq::new( dummy::AggregationParam(1).get_encoded().unwrap(), PartialBatchSelector::new_time_interval(), - Vec::from([prepare_init]), + Vec::from([verify_init]), ); let mut response = put_aggregation_job( @@ -1009,9 +1006,9 @@ mod tests { assert_eq!(response.status(), StatusCode::CREATED); let want_aggregation_job_resp = AggregationJobResp { - prepare_resps: Vec::from([PrepareResp::new( + verify_resps: Vec::from([VerifyResp::new( report_id, - PrepareStepResult::Reject(ReportError::InvalidMessage), + VerifyStepResult::Reject(ReportError::InvalidMessage), )]), }; let got_aggregation_job_resp: AggregationJobResp = @@ -1027,7 +1024,7 @@ mod tests { let mutated_aggregation_job_init_req = AggregationJobInitializeReq::new( dummy::AggregationParam(1).get_encoded().unwrap(), PartialBatchSelector::new_time_interval(), - test_case.aggregation_job_init_req.prepare_inits().to_vec(), + test_case.aggregation_job_init_req.verify_inits().to_vec(), ); let response = put_aggregation_job( @@ -1044,32 +1041,28 @@ mod tests { async fn aggregation_job_mutation_report_shares() { let test_case = setup_aggregate_init_test().await; - let prepare_inits = test_case.aggregation_job_init_req.prepare_inits(); + let verify_inits = test_case.aggregation_job_init_req.verify_inits(); // Put the aggregation job again, mutating the associated report shares' metadata such that // uniqueness constraints on client_reports are violated - for mutated_prepare_inits in [ + for mutated_verify_inits in [ // Omit a report share that was included previously - Vec::from(&prepare_inits[0..prepare_inits.len() - 1]), + Vec::from(&verify_inits[0..verify_inits.len() - 1]), // Include a different report 
share than was included previously [ - &prepare_inits[0..prepare_inits.len() - 1], - &[test_case.prepare_init_generator.next(&0).0], + &verify_inits[0..verify_inits.len() - 1], + &[test_case.verify_init_generator.next(&0).0], ] .concat(), // Include an extra report share than was included previously - [ - prepare_inits, - &[test_case.prepare_init_generator.next(&0).0], - ] - .concat(), + [verify_inits, &[test_case.verify_init_generator.next(&0).0]].concat(), // Reverse the order of the reports - prepare_inits.iter().rev().cloned().collect(), + verify_inits.iter().rev().cloned().collect(), ] { let mutated_aggregation_job_init_req = AggregationJobInitializeReq::new( test_case.aggregation_param.get_encoded().unwrap(), PartialBatchSelector::new_time_interval(), - mutated_prepare_inits, + mutated_verify_inits, ); let response = put_aggregation_job( &test_case.task, @@ -1089,15 +1082,15 @@ mod tests { let test_case = setup_multi_step_aggregate_init_test().await; // Generate some new reports using the existing reports' metadata, but varying the - // measurement values such that the prepare state computed during aggregation + // measurement values such that the verify state computed during aggregation // initializaton won't match the first aggregation job. - let mutated_prepare_inits = test_case + let mutated_verify_inits = test_case .aggregation_job_init_req - .prepare_inits() + .verify_inits() .iter() .map(|s| { test_case - .prepare_init_generator + .verify_init_generator .next_with_metadata(s.report_share().metadata().clone(), &13) .0 }) @@ -1106,7 +1099,7 @@ mod tests { let mutated_aggregation_job_init_req = AggregationJobInitializeReq::new( test_case.aggregation_param.get_encoded().unwrap(), PartialBatchSelector::new_time_interval(), - mutated_prepare_inits, + mutated_verify_inits, ); let response = put_aggregation_job( @@ -1136,7 +1129,7 @@ mod tests { Vec::from([ // Barely tolerable. 
test_case - .prepare_init_generator + .verify_init_generator .next_with_metadata( ReportMetadata::new( random(), @@ -1153,7 +1146,7 @@ mod tests { .0, // Barely intolerable. test_case - .prepare_init_generator + .verify_init_generator .next_with_metadata( ReportMetadata::new( random(), @@ -1183,21 +1176,18 @@ mod tests { assert_eq!(response.status(), StatusCode::CREATED); let aggregation_job_resp: AggregationJobResp = decode_response_body(&mut response).await; - let prepare_resps = assert_matches!( + let verify_resps = assert_matches!( aggregation_job_resp, - AggregationJobResp { prepare_resps } => prepare_resps + AggregationJobResp { verify_resps } => verify_resps ); assert_eq!( - prepare_resps.len(), - test_case.aggregation_job_init_req.prepare_inits().len(), + verify_resps.len(), + test_case.aggregation_job_init_req.verify_inits().len(), ); + assert_matches!(verify_resps[0].result(), &VerifyStepResult::Continue { .. }); assert_matches!( - prepare_resps[0].result(), - &PrepareStepResult::Continue { .. } - ); - assert_matches!( - prepare_resps[1].result(), - &PrepareStepResult::Reject(ReportError::ReportTooEarly) + verify_resps[1].result(), + &VerifyStepResult::Reject(ReportError::ReportTooEarly) ); } @@ -1241,7 +1231,7 @@ mod tests { let vdaf = dummy::Vdaf::new(1); let aggregation_param = dummy::AggregationParam(0); - let prepare_init_generator = PrepareInitGenerator::new( + let verify_init_generator = VerifyInitGenerator::new( clock.clone(), helper_task.clone(), keypair.config().clone(), @@ -1256,7 +1246,7 @@ mod tests { PartialBatchSelector::new_time_interval(), Vec::from([ // Report with timestamp before task end should be accepted. - prepare_init_generator + verify_init_generator .next_with_metadata( ReportMetadata::new( random(), @@ -1267,7 +1257,7 @@ mod tests { ) .0, // Report with timestamp after task end should be rejected. 
- prepare_init_generator + verify_init_generator .next_with_metadata( ReportMetadata::new( random(), @@ -1278,7 +1268,7 @@ mod tests { ) .0, // Report with timestamp exactly at task end should also be rejected. - prepare_init_generator + verify_init_generator .next_with_metadata( ReportMetadata::new(random(), task_end_time, Vec::new()), &0, @@ -1297,25 +1287,22 @@ mod tests { assert_eq!(response.status(), StatusCode::CREATED); let aggregation_job_resp: AggregationJobResp = decode_response_body(&mut response).await; - let prepare_resps = assert_matches!( + let verify_resps = assert_matches!( aggregation_job_resp, - AggregationJobResp { prepare_resps } => prepare_resps + AggregationJobResp { verify_resps } => verify_resps ); assert_eq!( - prepare_resps.len(), - aggregation_job_init_req.prepare_inits().len(), - ); - assert_matches!( - prepare_resps[0].result(), - &PrepareStepResult::Continue { .. } + verify_resps.len(), + aggregation_job_init_req.verify_inits().len(), ); + assert_matches!(verify_resps[0].result(), &VerifyStepResult::Continue { .. }); assert_matches!( - prepare_resps[1].result(), - &PrepareStepResult::Reject(ReportError::TaskExpired) + verify_resps[1].result(), + &VerifyStepResult::Reject(ReportError::TaskExpired) ); assert_matches!( - prepare_resps[2].result(), - &PrepareStepResult::Reject(ReportError::TaskExpired) + verify_resps[2].result(), + &VerifyStepResult::Reject(ReportError::TaskExpired) ); } @@ -1385,7 +1372,7 @@ mod tests { let vdaf = dummy::Vdaf::new(1); let aggregation_param = dummy::AggregationParam(0); - let prepare_init_generator = PrepareInitGenerator::new( + let verify_init_generator = VerifyInitGenerator::new( clock.clone(), helper_task.clone(), keypair.config().clone(), @@ -1400,7 +1387,7 @@ mod tests { PartialBatchSelector::new_time_interval(), Vec::from([ // Report with timestamp after task start should be accepted. 
- prepare_init_generator + verify_init_generator .next_with_metadata( ReportMetadata::new( random(), @@ -1411,7 +1398,7 @@ mod tests { ) .0, // Report with timestamp before task start should be rejected. - prepare_init_generator + verify_init_generator .next_with_metadata( ReportMetadata::new( random(), @@ -1434,21 +1421,18 @@ mod tests { assert_eq!(response.status(), StatusCode::CREATED); let aggregation_job_resp: AggregationJobResp = decode_response_body(&mut response).await; - let prepare_resps = assert_matches!( + let verify_resps = assert_matches!( aggregation_job_resp, - AggregationJobResp { prepare_resps } => prepare_resps + AggregationJobResp { verify_resps } => verify_resps ); assert_eq!( - prepare_resps.len(), - aggregation_job_init_req.prepare_inits().len(), - ); - assert_matches!( - prepare_resps[0].result(), - &PrepareStepResult::Continue { .. } + verify_resps.len(), + aggregation_job_init_req.verify_inits().len(), ); + assert_matches!(verify_resps[0].result(), &VerifyStepResult::Continue { .. 
}); assert_matches!( - prepare_resps[1].result(), - &PrepareStepResult::Reject(ReportError::TaskNotStarted) + verify_resps[1].result(), + &VerifyStepResult::Reject(ReportError::TaskNotStarted) ); } @@ -1461,7 +1445,7 @@ mod tests { let wrong_query = AggregationJobInitializeReq::new( test_case.aggregation_param.get_encoded().unwrap(), PartialBatchSelector::new_leader_selected(random()), - test_case.aggregation_job_init_req.prepare_inits().to_vec(), + test_case.aggregation_job_init_req.verify_inits().to_vec(), ); let req = Request::builder() diff --git a/aggregator/src/aggregator/aggregation_job_writer.rs b/aggregator/src/aggregator/aggregation_job_writer.rs index f0d86fba4..5b97cf9a5 100644 --- a/aggregator/src/aggregator/aggregation_job_writer.rs +++ b/aggregator/src/aggregator/aggregation_job_writer.rs @@ -21,8 +21,8 @@ use janus_aggregator_core::{ }; use janus_core::{report_id::ReportIdChecksumExt as _, time::Clock, vdaf::VdafInstance}; use janus_messages::{ - AggregationJobId, Interval, PrepareResp, PrepareStepResult, ReportError, ReportId, - ReportIdChecksum, Time, + AggregationJobId, Interval, ReportError, ReportId, ReportIdChecksum, Time, VerifyResp, + VerifyStepResult, }; use opentelemetry::{ KeyValue, @@ -166,10 +166,10 @@ where /// operation (aggregation into a collected batch is not allowed). These report aggregations /// will be written with a `Failed(BatchCollected)` state. /// - /// A map from aggregation job ID to the associated preparation responses (if any) will be + /// A map from aggregation job ID to the associated verification responses (if any) will be /// returned, along with aggregation counters indicating occurrences of aggregation-related - /// events. In the case that a report aggregation was unaggregatable, these preparation - /// responses will be updated from the preparation responses originally included in the given + /// events. 
In the case that a report aggregation was unaggregatable, these verification + /// responses will be updated from the verification responses originally included in the given /// report aggregations. #[tracing::instrument( name = "AggregationJobWriter::write", @@ -180,7 +180,7 @@ where &self, tx: &Transaction<'_, C>, vdaf: Arc, - ) -> Result>, Error> { + ) -> Result>, Error> { // Read & update state based on the aggregation jobs to be written. We will read batch // aggregations, then update aggregation jobs/report aggregations/batch aggregations based // on the state we read. @@ -241,7 +241,7 @@ where .report_aggregations .iter() .map(AsRef::as_ref) - .filter_map(RA::Borrowed::last_prep_resp) + .filter_map(RA::Borrowed::last_verify_resp) .cloned() .collect(), ) @@ -732,8 +732,8 @@ where #[cfg(feature = "test-util")] Fake { rounds: _ } - | FakeFailsPrepInit - | FakeFailsPrepStep => metrics + | FakeFailsVerifyInit + | FakeFailsVerifyStep => metrics .aggregated_report_share_dimension_histogram .record(0, &[KeyValue::new("type", "Fake")]), _ => metrics @@ -757,11 +757,11 @@ where }); self.writer .task_aggregation_counters - .increment_with_report_error(ReportError::VdafPrepError); + .increment_with_report_error(ReportError::VdafVerifyError); *report_aggregation.to_mut() = report_aggregation .as_ref() .clone() - .with_failure(ReportError::VdafPrepError); + .with_failure(ReportError::VdafVerifyError); } } } @@ -917,8 +917,8 @@ pub trait ReportAggregationUpdate Self; - /// Returns the last preparation response from this report aggregation, if any. - fn last_prep_resp(&self) -> Option<&PrepareResp>; + /// Returns the last verification response from this report aggregation, if any. + fn last_verify_resp(&self) -> Option<&VerifyResp>; /// Write this report aggregation to the datastore. This must be used only with newly-created /// report aggregations. 
@@ -972,13 +972,13 @@ impl> ReportAggregationUpd .with_state(ReportAggregationState::Failed { report_error }); // This check effectively checks if we are the Helper. (The Helper will always set - // last_prep_resp for all non-failed report aggregations, and most failed report + // last_verify_resp for all non-failed report aggregations, and most failed report // aggregations [everything but ReportDropped].) - if report_aggregation.last_prep_resp().is_some() { + if report_aggregation.last_verify_resp().is_some() { let report_id = *report_aggregation.report_id(); - report_aggregation = report_aggregation.with_last_prep_resp(Some(PrepareResp::new( + report_aggregation = report_aggregation.with_last_verify_resp(Some(VerifyResp::new( report_id, - PrepareStepResult::Reject(report_error), + VerifyStepResult::Reject(report_error), ))); } @@ -988,9 +988,9 @@ impl> ReportAggregationUpd } } - /// Returns the last preparation response from this report aggregation, if any. - fn last_prep_resp(&self) -> Option<&PrepareResp> { - self.report_aggregation.last_prep_resp() + /// Returns the last verification response from this report aggregation, if any. + fn last_verify_resp(&self) -> Option<&VerifyResp> { + self.report_aggregation.last_verify_resp() } async fn write_new(&self, tx: &Transaction) -> Result<(), Error> { @@ -1037,8 +1037,8 @@ where self.with_state(ReportAggregationMetadataState::Failed { report_error }) } - /// Returns the last preparation response from this report aggregation, if any. - fn last_prep_resp(&self) -> Option<&PrepareResp> { + /// Returns the last verification response from this report aggregation, if any. + fn last_verify_resp(&self) -> Option<&VerifyResp> { None } @@ -1089,9 +1089,9 @@ where Self::Owned(self.into_owned().with_failure(report_error)) } - /// Returns the last preparation response from this report aggregation, if any. 
- fn last_prep_resp(&self) -> Option<&PrepareResp> { - self.as_ref().last_prep_resp() + /// Returns the last verification response from this report aggregation, if any. + fn last_verify_resp(&self) -> Option<&VerifyResp> { + self.as_ref().last_verify_resp() } async fn write_new(&self, tx: &Transaction) -> Result<(), Error> { diff --git a/aggregator/src/aggregator/error.rs b/aggregator/src/aggregator/error.rs index 41de73152..27ae6bd11 100644 --- a/aggregator/src/aggregator/error.rs +++ b/aggregator/src/aggregator/error.rs @@ -406,15 +406,15 @@ pub(crate) fn handle_ping_pong_error( let (error_desc, value) = match ping_pong_error { PingPongError::VdafVerifyInit(_) => ( "Couldn't helper_initialize report share".to_string(), - "prepare_init_failure".to_string(), + "verify_init_failure".to_string(), ), PingPongError::VdafVerifierSharesToMessage(_) => ( "Couldn't compute prepare message".to_string(), - "prepare_message_failure".to_string(), + "verify_message_failure".to_string(), ), PingPongError::VdafVerifyNext(_) => ( - "Prepare next failed".to_string(), - "prepare_next_failure".to_string(), + "Verify next failed".to_string(), + "verify_next_failure".to_string(), ), PingPongError::CodecVerifierShare(_) => ( format!("Couldn't decode {peer_role} prepare share"), @@ -442,6 +442,6 @@ pub(crate) fn handle_ping_pong_error( aggregate_step_failure_counter.add(1, &[KeyValue::new("type", value)]); // Per DAP, any occurrence of state Rejected() from a ping-pong routime is translated to - // VdafPrepError - ReportError::VdafPrepError + // VdafVerifyError + ReportError::VdafVerifyError } diff --git a/aggregator/src/aggregator/http_handlers/tests/aggregate_share.rs b/aggregator/src/aggregator/http_handlers/tests/aggregate_share.rs index 4a5654237..974c73a9d 100644 --- a/aggregator/src/aggregator/http_handlers/tests/aggregate_share.rs +++ b/aggregator/src/aggregator/http_handlers/tests/aggregate_share.rs @@ -67,7 +67,7 @@ async fn aggregate_share_request_to_leader() { .. 
} = HttpHandlerTest::new().await; - // Prepare parameters. + // Set up parameters. let task = TaskBuilder::new( BatchMode::TimeInterval, AggregationMode::Synchronous, @@ -112,7 +112,7 @@ async fn aggregate_share_request_invalid_batch_interval() { .. } = HttpHandlerTest::new().await; - // Prepare parameters. + // Set up parameters. let time_precision = TimePrecision::from_hours(8); let task = TaskBuilder::new( BatchMode::TimeInterval, diff --git a/aggregator/src/aggregator/http_handlers/tests/aggregation_job_continue.rs b/aggregator/src/aggregator/http_handlers/tests/aggregation_job_continue.rs index 3908634d0..8e103e182 100644 --- a/aggregator/src/aggregator/http_handlers/tests/aggregation_job_continue.rs +++ b/aggregator/src/aggregator/http_handlers/tests/aggregation_job_continue.rs @@ -22,8 +22,8 @@ use janus_core::{ }; use janus_messages::{ AggregationJobContinueReq, AggregationJobResp, AggregationJobStep, Duration, Interval, - PrepareContinue, PrepareResp, PrepareStepResult, ReportError, ReportId, ReportIdChecksum, - ReportMetadata, Role, Time, TimePrecision, batch_mode::TimeInterval, + ReportError, ReportId, ReportIdChecksum, ReportMetadata, Role, Time, TimePrecision, + VerifyContinue, VerifyResp, VerifyStepResult, batch_mode::TimeInterval, }; use prio::{ topology::ping_pong::PingPongMessage, @@ -84,10 +84,8 @@ async fn aggregate_continue_sync() { report_metadata_0.id(), &measurement, ); - let helper_prep_state_0 = transcript_0.helper_prepare_transitions[0].prepare_state(); - let leader_prep_message_0 = transcript_0.leader_prepare_transitions[1] - .message() - .unwrap(); + let helper_verify_state_0 = transcript_0.helper_verify_transitions[0].verify_state(); + let leader_verify_message_0 = transcript_0.leader_verify_transitions[1].message().unwrap(); let report_share_0 = generate_helper_report_share::( *task.id(), report_metadata_0.clone(), @@ -112,7 +110,7 @@ async fn aggregate_continue_sync() { &measurement, ); - let helper_prep_state_1 = 
transcript_1.helper_prepare_transitions[0].prepare_state(); + let helper_verify_state_1 = transcript_1.helper_verify_transitions[0].verify_state(); let report_share_1 = generate_helper_report_share::( *task.id(), report_metadata_1.clone(), @@ -137,10 +135,8 @@ async fn aggregate_continue_sync() { report_metadata_2.id(), &measurement, ); - let helper_prep_state_2 = transcript_2.helper_prepare_transitions[0].prepare_state(); - let leader_prep_message_2 = transcript_2.leader_prepare_transitions[1] - .message() - .unwrap(); + let helper_verify_state_2 = transcript_2.helper_verify_transitions[0].verify_state(); + let leader_verify_message_2 = transcript_2.leader_verify_transitions[1].message().unwrap(); let report_share_2 = generate_helper_report_share::( *task.id(), report_metadata_2.clone(), @@ -156,9 +152,9 @@ async fn aggregate_continue_sync() { let report_share_0 = report_share_0.clone(); let report_share_1 = report_share_1.clone(); let report_share_2 = report_share_2.clone(); - let helper_prep_state_0 = *helper_prep_state_0; - let helper_prep_state_1 = *helper_prep_state_1; - let helper_prep_state_2 = *helper_prep_state_2; + let helper_verify_state_0 = *helper_verify_state_0; + let helper_verify_state_1 = *helper_verify_state_1; + let helper_verify_state_2 = *helper_verify_state_2; let report_metadata_0 = report_metadata_0.clone(); let report_metadata_1 = report_metadata_1.clone(); let report_metadata_2 = report_metadata_2.clone(); @@ -208,7 +204,7 @@ async fn aggregate_continue_sync() { 0, None, ReportAggregationState::HelperContinue { - prepare_state: helper_prep_state_0, + verify_state: helper_verify_state_0, }, )) .await @@ -221,7 +217,7 @@ async fn aggregate_continue_sync() { 1, None, ReportAggregationState::HelperContinue { - prepare_state: helper_prep_state_1, + verify_state: helper_verify_state_1, }, )) .await @@ -234,7 +230,7 @@ async fn aggregate_continue_sync() { 2, None, ReportAggregationState::HelperContinue { - prepare_state: helper_prep_state_2, + 
verify_state: helper_verify_state_2, }, )) .await @@ -270,8 +266,8 @@ async fn aggregate_continue_sync() { let request = AggregationJobContinueReq::new( AggregationJobStep::from(1), Vec::from([ - PrepareContinue::new(*report_metadata_0.id(), leader_prep_message_0.clone()), - PrepareContinue::new(*report_metadata_2.id(), leader_prep_message_2.clone()), + VerifyContinue::new(*report_metadata_0.id(), leader_verify_message_0.clone()), + VerifyContinue::new(*report_metadata_2.id(), leader_verify_message_2.clone()), ]), ); @@ -283,11 +279,11 @@ async fn aggregate_continue_sync() { assert_eq!( aggregate_resp, Some(AggregationJobResp { - prepare_resps: Vec::from([ - PrepareResp::new(*report_metadata_0.id(), PrepareStepResult::Finished), - PrepareResp::new( + verify_resps: Vec::from([ + VerifyResp::new(*report_metadata_0.id(), VerifyStepResult::Finished), + VerifyResp::new( *report_metadata_2.id(), - PrepareStepResult::Reject(ReportError::BatchCollected), + VerifyStepResult::Reject(ReportError::BatchCollected), ) ]) }) @@ -343,9 +339,9 @@ async fn aggregate_continue_sync() { *report_metadata_0.id(), *report_metadata_0.time(), 0, - Some(PrepareResp::new( + Some(VerifyResp::new( *report_metadata_0.id(), - PrepareStepResult::Finished + VerifyStepResult::Finished )), ReportAggregationState::Finished, ), @@ -366,9 +362,9 @@ async fn aggregate_continue_sync() { *report_metadata_2.id(), *report_metadata_2.time(), 2, - Some(PrepareResp::new( + Some(VerifyResp::new( *report_metadata_2.id(), - PrepareStepResult::Reject(ReportError::BatchCollected) + VerifyStepResult::Reject(ReportError::BatchCollected) )), ReportAggregationState::Failed { report_error: ReportError::BatchCollected @@ -426,10 +422,8 @@ async fn aggregate_continue_async() { report_metadata_0.id(), &measurement, ); - let helper_prep_state_0 = transcript_0.helper_prepare_transitions[0].prepare_state(); - let leader_prep_message_0 = transcript_0.leader_prepare_transitions[1] - .message() - .unwrap(); + let 
helper_verify_state_0 = transcript_0.helper_verify_transitions[0].verify_state(); + let leader_verify_message_0 = transcript_0.leader_verify_transitions[1].message().unwrap(); let report_share_0 = generate_helper_report_share::( *task.id(), report_metadata_0.clone(), @@ -454,7 +448,7 @@ async fn aggregate_continue_async() { &measurement, ); - let helper_prep_state_1 = transcript_1.helper_prepare_transitions[0].prepare_state(); + let helper_verify_state_1 = transcript_1.helper_verify_transitions[0].verify_state(); let report_share_1 = generate_helper_report_share::( *task.id(), report_metadata_1.clone(), @@ -469,8 +463,8 @@ async fn aggregate_continue_async() { let helper_task = helper_task.clone(); let report_share_0 = report_share_0.clone(); let report_share_1 = report_share_1.clone(); - let helper_prep_state_0 = *helper_prep_state_0; - let helper_prep_state_1 = *helper_prep_state_1; + let helper_verify_state_0 = *helper_verify_state_0; + let helper_verify_state_1 = *helper_verify_state_1; let report_metadata_0 = report_metadata_0.clone(); let report_metadata_1 = report_metadata_1.clone(); @@ -512,7 +506,7 @@ async fn aggregate_continue_async() { 0, None, ReportAggregationState::HelperContinue { - prepare_state: helper_prep_state_0, + verify_state: helper_verify_state_0, }, )) .await @@ -525,7 +519,7 @@ async fn aggregate_continue_async() { 1, None, ReportAggregationState::HelperContinue { - prepare_state: helper_prep_state_1, + verify_state: helper_verify_state_1, }, )) .await @@ -539,9 +533,9 @@ async fn aggregate_continue_async() { let request = AggregationJobContinueReq::new( AggregationJobStep::from(1), - Vec::from([PrepareContinue::new( + Vec::from([VerifyContinue::new( *report_metadata_0.id(), - leader_prep_message_0.clone(), + leader_verify_message_0.clone(), )]), ); @@ -604,10 +598,10 @@ async fn aggregate_continue_async() { 0, None, ReportAggregationState::HelperContinueProcessing { - prepare_state: *helper_prep_state_0, - prepare_continue: 
PrepareContinue::new( + verify_state: *helper_verify_state_0, + verify_continue: VerifyContinue::new( *report_metadata_0.id(), - leader_prep_message_0.clone() + leader_verify_message_0.clone() ), }, ), @@ -678,10 +672,8 @@ async fn aggregate_continue_accumulate_batch_aggregation() { report_metadata_0.id(), &measurement, ); - let helper_prep_state_0 = transcript_0.helper_prepare_transitions[0].prepare_state(); - let ping_pong_leader_message_0 = transcript_0.leader_prepare_transitions[1] - .message() - .unwrap(); + let helper_verify_state_0 = transcript_0.helper_verify_transitions[0].verify_state(); + let ping_pong_leader_message_0 = transcript_0.leader_verify_transitions[1].message().unwrap(); let report_share_0 = generate_helper_report_share::( *task.id(), report_metadata_0.clone(), @@ -705,10 +697,8 @@ async fn aggregate_continue_accumulate_batch_aggregation() { report_metadata_1.id(), &measurement, ); - let helper_prep_state_1 = transcript_1.helper_prepare_transitions[0].prepare_state(); - let ping_pong_leader_message_1 = transcript_1.leader_prepare_transitions[1] - .message() - .unwrap(); + let helper_verify_state_1 = transcript_1.helper_verify_transitions[0].verify_state(); + let ping_pong_leader_message_1 = transcript_1.leader_verify_transitions[1].message().unwrap(); let report_share_1 = generate_helper_report_share::( *task.id(), report_metadata_1.clone(), @@ -735,10 +725,8 @@ async fn aggregate_continue_accumulate_batch_aggregation() { report_metadata_2.id(), &measurement, ); - let helper_prep_state_2 = transcript_2.helper_prepare_transitions[0].prepare_state(); - let ping_pong_leader_message_2 = transcript_2.leader_prepare_transitions[1] - .message() - .unwrap(); + let helper_verify_state_2 = transcript_2.helper_verify_transitions[0].verify_state(); + let ping_pong_leader_message_2 = transcript_2.leader_verify_transitions[1].message().unwrap(); let report_share_2 = generate_helper_report_share::( *task.id(), report_metadata_2.clone(), @@ -784,9 +772,9 @@ 
async fn aggregate_continue_accumulate_batch_aggregation() { let report_share_0 = report_share_0.clone(); let report_share_1 = report_share_1.clone(); let report_share_2 = report_share_2.clone(); - let helper_prep_state_0 = *helper_prep_state_0; - let helper_prep_state_1 = *helper_prep_state_1; - let helper_prep_state_2 = *helper_prep_state_2; + let helper_verify_state_0 = *helper_verify_state_0; + let helper_verify_state_1 = *helper_verify_state_1; + let helper_verify_state_2 = *helper_verify_state_2; let report_metadata_0 = report_metadata_0.clone(); let report_metadata_1 = report_metadata_1.clone(); let report_metadata_2 = report_metadata_2.clone(); @@ -837,7 +825,7 @@ async fn aggregate_continue_accumulate_batch_aggregation() { 0, None, ReportAggregationState::HelperContinue { - prepare_state: helper_prep_state_0, + verify_state: helper_verify_state_0, }, )) .await @@ -850,7 +838,7 @@ async fn aggregate_continue_accumulate_batch_aggregation() { 1, None, ReportAggregationState::HelperContinue { - prepare_state: helper_prep_state_1, + verify_state: helper_verify_state_1, }, )) .await @@ -863,7 +851,7 @@ async fn aggregate_continue_accumulate_batch_aggregation() { 2, None, ReportAggregationState::HelperContinue { - prepare_state: helper_prep_state_2, + verify_state: helper_verify_state_2, }, )) .await @@ -903,9 +891,9 @@ async fn aggregate_continue_accumulate_batch_aggregation() { let request = AggregationJobContinueReq::new( AggregationJobStep::from(1), Vec::from([ - PrepareContinue::new(*report_metadata_0.id(), ping_pong_leader_message_0.clone()), - PrepareContinue::new(*report_metadata_1.id(), ping_pong_leader_message_1.clone()), - PrepareContinue::new(*report_metadata_2.id(), ping_pong_leader_message_2.clone()), + VerifyContinue::new(*report_metadata_0.id(), ping_pong_leader_message_0.clone()), + VerifyContinue::new(*report_metadata_1.id(), ping_pong_leader_message_1.clone()), + VerifyContinue::new(*report_metadata_2.id(), ping_pong_leader_message_2.clone()), 
]), ); @@ -1024,10 +1012,8 @@ async fn aggregate_continue_accumulate_batch_aggregation() { report_metadata_3.id(), &measurement, ); - let helper_prep_state_3 = transcript_3.helper_prepare_transitions[0].prepare_state(); - let ping_pong_leader_message_3 = transcript_3.leader_prepare_transitions[1] - .message() - .unwrap(); + let helper_verify_state_3 = transcript_3.helper_verify_transitions[0].verify_state(); + let ping_pong_leader_message_3 = transcript_3.leader_verify_transitions[1].message().unwrap(); let report_share_3 = generate_helper_report_share::( *task.id(), report_metadata_3.clone(), @@ -1054,10 +1040,8 @@ async fn aggregate_continue_accumulate_batch_aggregation() { report_metadata_4.id(), &measurement, ); - let helper_prep_state_4 = transcript_4.helper_prepare_transitions[0].prepare_state(); - let ping_pong_leader_message_4 = transcript_4.leader_prepare_transitions[1] - .message() - .unwrap(); + let helper_verify_state_4 = transcript_4.helper_verify_transitions[0].verify_state(); + let ping_pong_leader_message_4 = transcript_4.leader_verify_transitions[1].message().unwrap(); let report_share_4 = generate_helper_report_share::( *task.id(), report_metadata_4.clone(), @@ -1084,10 +1068,8 @@ async fn aggregate_continue_accumulate_batch_aggregation() { report_metadata_5.id(), &measurement, ); - let helper_prep_state_5 = transcript_5.helper_prepare_transitions[0].prepare_state(); - let ping_pong_leader_message_5 = transcript_5.leader_prepare_transitions[1] - .message() - .unwrap(); + let helper_verify_state_5 = transcript_5.helper_verify_transitions[0].verify_state(); + let ping_pong_leader_message_5 = transcript_5.leader_verify_transitions[1].message().unwrap(); let report_share_5 = generate_helper_report_share::( *task.id(), report_metadata_5.clone(), @@ -1103,9 +1085,9 @@ async fn aggregate_continue_accumulate_batch_aggregation() { let report_share_3 = report_share_3.clone(); let report_share_4 = report_share_4.clone(); let report_share_5 = 
report_share_5.clone(); - let helper_prep_state_3 = *helper_prep_state_3; - let helper_prep_state_4 = *helper_prep_state_4; - let helper_prep_state_5 = *helper_prep_state_5; + let helper_verify_state_3 = *helper_verify_state_3; + let helper_verify_state_4 = *helper_verify_state_4; + let helper_verify_state_5 = *helper_verify_state_5; let report_metadata_3 = report_metadata_3.clone(); let report_metadata_4 = report_metadata_4.clone(); let report_metadata_5 = report_metadata_5.clone(); @@ -1153,7 +1135,7 @@ async fn aggregate_continue_accumulate_batch_aggregation() { 3, None, ReportAggregationState::HelperContinue { - prepare_state: helper_prep_state_3, + verify_state: helper_verify_state_3, }, )) .await @@ -1166,7 +1148,7 @@ async fn aggregate_continue_accumulate_batch_aggregation() { 4, None, ReportAggregationState::HelperContinue { - prepare_state: helper_prep_state_4, + verify_state: helper_verify_state_4, }, )) .await @@ -1179,7 +1161,7 @@ async fn aggregate_continue_accumulate_batch_aggregation() { 5, None, ReportAggregationState::HelperContinue { - prepare_state: helper_prep_state_5, + verify_state: helper_verify_state_5, }, )) .await @@ -1194,9 +1176,9 @@ async fn aggregate_continue_accumulate_batch_aggregation() { let request = AggregationJobContinueReq::new( AggregationJobStep::from(1), Vec::from([ - PrepareContinue::new(*report_metadata_3.id(), ping_pong_leader_message_3.clone()), - PrepareContinue::new(*report_metadata_4.id(), ping_pong_leader_message_4.clone()), - PrepareContinue::new(*report_metadata_5.id(), ping_pong_leader_message_5.clone()), + VerifyContinue::new(*report_metadata_3.id(), ping_pong_leader_message_3.clone()), + VerifyContinue::new(*report_metadata_4.id(), ping_pong_leader_message_4.clone()), + VerifyContinue::new(*report_metadata_5.id(), ping_pong_leader_message_5.clone()), ]), ); @@ -1316,7 +1298,7 @@ async fn aggregate_continue_leader_sends_non_continue_or_finish_transition() { .. 
} = HttpHandlerTest::new().await; - // Prepare parameters. + // Set up parameters. let time_precision = TimePrecision::from_seconds(10); let task = TaskBuilder::new( BatchMode::TimeInterval, @@ -1379,7 +1361,7 @@ async fn aggregate_continue_leader_sends_non_continue_or_finish_transition() { 0, None, ReportAggregationState::HelperContinue { - prepare_state: *transcript.helper_prepare_transitions[0].prepare_state(), + verify_state: *transcript.helper_verify_transitions[0].verify_state(), }, )) .await @@ -1391,7 +1373,7 @@ async fn aggregate_continue_leader_sends_non_continue_or_finish_transition() { // Make request. let request = AggregationJobContinueReq::new( AggregationJobStep::from(1), - Vec::from([PrepareContinue::new( + Vec::from([VerifyContinue::new( *report_metadata.id(), // An AggregationJobContinueReq should only ever contain Continue or Finished PingPongMessage::Initialize { @@ -1401,27 +1383,27 @@ async fn aggregate_continue_leader_sends_non_continue_or_finish_transition() { ); let resp = post_aggregation_job_and_decode(&task, &aggregation_job_id, &request, &router).await; - let prepare_resps = - assert_matches!(resp, Some(AggregationJobResp{prepare_resps}) => prepare_resps); - assert_eq!(prepare_resps.len(), 1); + let verify_resps = + assert_matches!(resp, Some(AggregationJobResp{verify_resps}) => verify_resps); + assert_eq!(verify_resps.len(), 1); assert_eq!( - prepare_resps[0], - PrepareResp::new( + verify_resps[0], + VerifyResp::new( *report_metadata.id(), - PrepareStepResult::Reject(ReportError::VdafPrepError), + VerifyStepResult::Reject(ReportError::VdafVerifyError), ) ); assert_task_aggregation_counter( &datastore, *task.id(), - TaskAggregationCounter::default().with_vdaf_prep_error(1), + TaskAggregationCounter::default().with_vdaf_verify_error(1), ) .await; } #[tokio::test] -async fn aggregate_continue_prep_step_fails() { +async fn aggregate_continue_verify_step_fails() { let HttpHandlerTest { ephemeral_datastore: _ephemeral_datastore, datastore, 
@@ -1430,7 +1412,7 @@ async fn aggregate_continue_prep_step_fails() { .. } = HttpHandlerTest::new().await; - // Prepare parameters. + // Set up parameters. let time_precision = TimePrecision::from_seconds(10); let task = TaskBuilder::new( BatchMode::TimeInterval, @@ -1502,7 +1484,7 @@ async fn aggregate_continue_prep_step_fails() { 0, None, ReportAggregationState::HelperContinue { - prepare_state: *transcript.helper_prepare_transitions[0].prepare_state(), + verify_state: *transcript.helper_verify_transitions[0].verify_state(), }, )) .await @@ -1514,7 +1496,7 @@ async fn aggregate_continue_prep_step_fails() { // Make request. let request = AggregationJobContinueReq::new( AggregationJobStep::from(1), - Vec::from([PrepareContinue::new( + Vec::from([VerifyContinue::new( *report_metadata.id(), PingPongMessage::Continue { verifier_message: Vec::new(), @@ -1528,9 +1510,9 @@ async fn aggregate_continue_prep_step_fails() { assert_eq!( aggregate_resp, Some(AggregationJobResp { - prepare_resps: Vec::from([PrepareResp::new( + verify_resps: Vec::from([VerifyResp::new( *report_metadata.id(), - PrepareStepResult::Reject(ReportError::VdafPrepError), + VerifyStepResult::Reject(ReportError::VdafVerifyError), )]) }) ); @@ -1587,12 +1569,12 @@ async fn aggregate_continue_prep_step_fails() { *report_metadata.id(), *report_metadata.time(), 0, - Some(PrepareResp::new( + Some(VerifyResp::new( *report_metadata.id(), - PrepareStepResult::Reject(ReportError::VdafPrepError) + VerifyStepResult::Reject(ReportError::VdafVerifyError) )), ReportAggregationState::Failed { - report_error: ReportError::VdafPrepError + report_error: ReportError::VdafVerifyError }, ) ); @@ -1600,7 +1582,7 @@ async fn aggregate_continue_prep_step_fails() { assert_task_aggregation_counter( &datastore, *task.id(), - TaskAggregationCounter::default().with_vdaf_prep_error(1), + TaskAggregationCounter::default().with_vdaf_verify_error(1), ) .await; } @@ -1614,7 +1596,7 @@ async fn aggregate_continue_unexpected_transition() { 
.. } = HttpHandlerTest::new().await; - // Prepare parameters. + // Set up parameters. let time_precision = TimePrecision::from_seconds(10); let task = TaskBuilder::new( BatchMode::TimeInterval, @@ -1676,7 +1658,7 @@ async fn aggregate_continue_unexpected_transition() { 0, None, ReportAggregationState::HelperContinue { - prepare_state: *transcript.helper_prepare_transitions[0].prepare_state(), + verify_state: *transcript.helper_verify_transitions[0].verify_state(), }, )) .await @@ -1688,7 +1670,7 @@ async fn aggregate_continue_unexpected_transition() { // Make request. let request = AggregationJobContinueReq::new( AggregationJobStep::from(1), - Vec::from([PrepareContinue::new( + Vec::from([VerifyContinue::new( ReportId::from( [16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1], // not the same as above ), @@ -1707,7 +1689,7 @@ async fn aggregate_continue_unexpected_transition() { StatusCode::BAD_REQUEST, "urn:ietf:params:ppm:dap:error:invalidMessage", "The message type for a response was incorrect or the payload was malformed.", - Some("leader sent unexpected, duplicate, or out-of-order prepare steps"), + Some("leader sent unexpected, duplicate, or out-of-order verify steps"), None, ) .await; @@ -1725,7 +1707,7 @@ async fn aggregate_continue_out_of_order_transition() { .. } = HttpHandlerTest::new().await; - // Prepare parameters. + // Set up parameters. 
let time_precision = TimePrecision::from_seconds(10); let task = TaskBuilder::new( BatchMode::TimeInterval, @@ -1813,7 +1795,7 @@ async fn aggregate_continue_out_of_order_transition() { 0, None, ReportAggregationState::HelperContinue { - prepare_state: *transcript_0.helper_prepare_transitions[0].prepare_state(), + verify_state: *transcript_0.helper_verify_transitions[0].verify_state(), }, )) .await @@ -1826,7 +1808,7 @@ async fn aggregate_continue_out_of_order_transition() { 1, None, ReportAggregationState::HelperContinue { - prepare_state: *transcript_1.helper_prepare_transitions[0].prepare_state(), + verify_state: *transcript_1.helper_verify_transitions[0].verify_state(), }, )) .await @@ -1840,14 +1822,14 @@ async fn aggregate_continue_out_of_order_transition() { AggregationJobStep::from(1), Vec::from([ // Report IDs are in opposite order to what was stored in the datastore. - PrepareContinue::new( + VerifyContinue::new( *report_metadata_1.id(), PingPongMessage::Continue { verifier_message: Vec::new(), verifier_share: Vec::new(), }, ), - PrepareContinue::new( + VerifyContinue::new( *report_metadata_0.id(), PingPongMessage::Continue { verifier_message: Vec::new(), @@ -1864,7 +1846,7 @@ async fn aggregate_continue_out_of_order_transition() { StatusCode::BAD_REQUEST, "urn:ietf:params:ppm:dap:error:invalidMessage", "The message type for a response was incorrect or the payload was malformed.", - Some("leader sent unexpected, duplicate, or out-of-order prepare steps"), + Some("leader sent unexpected, duplicate, or out-of-order verify steps"), None, ) .await; @@ -1882,7 +1864,7 @@ async fn aggregate_continue_for_non_waiting_aggregation() { .. } = HttpHandlerTest::new().await; - // Prepare parameters. + // Set up parameters. 
let time_precision = TimePrecision::from_seconds(10); let task = TaskBuilder::new( BatchMode::TimeInterval, @@ -1933,7 +1915,7 @@ async fn aggregate_continue_for_non_waiting_aggregation() { 0, None, ReportAggregationState::Failed { - report_error: ReportError::VdafPrepError, + report_error: ReportError::VdafVerifyError, }, )) .await @@ -1945,7 +1927,7 @@ async fn aggregate_continue_for_non_waiting_aggregation() { // Make request. let request = AggregationJobContinueReq::new( AggregationJobStep::from(1), - Vec::from([PrepareContinue::new( + Vec::from([VerifyContinue::new( ReportId::from([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), PingPongMessage::Continue { verifier_message: Vec::new(), @@ -1961,7 +1943,7 @@ async fn aggregate_continue_for_non_waiting_aggregation() { StatusCode::BAD_REQUEST, "urn:ietf:params:ppm:dap:error:invalidMessage", "The message type for a response was incorrect or the payload was malformed.", - Some("leader sent prepare step for non-CONTINUE report aggregation"), + Some("leader sent verify step for non-CONTINUE report aggregation"), None, ) .await; diff --git a/aggregator/src/aggregator/http_handlers/tests/aggregation_job_get.rs b/aggregator/src/aggregator/http_handlers/tests/aggregation_job_get.rs index 9e76ab10e..6f89603b8 100644 --- a/aggregator/src/aggregator/http_handlers/tests/aggregation_job_get.rs +++ b/aggregator/src/aggregator/http_handlers/tests/aggregation_job_get.rs @@ -18,8 +18,8 @@ use janus_core::{ vdaf::VdafInstance, }; use janus_messages::{ - AggregationJobId, AggregationJobResp, AggregationJobStep, Interval, MediaType, PrepareInit, - PrepareResp, PrepareStepResult, ReportMetadata, batch_mode::TimeInterval, + AggregationJobId, AggregationJobResp, AggregationJobStep, Interval, MediaType, ReportMetadata, + VerifyInit, VerifyResp, VerifyStepResult, batch_mode::TimeInterval, }; use prio::vdaf::dummy; use rand::random; @@ -32,7 +32,7 @@ use crate::aggregator::{ #[tokio::test] async fn aggregation_job_get_ready() { - 
// Prepare state. + // Set up state. let HttpHandlerTest { clock, ephemeral_datastore: _ephemeral_datastore, @@ -68,7 +68,7 @@ async fn aggregation_job_get_ready() { report_metadata.id(), &measurement, ); - let helper_message = transcript.helper_prepare_transitions[0].message().unwrap(); + let helper_message = transcript.helper_verify_transitions[0].message().unwrap(); datastore .run_unnamed_tx(|tx| { @@ -105,9 +105,9 @@ async fn aggregation_job_get_ready() { *report_metadata.id(), *report_metadata.time(), 0, - Some(PrepareResp::new( + Some(VerifyResp::new( *report_metadata.id(), - PrepareStepResult::Continue { + VerifyStepResult::Continue { message: helper_message, }, )), @@ -135,9 +135,9 @@ async fn aggregation_job_get_ready() { assert_eq!( aggregate_resp, AggregationJobResp { - prepare_resps: Vec::from([PrepareResp::new( + verify_resps: Vec::from([VerifyResp::new( *report_metadata.id(), - PrepareStepResult::Continue { + VerifyStepResult::Continue { message: helper_message.clone(), } )]) @@ -147,7 +147,7 @@ async fn aggregation_job_get_ready() { #[tokio::test] async fn aggregation_job_get_unready() { - // Prepare state. + // Set up state. 
let HttpHandlerTest { clock, ephemeral_datastore: _ephemeral_datastore, @@ -183,7 +183,7 @@ async fn aggregation_job_get_unready() { report_metadata.id(), &measurement, ); - let leader_message = transcript.leader_prepare_transitions[0].message().unwrap(); + let leader_message = transcript.leader_verify_transitions[0].message().unwrap(); let report_share = generate_helper_report_share::( *task.id(), report_metadata.clone(), @@ -231,7 +231,7 @@ async fn aggregation_job_get_unready() { 0, None, ReportAggregationState::HelperInitProcessing { - prepare_init: PrepareInit::new(report_share, leader_message), + verify_init: VerifyInit::new(report_share, leader_message), require_taskbind_extension: false, }, )) @@ -260,7 +260,7 @@ async fn aggregation_job_get_unready() { #[tokio::test] async fn aggregation_job_get_wrong_step() { - // Prepare state. + // Set up state. let HttpHandlerTest { clock, ephemeral_datastore: _ephemeral_datastore, @@ -296,7 +296,7 @@ async fn aggregation_job_get_wrong_step() { report_metadata.id(), &measurement, ); - let helper_message = transcript.helper_prepare_transitions[0].message().unwrap(); + let helper_message = transcript.helper_verify_transitions[0].message().unwrap(); datastore .run_unnamed_tx(|tx| { @@ -333,9 +333,9 @@ async fn aggregation_job_get_wrong_step() { *report_metadata.id(), *report_metadata.time(), 0, - Some(PrepareResp::new( + Some(VerifyResp::new( *report_metadata.id(), - PrepareStepResult::Continue { + VerifyStepResult::Continue { message: helper_message, }, )), @@ -365,7 +365,7 @@ async fn aggregation_job_get_wrong_step() { #[tokio::test] async fn aggregation_job_get_missing_step() { - // Prepare state. + // Set up state. 
let HttpHandlerTest { clock, ephemeral_datastore: _ephemeral_datastore, @@ -401,7 +401,7 @@ async fn aggregation_job_get_missing_step() { report_metadata.id(), &measurement, ); - let helper_message = transcript.helper_prepare_transitions[0].message().unwrap(); + let helper_message = transcript.helper_verify_transitions[0].message().unwrap(); datastore .run_unnamed_tx(|tx| { @@ -438,9 +438,9 @@ async fn aggregation_job_get_missing_step() { *report_metadata.id(), *report_metadata.time(), 0, - Some(PrepareResp::new( + Some(VerifyResp::new( *report_metadata.id(), - PrepareStepResult::Continue { + VerifyStepResult::Continue { message: helper_message, }, )), @@ -464,7 +464,7 @@ async fn aggregation_job_get_missing_step() { #[tokio::test] async fn aggregation_job_get_sync() { - // Prepare state. + // Set up state. let HttpHandlerTest { clock, ephemeral_datastore: _ephemeral_datastore, @@ -500,7 +500,7 @@ async fn aggregation_job_get_sync() { report_metadata.id(), &measurement, ); - let helper_message = transcript.helper_prepare_transitions[0].message().unwrap(); + let helper_message = transcript.helper_verify_transitions[0].message().unwrap(); datastore .run_unnamed_tx(|tx| { @@ -537,9 +537,9 @@ async fn aggregation_job_get_sync() { *report_metadata.id(), *report_metadata.time(), 0, - Some(PrepareResp::new( + Some(VerifyResp::new( *report_metadata.id(), - PrepareStepResult::Continue { + VerifyStepResult::Continue { message: helper_message, }, )), diff --git a/aggregator/src/aggregator/http_handlers/tests/aggregation_job_init.rs b/aggregator/src/aggregator/http_handlers/tests/aggregation_job_init.rs index f13979543..5d703a35a 100644 --- a/aggregator/src/aggregator/http_handlers/tests/aggregation_job_init.rs +++ b/aggregator/src/aggregator/http_handlers/tests/aggregation_job_init.rs @@ -25,8 +25,8 @@ use janus_core::{ use janus_messages::{ AggregateShareReq, AggregationJobId, AggregationJobInitializeReq, AggregationJobResp, BatchId, BatchSelector, Extension, ExtensionType, 
HpkeCiphertext, HpkeConfigId, InputShareAad, Interval, - MediaType, PartialBatchSelector, PrepareInit, PrepareStepResult, ReportError, ReportIdChecksum, - ReportMetadata, ReportShare, Role, Time, + MediaType, PartialBatchSelector, ReportError, ReportIdChecksum, ReportMetadata, ReportShare, + Role, Time, VerifyInit, VerifyStepResult, batch_mode::{LeaderSelected, TimeInterval}, }; use prio::{codec::Encode, vdaf::dummy}; @@ -36,7 +36,7 @@ use tower::ServiceExt; use crate::aggregator::{ BatchAggregationsIterator, - aggregation_job_init::test_util::{PrepareInitGenerator, put_aggregation_job}, + aggregation_job_init::test_util::{VerifyInitGenerator, put_aggregation_job}, http_handlers::{ test_util::{ HttpHandlerTest, decode_response_body, take_problem_details, take_response_body, @@ -229,7 +229,7 @@ async fn aggregate_init_sync() { let vdaf = dummy::Vdaf::new(1); let verify_key: VerifyKey<0> = task.vdaf_verify_key().unwrap(); let measurement = 0; - let prep_init_generator = PrepareInitGenerator::new( + let verify_init_generator = VerifyInitGenerator::new( clock.clone(), helper_task.clone(), hpke_keypair.config().clone(), @@ -237,13 +237,13 @@ async fn aggregate_init_sync() { dummy::AggregationParam(0), ); - // prepare_init_0 is a "happy path" report. - let (prepare_init_0, transcript_0) = prep_init_generator.next(&measurement); + // verify_init_0 is a "happy path" report. + let (verify_init_0, transcript_0) = verify_init_generator.next(&measurement); // report_share_1 fails decryption. 
- let (prepare_init_1, transcript_1) = prep_init_generator.next(&measurement); + let (verify_init_1, transcript_1) = verify_init_generator.next(&measurement); - let encrypted_input_share = prepare_init_1.report_share().encrypted_input_share(); + let encrypted_input_share = verify_init_1.report_share().encrypted_input_share(); let mut corrupted_payload = encrypted_input_share.payload().to_vec(); corrupted_payload[0] ^= 0xFF; let corrupted_input_share = HpkeCiphertext::new( @@ -252,38 +252,38 @@ async fn aggregate_init_sync() { corrupted_payload, ); - let prepare_init_1 = PrepareInit::new( + let verify_init_1 = VerifyInit::new( ReportShare::new( - prepare_init_1.report_share().metadata().clone(), + verify_init_1.report_share().metadata().clone(), transcript_1.public_share.get_encoded().unwrap(), corrupted_input_share, ), - prepare_init_1.message().clone(), + verify_init_1.message().clone(), ); - // prepare_init_2 fails decoding due to an issue with the input share. - let (prepare_init_2, transcript_2) = prep_init_generator.next(&measurement); + // verify_init_2 fails decoding due to an issue with the input share. + let (verify_init_2, transcript_2) = verify_init_generator.next(&measurement); let mut input_share_bytes = transcript_2.helper_input_share.get_encoded().unwrap(); input_share_bytes.push(0); // can no longer be decoded. 
let report_share_2 = generate_helper_report_share_for_plaintext( - prepare_init_2.report_share().metadata().clone(), + verify_init_2.report_share().metadata().clone(), hpke_keypair.config(), transcript_2.public_share.get_encoded().unwrap(), &input_share_bytes, &InputShareAad::new( *task.id(), - prepare_init_2.report_share().metadata().clone(), + verify_init_2.report_share().metadata().clone(), transcript_2.public_share.get_encoded().unwrap(), ) .get_encoded() .unwrap(), ); - let prepare_init_2 = PrepareInit::new(report_share_2, prepare_init_2.message().clone()); + let verify_init_2 = VerifyInit::new(report_share_2, verify_init_2.message().clone()); - // prepare_init_3 has an unknown HPKE config ID. - let (prepare_init_3, transcript_3) = prep_init_generator.next(&measurement); + // verify_init_3 has an unknown HPKE config ID. + let (verify_init_3, transcript_3) = verify_init_generator.next(&measurement); let unused_hpke_config_id = HpkeConfigId::from(u8::from(*hpke_keypair.config().id()).wrapping_add(1)); @@ -293,20 +293,20 @@ async fn aggregate_init_sync() { let report_share_3 = generate_helper_report_share::( *task.id(), - prepare_init_3.report_share().metadata().clone(), + verify_init_3.report_share().metadata().clone(), &wrong_hpke_config, &transcript_3.public_share, Vec::new(), &transcript_3.helper_input_share, ); - let prepare_init_3 = PrepareInit::new(report_share_3, prepare_init_3.message().clone()); + let verify_init_3 = VerifyInit::new(report_share_3, verify_init_3.message().clone()); - // prepare_init_4 has already been aggregated in another aggregation job, with the same + // verify_init_4 has already been aggregated in another aggregation job, with the same // aggregation parameter. - let (prepare_init_4, _) = prep_init_generator.next(&measurement); + let (verify_init_4, _) = verify_init_generator.next(&measurement); - // prepare_init_5 falls into a batch that has already been collected. 
+ // verify_init_5 falls into a batch that has already been collected. let past_clock = MockClock::new(task.time_precision().as_seconds() / 2); let report_metadata_5 = ReportMetadata::new( random(), @@ -330,15 +330,15 @@ async fn aggregate_init_sync() { &transcript_5.helper_input_share, ); - let prepare_init_5 = PrepareInit::new( + let verify_init_5 = VerifyInit::new( report_share_5, - transcript_5.leader_prepare_transitions[0] + transcript_5.leader_verify_transitions[0] .message() .unwrap() .clone(), ); - // prepare_init_6 fails decoding due to an issue with the public share. + // verify_init_6 fails decoding due to an issue with the public share. let public_share_6 = Vec::from([0]); let report_metadata_6 = ReportMetadata::new( random(), @@ -363,15 +363,15 @@ async fn aggregate_init_sync() { .unwrap(), ); - let prepare_init_6 = PrepareInit::new( + let verify_init_6 = VerifyInit::new( report_share_6, - transcript_6.leader_prepare_transitions[0] + transcript_6.leader_verify_transitions[0] .message() .unwrap() .clone(), ); - // prepare_init_7 fails due to having repeated public extensions. + // verify_init_7 fails due to having repeated public extensions. let report_metadata_7 = ReportMetadata::new( random(), clock.now().to_time(task.time_precision()), @@ -397,15 +397,15 @@ async fn aggregate_init_sync() { &transcript_7.helper_input_share, ); - let prepare_init_7 = PrepareInit::new( + let verify_init_7 = VerifyInit::new( report_share_7, - transcript_7.leader_prepare_transitions[0] + transcript_7.leader_verify_transitions[0] .message() .unwrap() .clone(), ); - // prepare_init_8 fails due to having repeated private extensions. + // verify_init_8 fails due to having repeated private extensions. 
let report_metadata_8 = ReportMetadata::new( random(), clock.now().to_time(task.time_precision()), @@ -431,15 +431,15 @@ async fn aggregate_init_sync() { &transcript_8.helper_input_share, ); - let prepare_init_8 = PrepareInit::new( + let verify_init_8 = VerifyInit::new( report_share_8, - transcript_8.leader_prepare_transitions[0] + transcript_8.leader_verify_transitions[0] .message() .unwrap() .clone(), ); - // prepare_init_9 fails due to having repeated extensions between the public & private + // verify_init_9 fails due to having repeated extensions between the public & private // extensions. let report_metadata_9 = ReportMetadata::new( random(), @@ -463,15 +463,15 @@ async fn aggregate_init_sync() { &transcript_9.helper_input_share, ); - let prepare_init_9 = PrepareInit::new( + let verify_init_9 = VerifyInit::new( report_share_9, - transcript_9.leader_prepare_transitions[0] + transcript_9.leader_verify_transitions[0] .message() .unwrap() .clone(), ); - // prepare_init_10 fails due to having unrecognized extension type in public extensions. + // verify_init_10 fails due to having unrecognized extension type in public extensions. let report_metadata_10 = ReportMetadata::new( random(), clock.now().to_time(task.time_precision()), @@ -494,15 +494,15 @@ async fn aggregate_init_sync() { &transcript_10.helper_input_share, ); - let prepare_init_10 = PrepareInit::new( + let verify_init_10 = VerifyInit::new( report_share_10, - transcript_10.leader_prepare_transitions[0] + transcript_10.leader_verify_transitions[0] .message() .unwrap() .clone(), ); - // prepare_init_11 fails due to having unrecognized extension type in private extensions. + // verify_init_11 fails due to having unrecognized extension type in private extensions. 
let report_metadata_11 = ReportMetadata::new( random(), clock.now().to_time(task.time_precision()), @@ -525,9 +525,9 @@ async fn aggregate_init_sync() { &transcript_11.helper_input_share, ); - let prepare_init_11 = PrepareInit::new( + let verify_init_11 = VerifyInit::new( report_share_11, - transcript_11.leader_prepare_transitions[0] + transcript_11.leader_verify_transitions[0] .message() .unwrap() .clone(), @@ -538,7 +538,7 @@ async fn aggregate_init_sync() { datastore .run_unnamed_tx(|tx| { let helper_task = helper_task.clone(); - let report_share_4 = prepare_init_4.report_share().clone(); + let report_share_4 = verify_init_4.report_share().clone(); Box::pin(async move { tx.put_aggregator_task(&helper_task).await.unwrap(); @@ -585,18 +585,18 @@ async fn aggregate_init_sync() { aggregation_param.get_encoded().unwrap(), PartialBatchSelector::new_time_interval(), Vec::from([ - prepare_init_0.clone(), - prepare_init_1.clone(), - prepare_init_2.clone(), - prepare_init_3.clone(), - prepare_init_4.clone(), - prepare_init_5.clone(), - prepare_init_6.clone(), - prepare_init_7.clone(), - prepare_init_8.clone(), - prepare_init_9.clone(), - prepare_init_10.clone(), - prepare_init_11.clone(), + verify_init_0.clone(), + verify_init_1.clone(), + verify_init_2.clone(), + verify_init_3.clone(), + verify_init_4.clone(), + verify_init_5.clone(), + verify_init_6.clone(), + verify_init_7.clone(), + verify_init_8.clone(), + verify_init_9.clone(), + verify_init_10.clone(), + verify_init_11.clone(), ]), ); @@ -610,131 +610,131 @@ async fn aggregate_init_sync() { AggregationJobResp::MEDIA_TYPE ); let aggregate_resp: AggregationJobResp = decode_response_body(&mut response).await; - let prepare_resps = assert_matches!( + let verify_resps = assert_matches!( aggregate_resp, - AggregationJobResp { prepare_resps } => prepare_resps + AggregationJobResp { verify_resps } => verify_resps ); // Validate response. 
- assert_eq!(prepare_resps.len(), 12); + assert_eq!(verify_resps.len(), 12); - let prepare_step_0 = prepare_resps.first().unwrap(); + let verify_step_0 = verify_resps.first().unwrap(); assert_eq!( - prepare_step_0.report_id(), - prepare_init_0.report_share().metadata().id() + verify_step_0.report_id(), + verify_init_0.report_share().metadata().id() ); - assert_matches!(prepare_step_0.result(), PrepareStepResult::Continue { message } => { - assert_eq!(message, transcript_0.helper_prepare_transitions[0].message().unwrap()); + assert_matches!(verify_step_0.result(), VerifyStepResult::Continue { message } => { + assert_eq!(message, transcript_0.helper_verify_transitions[0].message().unwrap()); }); - let prepare_step_1 = prepare_resps.get(1).unwrap(); + let verify_step_1 = verify_resps.get(1).unwrap(); assert_eq!( - prepare_step_1.report_id(), - prepare_init_1.report_share().metadata().id() + verify_step_1.report_id(), + verify_init_1.report_share().metadata().id() ); assert_matches!( - prepare_step_1.result(), - &PrepareStepResult::Reject(ReportError::HpkeDecryptError) + verify_step_1.result(), + &VerifyStepResult::Reject(ReportError::HpkeDecryptError) ); - let prepare_step_2 = prepare_resps.get(2).unwrap(); + let verify_step_2 = verify_resps.get(2).unwrap(); assert_eq!( - prepare_step_2.report_id(), - prepare_init_2.report_share().metadata().id() + verify_step_2.report_id(), + verify_init_2.report_share().metadata().id() ); assert_matches!( - prepare_step_2.result(), - &PrepareStepResult::Reject(ReportError::InvalidMessage) + verify_step_2.result(), + &VerifyStepResult::Reject(ReportError::InvalidMessage) ); - let prepare_step_3 = prepare_resps.get(3).unwrap(); + let verify_step_3 = verify_resps.get(3).unwrap(); assert_eq!( - prepare_step_3.report_id(), - prepare_init_3.report_share().metadata().id() + verify_step_3.report_id(), + verify_init_3.report_share().metadata().id() ); assert_matches!( - prepare_step_3.result(), - 
&PrepareStepResult::Reject(ReportError::HpkeUnknownConfigId) + verify_step_3.result(), + &VerifyStepResult::Reject(ReportError::HpkeUnknownConfigId) ); - let prepare_step_4 = prepare_resps.get(4).unwrap(); + let verify_step_4 = verify_resps.get(4).unwrap(); assert_eq!( - prepare_step_4.report_id(), - prepare_init_4.report_share().metadata().id() + verify_step_4.report_id(), + verify_init_4.report_share().metadata().id() ); assert_eq!( - prepare_step_4.result(), - &PrepareStepResult::Reject(ReportError::ReportReplayed) + verify_step_4.result(), + &VerifyStepResult::Reject(ReportError::ReportReplayed) ); - let prepare_step_5 = prepare_resps.get(5).unwrap(); + let verify_step_5 = verify_resps.get(5).unwrap(); assert_eq!( - prepare_step_5.report_id(), - prepare_init_5.report_share().metadata().id() + verify_step_5.report_id(), + verify_init_5.report_share().metadata().id() ); assert_eq!( - prepare_step_5.result(), - &PrepareStepResult::Reject(ReportError::BatchCollected) + verify_step_5.result(), + &VerifyStepResult::Reject(ReportError::BatchCollected) ); - let prepare_step_6 = prepare_resps.get(6).unwrap(); + let verify_step_6 = verify_resps.get(6).unwrap(); assert_eq!( - prepare_step_6.report_id(), - prepare_init_6.report_share().metadata().id() + verify_step_6.report_id(), + verify_init_6.report_share().metadata().id() ); assert_eq!( - prepare_step_6.result(), - &PrepareStepResult::Reject(ReportError::InvalidMessage), + verify_step_6.result(), + &VerifyStepResult::Reject(ReportError::InvalidMessage), ); - let prepare_step_7 = prepare_resps.get(7).unwrap(); + let verify_step_7 = verify_resps.get(7).unwrap(); assert_eq!( - prepare_step_7.report_id(), - prepare_init_7.report_share().metadata().id() + verify_step_7.report_id(), + verify_init_7.report_share().metadata().id() ); assert_eq!( - prepare_step_7.result(), - &PrepareStepResult::Reject(ReportError::InvalidMessage), + verify_step_7.result(), + &VerifyStepResult::Reject(ReportError::InvalidMessage), ); - let 
prepare_step_8 = prepare_resps.get(8).unwrap(); + let verify_step_8 = verify_resps.get(8).unwrap(); assert_eq!( - prepare_step_8.report_id(), - prepare_init_8.report_share().metadata().id() + verify_step_8.report_id(), + verify_init_8.report_share().metadata().id() ); assert_eq!( - prepare_step_8.result(), - &PrepareStepResult::Reject(ReportError::InvalidMessage), + verify_step_8.result(), + &VerifyStepResult::Reject(ReportError::InvalidMessage), ); - let prepare_step_9 = prepare_resps.get(9).unwrap(); + let verify_step_9 = verify_resps.get(9).unwrap(); assert_eq!( - prepare_step_9.report_id(), - prepare_init_9.report_share().metadata().id() + verify_step_9.report_id(), + verify_init_9.report_share().metadata().id() ); assert_eq!( - prepare_step_9.result(), - &PrepareStepResult::Reject(ReportError::InvalidMessage), + verify_step_9.result(), + &VerifyStepResult::Reject(ReportError::InvalidMessage), ); - let prepare_step_10 = prepare_resps.get(10).unwrap(); + let verify_step_10 = verify_resps.get(10).unwrap(); assert_eq!( - prepare_step_10.report_id(), - prepare_init_10.report_share().metadata().id() + verify_step_10.report_id(), + verify_init_10.report_share().metadata().id() ); assert_eq!( - prepare_step_10.result(), - &PrepareStepResult::Reject(ReportError::InvalidMessage), + verify_step_10.result(), + &VerifyStepResult::Reject(ReportError::InvalidMessage), ); - let prepare_step_11 = prepare_resps.get(11).unwrap(); + let verify_step_11 = verify_resps.get(11).unwrap(); assert_eq!( - prepare_step_11.report_id(), - prepare_init_11.report_share().metadata().id() + verify_step_11.report_id(), + verify_init_11.report_share().metadata().id() ); assert_eq!( - prepare_step_11.result(), - &PrepareStepResult::Reject(ReportError::InvalidMessage), + verify_step_11.result(), + &VerifyStepResult::Reject(ReportError::InvalidMessage), ); // Check aggregation job in datastore. 
@@ -807,7 +807,7 @@ async fn aggregate_init_async() { let vdaf = dummy::Vdaf::new(1); let measurement = 0; - let prep_init_generator = PrepareInitGenerator::new( + let verify_init_generator = VerifyInitGenerator::new( clock.clone(), helper_task.clone(), hpke_keypair.config().clone(), @@ -815,17 +815,17 @@ async fn aggregate_init_async() { dummy::AggregationParam(0), ); - // prepare_init_0 is a "happy path" report. - let (prepare_init_0, _) = prep_init_generator.next(&measurement); + // verify_init_0 is a "happy path" report. + let (verify_init_0, _) = verify_init_generator.next(&measurement); - // prepare_init_1 has already been aggregated in another aggregation job, with the same + // verify_init_1 has already been aggregated in another aggregation job, with the same // aggregation parameter. - let (prepare_init_1, _) = prep_init_generator.next(&measurement); + let (verify_init_1, _) = verify_init_generator.next(&measurement); datastore .run_unnamed_tx(|tx| { let helper_task = helper_task.clone(); - let report_share_1 = prepare_init_1.report_share().clone(); + let report_share_1 = verify_init_1.report_share().clone(); Box::pin(async move { tx.put_aggregator_task(&helper_task).await.unwrap(); @@ -850,7 +850,7 @@ async fn aggregate_init_async() { let request = AggregationJobInitializeReq::new( aggregation_param.get_encoded().unwrap(), PartialBatchSelector::new_time_interval(), - Vec::from([prepare_init_0.clone(), prepare_init_1.clone()]), + Vec::from([verify_init_0.clone(), verify_init_1.clone()]), ); // Send request, parse response. Do this twice to prove that the request is idempotent. 
@@ -909,19 +909,19 @@ async fn aggregate_init_async() { assert_eq!( report_aggregations[0].report_id(), - prepare_init_0.report_share().metadata().id() + verify_init_0.report_share().metadata().id() ); assert_eq!( report_aggregations[0].state(), &ReportAggregationState::HelperInitProcessing { - prepare_init: prepare_init_0.clone(), + verify_init: verify_init_0.clone(), require_taskbind_extension: false } ); assert_eq!( report_aggregations[1].report_id(), - prepare_init_1.report_share().metadata().id() + verify_init_1.report_share().metadata().id() ); assert_eq!( report_aggregations[1].state(), @@ -967,7 +967,7 @@ async fn aggregate_init_batch_already_collected() { datastore.put_aggregator_task(&helper_task).await.unwrap(); let vdaf = dummy::Vdaf::new(1); - let prep_init_generator = PrepareInitGenerator::new( + let verify_init_generator = VerifyInitGenerator::new( clock.clone(), helper_task.clone(), hpke_keypair.config().clone(), @@ -975,14 +975,14 @@ async fn aggregate_init_batch_already_collected() { dummy::AggregationParam(0), ); - let (prepare_init, _) = prep_init_generator.next(&0); + let (verify_init, _) = verify_init_generator.next(&0); let aggregation_param = dummy::AggregationParam(0); let batch_id = random(); let request = AggregationJobInitializeReq::new( aggregation_param.get_encoded().unwrap(), PartialBatchSelector::new_leader_selected(batch_id), - Vec::from([prepare_init.clone()]), + Vec::from([verify_init.clone()]), ); // Pretend that we're another concurrently running process: insert some aggregations to the @@ -990,7 +990,7 @@ async fn aggregate_init_batch_already_collected() { datastore .run_unnamed_tx(|tx| { let task = task.clone(); - let timestamp = *prepare_init.report_share().metadata().time(); + let timestamp = *verify_init.report_share().metadata().time(); Box::pin(async move { let interval = Interval::minimal(timestamp).unwrap(); @@ -1039,19 +1039,19 @@ async fn aggregate_init_batch_already_collected() { assert_eq!(response.status(), 
StatusCode::CREATED); let aggregate_resp: AggregationJobResp = decode_response_body(&mut response).await; - let prepare_resps = assert_matches!( + let verify_resps = assert_matches!( aggregate_resp, - AggregationJobResp { prepare_resps } => prepare_resps + AggregationJobResp { verify_resps } => verify_resps ); - let prepare_step = prepare_resps.first().unwrap(); + let verify_step = verify_resps.first().unwrap(); assert_eq!( - prepare_step.report_id(), - prepare_init.report_share().metadata().id() + verify_step.report_id(), + verify_init.report_share().metadata().id() ); assert_eq!( - prepare_step.result(), - &PrepareStepResult::Reject(ReportError::BatchCollected) + verify_step.result(), + &VerifyStepResult::Reject(ReportError::BatchCollected) ); assert_task_aggregation_counter(&datastore, *task.id(), TaskAggregationCounter::default()) @@ -1059,7 +1059,7 @@ async fn aggregate_init_batch_already_collected() { } #[tokio::test] -async fn aggregate_init_prep_init_failed() { +async fn aggregate_init_verify_init_failed() { let HttpHandlerTest { clock, ephemeral_datastore: _ephemeral_datastore, @@ -1072,11 +1072,11 @@ async fn aggregate_init_prep_init_failed() { let task = TaskBuilder::new( BatchMode::TimeInterval, AggregationMode::Synchronous, - VdafInstance::FakeFailsPrepInit, + VdafInstance::FakeFailsVerifyInit, ) .build(); let helper_task = task.helper_view().unwrap(); - let prep_init_generator = PrepareInitGenerator::new( + let verify_init_generator = VerifyInitGenerator::new( clock.clone(), helper_task.clone(), hpke_keypair.config().clone(), @@ -1086,11 +1086,11 @@ async fn aggregate_init_prep_init_failed() { datastore.put_aggregator_task(&helper_task).await.unwrap(); - let (prepare_init, _) = prep_init_generator.next(&0); + let (verify_init, _) = verify_init_generator.next(&0); let request = AggregationJobInitializeReq::new( dummy::AggregationParam(0).get_encoded().unwrap(), PartialBatchSelector::new_time_interval(), - Vec::from([prepare_init.clone()]), + 
Vec::from([verify_init.clone()]), ); // Send request, and parse response. @@ -1102,22 +1102,22 @@ async fn aggregate_init_prep_init_failed() { AggregationJobResp::MEDIA_TYPE ); let aggregate_resp: AggregationJobResp = decode_response_body(&mut response).await; - let prepare_resps = assert_matches!( + let verify_resps = assert_matches!( aggregate_resp, - AggregationJobResp { prepare_resps } => prepare_resps + AggregationJobResp { verify_resps } => verify_resps ); // Validate response. - assert_eq!(prepare_resps.len(), 1); + assert_eq!(verify_resps.len(), 1); - let prepare_step = prepare_resps.first().unwrap(); + let verify_step = verify_resps.first().unwrap(); assert_eq!( - prepare_step.report_id(), - prepare_init.report_share().metadata().id() + verify_step.report_id(), + verify_init.report_share().metadata().id() ); assert_matches!( - prepare_step.result(), - &PrepareStepResult::Reject(ReportError::VdafPrepError) + verify_step.result(), + &VerifyStepResult::Reject(ReportError::VdafVerifyError) ); assert_task_aggregation_counter(&datastore, *task.id(), TaskAggregationCounter::default()) @@ -1125,7 +1125,7 @@ async fn aggregate_init_prep_init_failed() { } #[tokio::test] -async fn aggregate_init_prep_step_failed() { +async fn aggregate_init_verify_step_failed() { let HttpHandlerTest { clock, ephemeral_datastore: _ephemeral_datastore, @@ -1138,11 +1138,11 @@ async fn aggregate_init_prep_step_failed() { let task = TaskBuilder::new( BatchMode::TimeInterval, AggregationMode::Synchronous, - VdafInstance::FakeFailsPrepStep, + VdafInstance::FakeFailsVerifyStep, ) .build(); let helper_task = task.helper_view().unwrap(); - let prep_init_generator = PrepareInitGenerator::new( + let verify_init_generator = VerifyInitGenerator::new( clock.clone(), helper_task.clone(), hpke_keypair.config().clone(), @@ -1152,11 +1152,11 @@ async fn aggregate_init_prep_step_failed() { datastore.put_aggregator_task(&helper_task).await.unwrap(); - let (prepare_init, _) = 
prep_init_generator.next(&0); + let (verify_init, _) = verify_init_generator.next(&0); let request = AggregationJobInitializeReq::new( dummy::AggregationParam(0).get_encoded().unwrap(), PartialBatchSelector::new_time_interval(), - Vec::from([prepare_init.clone()]), + Vec::from([verify_init.clone()]), ); let aggregation_job_id: AggregationJobId = random(); @@ -1167,22 +1167,22 @@ async fn aggregate_init_prep_step_failed() { AggregationJobResp::MEDIA_TYPE ); let aggregate_resp: AggregationJobResp = decode_response_body(&mut response).await; - let prepare_resps = assert_matches!( + let verify_resps = assert_matches!( aggregate_resp, - AggregationJobResp { prepare_resps } => prepare_resps + AggregationJobResp { verify_resps } => verify_resps ); // Validate response. - assert_eq!(prepare_resps.len(), 1); + assert_eq!(verify_resps.len(), 1); - let prepare_step = prepare_resps.first().unwrap(); + let verify_step = verify_resps.first().unwrap(); assert_eq!( - prepare_step.report_id(), - prepare_init.report_share().metadata().id() + verify_step.report_id(), + verify_init.report_share().metadata().id() ); assert_matches!( - prepare_step.result(), - &PrepareStepResult::Reject(ReportError::VdafPrepError) + verify_step.result(), + &VerifyStepResult::Reject(ReportError::VdafVerifyError) ); assert_task_aggregation_counter(&datastore, *task.id(), TaskAggregationCounter::default()) @@ -1208,7 +1208,7 @@ async fn aggregate_init_duplicated_report_id() { .build(); let helper_task = task.helper_view().unwrap(); - let prep_init_generator = PrepareInitGenerator::new( + let verify_init_generator = VerifyInitGenerator::new( clock.clone(), helper_task.clone(), hpke_keypair.config().clone(), @@ -1218,12 +1218,12 @@ async fn aggregate_init_duplicated_report_id() { datastore.put_aggregator_task(&helper_task).await.unwrap(); - let (prepare_init, _) = prep_init_generator.next(&0); + let (verify_init, _) = verify_init_generator.next(&0); let request = AggregationJobInitializeReq::new( 
dummy::AggregationParam(0).get_encoded().unwrap(), PartialBatchSelector::new_time_interval(), - Vec::from([prepare_init.clone(), prepare_init]), + Vec::from([verify_init.clone(), verify_init]), ); let aggregation_job_id: AggregationJobId = random(); @@ -1251,8 +1251,8 @@ async fn aggregate_init_partially_replayed_aggregation_init() { // Create 5 reports, 1-5. Send one aggregation job init request containing reports 1 and 2. It // should succeed normally. Then send another init request containing reports 1-5. We expect: // - the request overall succeeds (i.e. HTTP 200) - // - the PrepareResps for reports 1 and 2 indicate rejection - // - the PrepareResps for reports 3-5 indicate success + // - the VerifyResps for reports 1 and 2 indicate rejection + // - the VerifyResps for reports 3-5 indicate success // We then send an aggregate share request for the batch ID. It should succeed and all five // reports should be included. let HttpHandlerTest { @@ -1279,7 +1279,7 @@ async fn aggregate_init_partially_replayed_aggregation_init() { let partial_batch_selector = PartialBatchSelector::new_leader_selected(batch_id); let helper_task = task.helper_view().unwrap(); - let prep_init_generator = PrepareInitGenerator::new( + let verify_init_generator = VerifyInitGenerator::new( clock.clone(), helper_task.clone(), hpke_keypair.config().clone(), @@ -1289,17 +1289,17 @@ async fn aggregate_init_partially_replayed_aggregation_init() { datastore.put_aggregator_task(&helper_task).await.unwrap(); - let (prepare_init_1, _) = prep_init_generator.next(&1); - let (prepare_init_2, _) = prep_init_generator.next(&2); - let (prepare_init_3, _) = prep_init_generator.next(&3); - let (prepare_init_4, _) = prep_init_generator.next(&4); - let (prepare_init_5, _) = prep_init_generator.next(&5); + let (verify_init_1, _) = verify_init_generator.next(&1); + let (verify_init_2, _) = verify_init_generator.next(&2); + let (verify_init_3, _) = verify_init_generator.next(&3); + let (verify_init_4, _) = 
verify_init_generator.next(&4); + let (verify_init_5, _) = verify_init_generator.next(&5); let report_ids: Vec<_> = [ - &prepare_init_1, - &prepare_init_2, - &prepare_init_3, - &prepare_init_4, - &prepare_init_5, + &verify_init_1, + &verify_init_2, + &verify_init_3, + &verify_init_4, + &verify_init_5, ] .iter() .map(|pi| *pi.report_share().metadata().id()) @@ -1308,7 +1308,7 @@ async fn aggregate_init_partially_replayed_aggregation_init() { let request = AggregationJobInitializeReq::new( agg_param.clone(), partial_batch_selector.clone(), - Vec::from([prepare_init_1.clone(), prepare_init_2.clone()]), + Vec::from([verify_init_1.clone(), verify_init_2.clone()]), ); let mut response = put_aggregation_job(&task, &random(), &request, &router).await; @@ -1319,25 +1319,25 @@ async fn aggregate_init_partially_replayed_aggregation_init() { assert_eq!( &report_ids[0..2], request - .prepare_inits() + .verify_inits() .iter() .map(|init| *init.report_share().metadata().id()) .collect::>() .as_slice(), ); - for resp in &aggregate_resp.prepare_resps { - assert_matches!(resp.result(), &PrepareStepResult::Continue { .. }); + for resp in &aggregate_resp.verify_resps { + assert_matches!(resp.result(), &VerifyStepResult::Continue { .. 
}); } let request = AggregationJobInitializeReq::new( agg_param.clone(), partial_batch_selector, Vec::from([ - prepare_init_1.clone(), - prepare_init_2.clone(), - prepare_init_3.clone(), - prepare_init_4.clone(), - prepare_init_5.clone(), + verify_init_1.clone(), + verify_init_2.clone(), + verify_init_3.clone(), + verify_init_4.clone(), + verify_init_5.clone(), ]), ); @@ -1349,23 +1349,23 @@ async fn aggregate_init_partially_replayed_aggregation_init() { assert_eq!( report_ids, request - .prepare_inits() + .verify_inits() .iter() .map(|init| *init.report_share().metadata().id()) .collect::>(), ); - for resp in &aggregate_resp.prepare_resps { + for resp in &aggregate_resp.verify_resps { if report_ids[0..2].contains(resp.report_id()) { assert_matches!( resp.result(), - &PrepareStepResult::Reject(ReportError::ReportReplayed), + &VerifyStepResult::Reject(ReportError::ReportReplayed), "first two reports must be rejected as replays", ) } if report_ids[2..5].contains(resp.report_id()) { assert_matches!( resp.result(), - &PrepareStepResult::Continue { .. }, + &VerifyStepResult::Continue { .. }, "last three reports must be accepted", ); } diff --git a/aggregator/src/aggregator/http_handlers/tests/collection_job.rs b/aggregator/src/aggregator/http_handlers/tests/collection_job.rs index 59c955265..3318f5924 100644 --- a/aggregator/src/aggregator/http_handlers/tests/collection_job.rs +++ b/aggregator/src/aggregator/http_handlers/tests/collection_job.rs @@ -159,7 +159,7 @@ async fn collection_job_put_request_invalid_batch_size() { .. } = HttpHandlerTest::new().await; - // Prepare parameters. + // Set up parameters. 
let task = TaskBuilder::new( BatchMode::TimeInterval, AggregationMode::Synchronous, diff --git a/aggregator/src/aggregator/http_handlers/tests/helper_e2e.rs b/aggregator/src/aggregator/http_handlers/tests/helper_e2e.rs index bbf8d793c..c36d7e0ad 100644 --- a/aggregator/src/aggregator/http_handlers/tests/helper_e2e.rs +++ b/aggregator/src/aggregator/http_handlers/tests/helper_e2e.rs @@ -4,7 +4,7 @@ use janus_aggregator_core::task::{AggregationMode, BatchMode, test_util::TaskBui use janus_core::{report_id::ReportIdChecksumExt, vdaf::VdafInstance}; use janus_messages::{ AggregateShareId, AggregateShareReq, AggregationJobInitializeReq, AggregationJobResp, - BatchSelector, PartialBatchSelector, PrepareStepResult, ReportError, ReportIdChecksum, + BatchSelector, PartialBatchSelector, ReportError, ReportIdChecksum, VerifyStepResult, batch_mode::LeaderSelected, }; use prio::{ @@ -14,7 +14,7 @@ use prio::{ use rand::random; use crate::aggregator::{ - aggregation_job_init::test_util::{PrepareInitGenerator, put_aggregation_job}, + aggregation_job_init::test_util::{VerifyInitGenerator, put_aggregation_job}, http_handlers::{ test_util::{HttpHandlerTest, take_response_body}, tests::aggregate_share::put_aggregate_share_request, @@ -49,16 +49,16 @@ async fn helper_aggregation_report_share_replay() { let helper_task = task.helper_view().unwrap(); datastore.put_aggregator_task(&helper_task).await.unwrap(); - let prep_init_generator = PrepareInitGenerator::new( + let verify_init_generator = VerifyInitGenerator::new( clock.clone(), helper_task.clone(), hpke_keypair.config().clone(), vdaf.clone(), agg_param, ); - let (replayed_report, _replayed_report_transcript) = prep_init_generator.next(&7); - let (other_report_1, _other_report_1_transcript) = prep_init_generator.next(&11); - let (other_report_2, _other_report_2_transcript) = prep_init_generator.next(&23); + let (replayed_report, _replayed_report_transcript) = verify_init_generator.next(&7); + let (other_report_1, 
_other_report_1_transcript) = verify_init_generator.next(&11); + let (other_report_2, _other_report_2_transcript) = verify_init_generator.next(&23); let batch_id_1 = random(); let batch_id_2 = random(); @@ -93,23 +93,23 @@ async fn helper_aggregation_report_share_replay() { checksum_2, ); - // Make aggregation job initialization requests, and check the prepare step results. + // Make aggregation job initialization requests, and check the verify step results. let mut response = put_aggregation_job(&task, &aggregation_job_id_1, &agg_init_req_1, &router).await; assert_eq!(response.status(), StatusCode::CREATED); let agg_init_resp_1 = AggregationJobResp::get_decoded(take_response_body(&mut response).await.as_ref()).unwrap(); - let prepare_resps_1 = assert_matches!( + let verify_resps_1 = assert_matches!( agg_init_resp_1, - AggregationJobResp { prepare_resps } => prepare_resps + AggregationJobResp { verify_resps } => verify_resps ); assert_matches!( - prepare_resps_1[0].result(), - PrepareStepResult::Continue { .. } + verify_resps_1[0].result(), + VerifyStepResult::Continue { .. } ); assert_matches!( - prepare_resps_1[1].result(), - PrepareStepResult::Continue { .. } + verify_resps_1[1].result(), + VerifyStepResult::Continue { .. } ); let mut response = @@ -117,17 +117,17 @@ async fn helper_aggregation_report_share_replay() { assert_eq!(response.status(), StatusCode::CREATED); let agg_init_resp_2 = AggregationJobResp::get_decoded(take_response_body(&mut response).await.as_ref()).unwrap(); - let prepare_resps_2 = assert_matches!( + let verify_resps_2 = assert_matches!( agg_init_resp_2, - AggregationJobResp { prepare_resps } => prepare_resps + AggregationJobResp { verify_resps } => verify_resps ); assert_matches!( - prepare_resps_2[0].result(), - PrepareStepResult::Reject(ReportError::ReportReplayed) + verify_resps_2[0].result(), + VerifyStepResult::Reject(ReportError::ReportReplayed) ); assert_matches!( - prepare_resps_2[1].result(), - PrepareStepResult::Continue { .. 
} + verify_resps_2[1].result(), + VerifyStepResult::Continue { .. } ); // Make aggregate share requests. If these succeed, then the helper's report_count and checksum diff --git a/aggregator/src/aggregator/taskprov_tests.rs b/aggregator/src/aggregator/taskprov_tests.rs index d225e67a8..fe2aa05d0 100644 --- a/aggregator/src/aggregator/taskprov_tests.rs +++ b/aggregator/src/aggregator/taskprov_tests.rs @@ -35,9 +35,8 @@ use janus_messages::{ AggregateShare as AggregateShareMessage, AggregateShareAad, AggregateShareId, AggregateShareReq, AggregationJobContinueReq, AggregationJobId, AggregationJobInitializeReq, AggregationJobResp, AggregationJobStep, BatchSelector, Duration, Extension, ExtensionType, - Interval, MediaType, PartialBatchSelector, PrepareContinue, PrepareInit, PrepareResp, - PrepareStepResult, ReportError, ReportIdChecksum, ReportShare, Role, TaskId, Time, - TimePrecision, + Interval, MediaType, PartialBatchSelector, ReportError, ReportIdChecksum, ReportShare, Role, + TaskId, Time, TimePrecision, VerifyContinue, VerifyInit, VerifyResp, VerifyStepResult, batch_mode::{self, LeaderSelected}, codec::{Decode, Encode}, taskprov::{TaskConfig, VdafConfig}, @@ -55,7 +54,7 @@ use super::http_handlers::AggregatorHandlerBuilder; use crate::{ aggregator::{ Config, - aggregation_job_init::test_util::PrepareInitGenerator, + aggregation_job_init::test_util::VerifyInitGenerator, http_handlers::test_util::{decode_response_body, take_problem_details}, }, config::TaskprovConfig, @@ -235,7 +234,7 @@ where ReportShare, V::AggregationParam, ) { - let (report_share, transcript) = PrepareInitGenerator::new( + let (report_share, transcript) = VerifyInitGenerator::new( self.clock.clone(), self.task.helper_view().unwrap(), self.hpke_key.config().clone(), @@ -259,9 +258,9 @@ async fn taskprov_aggregate_init() { let request_1 = AggregationJobInitializeReq::new( aggregation_param_1.get_encoded().unwrap(), PartialBatchSelector::new_leader_selected(batch_id_1), - 
Vec::from([PrepareInit::new( + Vec::from([VerifyInit::new( report_share_1.clone(), - transcript_1.leader_prepare_transitions[0] + transcript_1.leader_verify_transitions[0] .message() .unwrap() .clone(), @@ -274,9 +273,9 @@ async fn taskprov_aggregate_init() { let request_2 = AggregationJobInitializeReq::new( aggregation_param_2.get_encoded().unwrap(), PartialBatchSelector::new_leader_selected(batch_id_2), - Vec::from([PrepareInit::new( + Vec::from([VerifyInit::new( report_share_2.clone(), - transcript_2.leader_prepare_transitions[0] + transcript_2.leader_verify_transitions[0] .message() .unwrap() .clone(), @@ -349,21 +348,21 @@ async fn taskprov_aggregate_init() { AggregationJobResp::MEDIA_TYPE, ); let aggregate_resp: AggregationJobResp = decode_response_body(&mut response).await; - let prepare_resps = assert_matches!( + let verify_resps = assert_matches!( aggregate_resp, - AggregationJobResp { prepare_resps } => prepare_resps + AggregationJobResp { verify_resps } => verify_resps ); - assert_eq!(prepare_resps.len(), 1, "{name}"); - let prepare_step = prepare_resps.first().unwrap(); + assert_eq!(verify_resps.len(), 1, "{name}"); + let verify_step = verify_resps.first().unwrap(); assert_eq!( - prepare_step.report_id(), + verify_step.report_id(), report_share.metadata().id(), "{name}", ); assert_matches!( - prepare_step.result(), - &PrepareStepResult::Continue { .. }, + verify_step.result(), + &VerifyStepResult::Continue { .. 
}, "{name}", ); } @@ -420,9 +419,9 @@ async fn taskprov_aggregate_init_missing_extension() { let request = AggregationJobInitializeReq::new( aggregation_param.get_encoded().unwrap(), PartialBatchSelector::new_leader_selected(batch_id), - Vec::from([PrepareInit::new( + Vec::from([VerifyInit::new( report_share.clone(), - transcript.leader_prepare_transitions[0] + transcript.leader_verify_transitions[0] .message() .unwrap() .clone(), @@ -463,17 +462,17 @@ async fn taskprov_aggregate_init_missing_extension() { AggregationJobResp::MEDIA_TYPE, ); let aggregate_resp: AggregationJobResp = decode_response_body(&mut response).await; - let prepare_resps = assert_matches!( + let verify_resps = assert_matches!( aggregate_resp, - AggregationJobResp { prepare_resps } => prepare_resps + AggregationJobResp { verify_resps } => verify_resps ); - assert_eq!(prepare_resps.len(), 1); - let prepare_step = prepare_resps.first().unwrap(); - assert_eq!(prepare_step.report_id(), report_share.metadata().id(),); + assert_eq!(verify_resps.len(), 1); + let verify_step = verify_resps.first().unwrap(); + assert_eq!(verify_step.report_id(), report_share.metadata().id(),); assert_eq!( - prepare_step.result(), - &PrepareStepResult::Reject(ReportError::InvalidMessage), + verify_step.result(), + &VerifyStepResult::Reject(ReportError::InvalidMessage), ); let (aggregation_jobs, got_task) = test @@ -514,9 +513,9 @@ async fn taskprov_aggregate_init_malformed_extension() { let request = AggregationJobInitializeReq::new( aggregation_param.get_encoded().unwrap(), PartialBatchSelector::new_leader_selected(batch_id), - Vec::from([PrepareInit::new( + Vec::from([VerifyInit::new( report_share.clone(), - transcript.leader_prepare_transitions[0] + transcript.leader_verify_transitions[0] .message() .unwrap() .clone(), @@ -557,17 +556,17 @@ async fn taskprov_aggregate_init_malformed_extension() { AggregationJobResp::MEDIA_TYPE, ); let aggregate_resp: AggregationJobResp = decode_response_body(&mut response).await; - 
let prepare_resps = assert_matches!( + let verify_resps = assert_matches!( aggregate_resp, - AggregationJobResp { prepare_resps } => prepare_resps + AggregationJobResp { verify_resps } => verify_resps ); - assert_eq!(prepare_resps.len(), 1); - let prepare_step = prepare_resps.first().unwrap(); - assert_eq!(prepare_step.report_id(), report_share.metadata().id(),); + assert_eq!(verify_resps.len(), 1); + let verify_step = verify_resps.first().unwrap(); + assert_eq!(verify_step.report_id(), report_share.metadata().id(),); assert_eq!( - prepare_step.result(), - &PrepareStepResult::Reject(ReportError::InvalidMessage), + verify_step.result(), + &VerifyStepResult::Reject(ReportError::InvalidMessage), ); let (aggregation_jobs, got_task) = test @@ -609,9 +608,9 @@ async fn taskprov_opt_out_task_ended_regression() { let request = AggregationJobInitializeReq::new( aggregation_param.get_encoded().unwrap(), PartialBatchSelector::new_leader_selected(batch_id), - Vec::from([PrepareInit::new( + Vec::from([VerifyInit::new( report_share.clone(), - transcript.leader_prepare_transitions[0] + transcript.leader_verify_transitions[0] .message() .unwrap() .clone(), @@ -661,9 +660,9 @@ async fn taskprov_opt_out_mismatched_task_id() { let request = AggregationJobInitializeReq::new( ().get_encoded().unwrap(), PartialBatchSelector::new_leader_selected(batch_id), - Vec::from([PrepareInit::new( + Vec::from([VerifyInit::new( report_share.clone(), - transcript.leader_prepare_transitions[0] + transcript.leader_verify_transitions[0] .message() .unwrap() .clone(), @@ -736,9 +735,9 @@ async fn taskprov_opt_out_peer_aggregator_wrong_role() { let request = AggregationJobInitializeReq::new( ().get_encoded().unwrap(), PartialBatchSelector::new_leader_selected(batch_id), - Vec::from([PrepareInit::new( + Vec::from([VerifyInit::new( report_share.clone(), - transcript.leader_prepare_transitions[0] + transcript.leader_verify_transitions[0] .message() .unwrap() .clone(), @@ -809,9 +808,9 @@ async fn 
taskprov_opt_out_peer_aggregator_does_not_exist() { let request = AggregationJobInitializeReq::new( ().get_encoded().unwrap(), PartialBatchSelector::new_leader_selected(batch_id), - Vec::from([PrepareInit::new( + Vec::from([VerifyInit::new( report_share.clone(), - transcript.leader_prepare_transitions[0] + transcript.leader_verify_transitions[0] .message() .unwrap() .clone(), @@ -919,7 +918,7 @@ async fn taskprov_aggregate_continue() { 0, None, ReportAggregationState::HelperContinue { - prepare_state: *transcript.helper_prepare_transitions[0].prepare_state(), + verify_state: *transcript.helper_verify_transitions[0].verify_state(), }, )) .await?; @@ -943,9 +942,9 @@ async fn taskprov_aggregate_continue() { let request = AggregationJobContinueReq::new( AggregationJobStep::from(1), - Vec::from([PrepareContinue::new( + Vec::from([VerifyContinue::new( *report_share.metadata().id(), - transcript.leader_prepare_transitions[1] + transcript.leader_verify_transitions[1] .message() .unwrap() .clone(), @@ -1020,9 +1019,9 @@ async fn taskprov_aggregate_continue() { assert_eq!( aggregate_resp, AggregationJobResp { - prepare_resps: Vec::from([PrepareResp::new( + verify_resps: Vec::from([VerifyResp::new( *report_share.metadata().id(), - PrepareStepResult::Finished + VerifyStepResult::Finished )]) } ); @@ -1168,9 +1167,9 @@ async fn end_to_end() { let aggregation_job_init_request = AggregationJobInitializeReq::new( aggregation_param.get_encoded().unwrap(), PartialBatchSelector::new_leader_selected(batch_id), - Vec::from([PrepareInit::new( + Vec::from([VerifyInit::new( report_share.clone(), - transcript.leader_prepare_transitions[0] + transcript.leader_verify_transitions[0] .message() .unwrap() .clone(), @@ -1212,28 +1211,28 @@ async fn end_to_end() { AggregationJobResp::MEDIA_TYPE, ); let aggregation_job_resp: AggregationJobResp = decode_response_body(&mut response).await; - let prepare_resps = assert_matches!( + let verify_resps = assert_matches!( aggregation_job_resp, - 
AggregationJobResp { prepare_resps } => prepare_resps + AggregationJobResp { verify_resps } => verify_resps ); - assert_eq!(prepare_resps.len(), 1); - let prepare_resp = &prepare_resps[0]; - assert_eq!(prepare_resp.report_id(), report_share.metadata().id()); + assert_eq!(verify_resps.len(), 1); + let verify_resp = &verify_resps[0]; + assert_eq!(verify_resp.report_id(), report_share.metadata().id()); let message = assert_matches!( - prepare_resp.result(), - PrepareStepResult::Continue { message } => message.clone() + verify_resp.result(), + VerifyStepResult::Continue { message } => message.clone() ); assert_eq!( &message, - transcript.helper_prepare_transitions[0].message().unwrap() + transcript.helper_verify_transitions[0].message().unwrap() ); let aggregation_job_continue_request = AggregationJobContinueReq::new( AggregationJobStep::from(1), - Vec::from([PrepareContinue::new( + Vec::from([VerifyContinue::new( *report_share.metadata().id(), - transcript.leader_prepare_transitions[1] + transcript.leader_verify_transitions[1] .message() .unwrap() .clone(), @@ -1275,15 +1274,15 @@ async fn end_to_end() { AggregationJobResp::MEDIA_TYPE, ); let aggregation_job_resp: AggregationJobResp = decode_response_body(&mut response).await; - let prepare_resps = assert_matches!( + let verify_resps = assert_matches!( aggregation_job_resp, - AggregationJobResp { prepare_resps } => prepare_resps + AggregationJobResp { verify_resps } => verify_resps ); - assert_eq!(prepare_resps.len(), 1); - let prepare_resp = &prepare_resps[0]; - assert_eq!(prepare_resp.report_id(), report_share.metadata().id()); - assert_matches!(prepare_resp.result(), PrepareStepResult::Finished); + assert_eq!(verify_resps.len(), 1); + let verify_resp = &verify_resps[0]; + assert_eq!(verify_resp.report_id(), report_share.metadata().id()); + assert_matches!(verify_resp.result(), VerifyStepResult::Finished); let checksum = ReportIdChecksum::for_report_id(report_share.metadata().id()); let aggregate_share_request = 
AggregateShareReq::new( @@ -1367,9 +1366,9 @@ async fn end_to_end_sumvec_hmac() { let aggregation_job_init_request = AggregationJobInitializeReq::new( aggregation_param.get_encoded().unwrap(), PartialBatchSelector::new_leader_selected(batch_id), - Vec::from([PrepareInit::new( + Vec::from([VerifyInit::new( report_share.clone(), - transcript.leader_prepare_transitions[0] + transcript.leader_verify_transitions[0] .message() .unwrap() .clone(), @@ -1411,18 +1410,18 @@ async fn end_to_end_sumvec_hmac() { AggregationJobResp::MEDIA_TYPE, ); let aggregation_job_resp: AggregationJobResp = decode_response_body(&mut response).await; - let prepare_resps = assert_matches!( + let verify_resps = assert_matches!( aggregation_job_resp, - AggregationJobResp { prepare_resps } => prepare_resps + AggregationJobResp { verify_resps } => verify_resps ); - assert_eq!(prepare_resps.len(), 1); - let prepare_resp = &prepare_resps[0]; - assert_eq!(prepare_resp.report_id(), report_share.metadata().id()); - let message = assert_matches!(prepare_resp.result(), PrepareStepResult::Continue { message } => message.clone()); + assert_eq!(verify_resps.len(), 1); + let verify_resp = &verify_resps[0]; + assert_eq!(verify_resp.report_id(), report_share.metadata().id()); + let message = assert_matches!(verify_resp.result(), VerifyStepResult::Continue { message } => message.clone()); assert_eq!( &message, - transcript.helper_prepare_transitions[0].message().unwrap() + transcript.helper_verify_transitions[0].message().unwrap() ); let checksum = ReportIdChecksum::for_report_id(report_share.metadata().id()); diff --git a/aggregator/src/config.rs b/aggregator/src/config.rs index 17dee4d53..a24bb246e 100644 --- a/aggregator/src/config.rs +++ b/aggregator/src/config.rs @@ -53,7 +53,7 @@ pub struct CommonConfig { #[serde(default = "default_max_transaction_retries")] pub max_transaction_retries: u64, - /// Stack size, in bytes, for threads used for VDAF preparation. 
+ /// Stack size, in bytes, for threads used for VDAF verification. /// /// Optional. If not set, the default stack size will be used. This is currently 2 MiB on most /// platforms. See . diff --git a/aggregator/src/metrics.rs b/aggregator/src/metrics.rs index 384909e8a..6c6128ff2 100644 --- a/aggregator/src/metrics.rs +++ b/aggregator/src/metrics.rs @@ -327,21 +327,21 @@ pub(crate) fn aggregate_step_failure_counter(meter: &Meter) -> Counter { // Initialize counters with desired status labels. This causes Prometheus to see the first // non-zero value we record. for failure_type in [ - "missing_prepare_message", + "missing_verify_message", "missing_leader_input_share", "missing_helper_input_share", - "prepare_init_failure", - "prepare_step_failure", - "prepare_message_failure", + "verify_init_failure", + "verify_step_failure", + "verify_message_failure", "unknown_hpke_config_id", "decrypt_failure", "input_share_decode_failure", "input_share_aad_encode_failure", "public_share_decode_failure", "public_share_encode_failure", - "prepare_message_decode_failure", - "leader_prep_share_decode_failure", - "helper_prep_share_decode_failure", + "verify_message_decode_failure", + "leader_verify_share_decode_failure", + "helper_verify_share_decode_failure", "continue_mismatch", "accumulate_failure", "finish_mismatch", @@ -349,7 +349,7 @@ pub(crate) fn aggregate_step_failure_counter(meter: &Meter) -> Counter { "plaintext_input_share_decode_failure", "duplicate_extension", "missing_client_report", - "missing_prepare_message", + "missing_verify_message", "missing_or_malformed_taskbind_extension", "unexpected_taskbind_extension", "unrecognized_extension", diff --git a/aggregator_core/src/datastore.rs b/aggregator_core/src/datastore.rs index 1c1476315..76a2c15d8 100644 --- a/aggregator_core/src/datastore.rs +++ b/aggregator_core/src/datastore.rs @@ -27,8 +27,8 @@ use janus_core::{ }; use janus_messages::{ AggregateShareId, AggregationJobId, BatchId, CollectionJobId, Duration, 
Extension, - HpkeCiphertext, HpkeConfig, HpkeConfigId, Interval, PrepareContinue, PrepareInit, PrepareResp, - Query, ReportId, ReportIdChecksum, ReportMetadata, Role, TaskId, Time, TimePrecision, + HpkeCiphertext, HpkeConfig, HpkeConfigId, Interval, Query, ReportId, ReportIdChecksum, + ReportMetadata, Role, TaskId, Time, TimePrecision, VerifyContinue, VerifyInit, VerifyResp, batch_mode::{BatchMode, LeaderSelected, TimeInterval}, }; use leases::{acquired_aggregation_job_from_row, acquired_collection_job_from_row}; @@ -1288,7 +1288,7 @@ RETURNING report_id, client_timestamp", /// This should only be used with VDAFs with a non-unit type aggregation parameter. If a VDAF /// has the unit type as its aggregation parameter, then /// `get_unaggregated_client_report_ids_for_task` should be used instead. In such cases, it is - /// not necessary to wait for a collection job to arrive before preparing reports. + /// not necessary to wait for a collection job to arrive before verifying reports. /// /// This function deliberately ignores the `client_reports.aggregation_started` column, which /// only has meaning for VDAFs without aggregation parameters. 
@@ -2201,12 +2201,12 @@ WHERE aggregation_jobs.task_id = $6 .prepare_cached( "-- get_report_aggregations_for_aggregation_job() SELECT - ord, client_report_id, client_timestamp, last_prep_resp, + ord, client_report_id, client_timestamp, last_verify_resp, report_aggregations.state, public_extensions, public_share, leader_private_extensions, leader_input_share, - helper_encrypted_input_share, leader_prep_transition, leader_prep_state, - prepare_init, require_taskbind_extension, - helper_prep_state, prepare_continue, error_code + helper_encrypted_input_share, leader_verify_transition, leader_verify_state, + verify_init, require_taskbind_extension, + helper_verify_state, verify_continue, error_code FROM report_aggregations JOIN aggregation_jobs ON aggregation_jobs.id = report_aggregations.aggregation_job_id WHERE report_aggregations.task_id = $1 @@ -2263,11 +2263,11 @@ ORDER BY ord ASC", .prepare_cached( "-- get_report_aggregation_by_report_id() SELECT - ord, client_timestamp, last_prep_resp, report_aggregations.state, + ord, client_timestamp, last_verify_resp, report_aggregations.state, public_extensions, public_share, leader_private_extensions, - leader_input_share, helper_encrypted_input_share, leader_prep_transition, - leader_prep_state, prepare_init, - require_taskbind_extension, helper_prep_state, prepare_continue, error_code + leader_input_share, helper_encrypted_input_share, leader_verify_transition, + leader_verify_state, verify_init, + require_taskbind_extension, helper_verify_state, verify_continue, error_code FROM report_aggregations JOIN aggregation_jobs ON aggregation_jobs.id = report_aggregations.aggregation_job_id @@ -2325,11 +2325,11 @@ WHERE report_aggregations.task_id = $1 "-- get_report_aggregations_for_task() SELECT aggregation_jobs.aggregation_job_id, ord, client_report_id, - client_timestamp, last_prep_resp, report_aggregations.state, + client_timestamp, last_verify_resp, report_aggregations.state, public_extensions, public_share, 
leader_private_extensions, - leader_input_share, helper_encrypted_input_share, leader_prep_transition, - leader_prep_state, prepare_init, - require_taskbind_extension, helper_prep_state, prepare_continue, error_code + leader_input_share, helper_encrypted_input_share, leader_verify_transition, + leader_verify_state, verify_init, + require_taskbind_extension, helper_verify_state, verify_continue, error_code FROM report_aggregations JOIN aggregation_jobs ON aggregation_jobs.id = report_aggregations.aggregation_job_id WHERE report_aggregations.task_id = $1 @@ -2374,10 +2374,10 @@ WHERE report_aggregations.task_id = $1 let time = Time::from_date_time(row.get("client_timestamp"), time_precision); let state: ReportAggregationStateCode = row.get("state"); let error_code: Option = row.get("error_code"); - let last_prep_resp_bytes: Option> = row.get("last_prep_resp"); + let last_verify_resp_bytes: Option> = row.get("last_verify_resp"); - let last_prep_resp = last_prep_resp_bytes - .map(|bytes| PrepareResp::get_decoded(&bytes)) + let last_verify_resp = last_verify_resp_bytes + .map(|bytes| VerifyResp::get_decoded(&bytes)) .transpose()?; let agg_state = match state { @@ -2446,11 +2446,11 @@ WHERE report_aggregations.task_id = $1 } ReportAggregationStateCode::InitProcessing => { - let prepare_init_bytes = - row.get::<_, Option>>("prepare_init") + let verify_init_bytes = + row.get::<_, Option>>("verify_init") .ok_or_else(|| { Error::DbState( - "report aggregation in state INIT_PROCESSING but prepare_init is NULL" + "report aggregation in state INIT_PROCESSING but verify_init is NULL" .to_string(), ) })?; @@ -2462,10 +2462,10 @@ WHERE report_aggregations.task_id = $1 ) })?; - let prepare_init = PrepareInit::get_decoded(&prepare_init_bytes)?; + let verify_init = VerifyInit::get_decoded(&verify_init_bytes)?; ReportAggregationState::HelperInitProcessing { - prepare_init, + verify_init, require_taskbind_extension, } } @@ -2473,17 +2473,17 @@ WHERE report_aggregations.task_id = $1 
ReportAggregationStateCode::Continue => { match role { Role::Leader => { - let leader_prep_transition_bytes = row - .get::<_, Option>>("leader_prep_transition") + let leader_verify_transition_bytes = row + .get::<_, Option>>("leader_verify_transition") .ok_or_else(|| { Error::DbState( - "report aggregation in state CONTINUE but leader_prep_transition is NULL" + "report aggregation in state CONTINUE but leader_verify_transition is NULL" .to_string(), ) })?; let ping_pong_transition = PingPongContinuation::get_decoded_with_param( &(vdaf, 0 /* leader */), - &leader_prep_transition_bytes, + &leader_verify_transition_bytes, )?; ReportAggregationState::LeaderContinue { @@ -2491,36 +2491,36 @@ WHERE report_aggregations.task_id = $1 } } Role::Helper => { - let helper_prep_state_bytes = row - .get::<_, Option>>("helper_prep_state") + let helper_verify_state_bytes = row + .get::<_, Option>>("helper_verify_state") .ok_or_else(|| { Error::DbState( - "report aggregation in state CONTINUE but helper_prep_state is NULL" + "report aggregation in state CONTINUE but helper_verify_state is NULL" .to_string(), ) })?; - let prepare_state = A::VerifyState::get_decoded_with_param( + let verify_state = A::VerifyState::get_decoded_with_param( &(vdaf, 1 /* helper */), - &helper_prep_state_bytes, + &helper_verify_state_bytes, )?; - ReportAggregationState::HelperContinue { prepare_state } + ReportAggregationState::HelperContinue { verify_state } } _ => panic!("unexpected role"), } } ReportAggregationStateCode::ContinueProcessing => { - let helper_prep_state_bytes = row - .get::<_, Option>>("helper_prep_state") + let helper_verify_state_bytes = row + .get::<_, Option>>("helper_verify_state") .ok_or_else(|| { Error::DbState( - "report aggregation in state CONTINUE_PROCESSING but helper_prep_state is NULL" + "report aggregation in state CONTINUE_PROCESSING but helper_verify_state is NULL" .to_string(), ) })?; - let prepare_continue_bytes = row - .get::<_, Option>>("prepare_continue") + let 
verify_continue_bytes = row + .get::<_, Option>>("verify_continue") .ok_or_else(|| { Error::DbState( "report aggregation in state CONTINUE_PROCESSING but message is NULL" @@ -2528,29 +2528,29 @@ WHERE report_aggregations.task_id = $1 ) })?; - let prepare_state = A::VerifyState::get_decoded_with_param( + let verify_state = A::VerifyState::get_decoded_with_param( &(vdaf, 1 /* helper */), - &helper_prep_state_bytes, + &helper_verify_state_bytes, )?; - let prepare_continue = PrepareContinue::get_decoded(&prepare_continue_bytes)?; + let verify_continue = VerifyContinue::get_decoded(&verify_continue_bytes)?; ReportAggregationState::HelperContinueProcessing { - prepare_state, - prepare_continue, + verify_state, + verify_continue, } } ReportAggregationStateCode::PollInit => { - row.get::<_, Option>>("leader_prep_state") + row.get::<_, Option>>("leader_verify_state") .ok_or_else(|| { Error::DbState( - "report aggregation in state POLL_INIT but leader_prep_state is NULL" + "report aggregation in state POLL_INIT but leader_verify_state is NULL" .to_string(), ) }) .and_then(|encoded| { Ok(ReportAggregationState::LeaderPollInit { - prepare_state: A::VerifyState::get_decoded_with_param( + verify_state: A::VerifyState::get_decoded_with_param( &(vdaf, 0 /* leader */), &encoded, )?, @@ -2559,10 +2559,10 @@ WHERE report_aggregations.task_id = $1 } ReportAggregationStateCode::PollContinue => { - row.get::<_, Option>>("leader_prep_transition") + row.get::<_, Option>>("leader_verify_transition") .ok_or_else(|| { Error::DbState( - "report aggregation in state POLL_CONTINUE but leader_prep_transition is NULL" + "report aggregation in state POLL_CONTINUE but leader_verify_transition is NULL" .to_string(), ) }) @@ -2606,7 +2606,7 @@ WHERE report_aggregations.task_id = $1 *report_id, time, ord, - last_prep_resp, + last_verify_resp, agg_state, )) } @@ -2624,9 +2624,9 @@ WHERE report_aggregations.task_id = $1 let now = self.clock.now(); let encoded_state_values = 
report_aggregation.state().encoded_values_from_state()?; - let encoded_last_prep_resp: Option> = report_aggregation - .last_prep_resp() - .map(PrepareResp::get_encoded) + let encoded_last_verify_resp: Option> = report_aggregation + .last_verify_resp() + .map(VerifyResp::get_encoded) .transpose()?; // If there is a conflict, the we upsert the incoming report agggregation (excluded) if the @@ -2638,11 +2638,11 @@ WHERE report_aggregations.task_id = $1 "-- put_report_aggregation() INSERT INTO report_aggregations (task_id, aggregation_job_id, ord, client_report_id, client_timestamp, - last_prep_resp, state, public_extensions, public_share, + last_verify_resp, state, public_extensions, public_share, leader_private_extensions, leader_input_share, - helper_encrypted_input_share, leader_prep_transition, leader_prep_state, - prepare_init, require_taskbind_extension, - helper_prep_state, prepare_continue, error_code, created_at, updated_at, + helper_encrypted_input_share, leader_verify_transition, leader_verify_state, + verify_init, require_taskbind_extension, + helper_verify_state, verify_continue, error_code, created_at, updated_at, updated_by) SELECT $1, aggregation_jobs.id, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, @@ -2652,21 +2652,21 @@ WHERE task_id = $1 AND aggregation_job_id = $2 ON CONFLICT(task_id, aggregation_job_id, ord) DO UPDATE SET ( - client_report_id, client_timestamp, last_prep_resp, state, + client_report_id, client_timestamp, last_verify_resp, state, public_extensions, public_share, leader_private_extensions, leader_input_share, helper_encrypted_input_share, - leader_prep_transition, leader_prep_state, - prepare_init, require_taskbind_extension, helper_prep_state, - prepare_continue, error_code, created_at, updated_at, updated_by + leader_verify_transition, leader_verify_state, + verify_init, require_taskbind_extension, helper_verify_state, + verify_continue, error_code, created_at, updated_at, updated_by ) = ( excluded.client_report_id, 
excluded.client_timestamp, - excluded.last_prep_resp, excluded.state, excluded.public_extensions, + excluded.last_verify_resp, excluded.state, excluded.public_extensions, excluded.public_share, excluded.leader_private_extensions, excluded.leader_input_share, excluded.helper_encrypted_input_share, - excluded.leader_prep_transition, excluded.leader_prep_state, - excluded.prepare_init, - excluded.require_taskbind_extension, excluded.helper_prep_state, - excluded.prepare_continue, excluded.error_code, excluded.created_at, + excluded.leader_verify_transition, excluded.leader_verify_state, + excluded.verify_init, + excluded.require_taskbind_extension, excluded.helper_verify_state, + excluded.verify_continue, excluded.error_code, excluded.created_at, excluded.updated_at, excluded.updated_by ) WHERE (SELECT UPPER(client_timestamp_interval) @@ -2687,7 +2687,7 @@ ON CONFLICT(task_id, aggregation_job_id, ord) DO UPDATE &report_aggregation .time() .as_date_time(task_info.time_precision)?, - /* last_prep_resp */ &encoded_last_prep_resp, + /* last_verify_resp */ &encoded_last_verify_resp, /* state */ &report_aggregation.state().state_code(), /* public_extensions */ &encoded_state_values.public_extensions, /* public_share */ &encoded_state_values.public_share, @@ -2696,14 +2696,14 @@ ON CONFLICT(task_id, aggregation_job_id, ord) DO UPDATE /* leader_input_share */ &encoded_state_values.leader_input_share, /* helper_encrypted_input_share */ &encoded_state_values.helper_encrypted_input_share, - /* leader_prep_transition */ - &encoded_state_values.leader_prep_continuation, - /* leader_prep_state */ &encoded_state_values.leader_prep_state, - /* prepare_init */ &encoded_state_values.prepare_init, + /* leader_verify_transition */ + &encoded_state_values.leader_verify_continuation, + /* leader_verify_state */ &encoded_state_values.leader_verify_state, + /* verify_init */ &encoded_state_values.verify_init, /* require_taskbind_extension */ &encoded_state_values.require_taskbind_extension, 
- /* helper_prep_state */ &encoded_state_values.helper_prep_state, - /* prepare_continue */ &encoded_state_values.prepare_continue, + /* helper_verify_state */ &encoded_state_values.helper_verify_state, + /* verify_continue */ &encoded_state_values.verify_continue, /* error_code */ &encoded_state_values.report_error, /* created_at */ &now, /* updated_at */ &now, @@ -2758,20 +2758,20 @@ WHERE aggregation_jobs.task_id = $1 AND aggregation_job_id = $2 ON CONFLICT(task_id, aggregation_job_id, ord) DO UPDATE SET ( - client_report_id, client_timestamp, last_prep_resp, state, + client_report_id, client_timestamp, last_verify_resp, state, public_extensions, public_share, leader_private_extensions, leader_input_share, helper_encrypted_input_share, - leader_prep_transition, leader_prep_state, - prepare_init, helper_prep_state, prepare_continue, error_code, + leader_verify_transition, leader_verify_state, + verify_init, helper_verify_state, verify_continue, error_code, created_at, updated_at, updated_by ) = ( excluded.client_report_id, excluded.client_timestamp, - excluded.last_prep_resp, excluded.state, excluded.public_extensions, + excluded.last_verify_resp, excluded.state, excluded.public_extensions, excluded.public_share, excluded.leader_private_extensions, excluded.leader_input_share, excluded.helper_encrypted_input_share, - excluded.leader_prep_transition, excluded.leader_prep_state, - excluded.prepare_init, - excluded.helper_prep_state, excluded.prepare_continue, + excluded.leader_verify_transition, excluded.leader_verify_state, + excluded.verify_init, + excluded.helper_verify_state, excluded.verify_continue, excluded.error_code, excluded.created_at, excluded.updated_at, excluded.updated_by ) @@ -2823,20 +2823,20 @@ WHERE aggregation_jobs.task_id = $1 AND aggregation_job_id = $2 ON CONFLICT(task_id, aggregation_job_id, ord) DO UPDATE SET ( - client_report_id, client_timestamp, last_prep_resp, state, + client_report_id, client_timestamp, last_verify_resp, state, 
public_extensions, public_share, leader_private_extensions, leader_input_share, helper_encrypted_input_share, - leader_prep_transition, leader_prep_state, - prepare_init, helper_prep_state, prepare_continue, error_code, + leader_verify_transition, leader_verify_state, + verify_init, helper_verify_state, verify_continue, error_code, created_at, updated_at, updated_by ) = ( excluded.client_report_id, excluded.client_timestamp, - excluded.last_prep_resp, excluded.state, excluded.public_extensions, + excluded.last_verify_resp, excluded.state, excluded.public_extensions, excluded.public_share, excluded.leader_private_extensions, excluded.leader_input_share, excluded.helper_encrypted_input_share, - excluded.leader_prep_transition, excluded.leader_prep_state, - excluded.prepare_init, - excluded.helper_prep_state, excluded.prepare_continue, + excluded.leader_verify_transition, excluded.leader_verify_state, + excluded.verify_init, + excluded.helper_verify_state, excluded.verify_continue, excluded.error_code, excluded.created_at, excluded.updated_at, excluded.updated_by ) @@ -2889,9 +2889,9 @@ ON CONFLICT(task_id, aggregation_job_id, ord) DO UPDATE let now = self.clock.now(); let encoded_state_values = report_aggregation.state().encoded_values_from_state()?; - let encoded_last_prep_resp: Option> = report_aggregation - .last_prep_resp() - .map(PrepareResp::get_encoded) + let encoded_last_verify_resp: Option> = report_aggregation + .last_verify_resp() + .map(VerifyResp::get_encoded) .transpose()?; let stmt = self @@ -2899,12 +2899,12 @@ ON CONFLICT(task_id, aggregation_job_id, ord) DO UPDATE "-- update_report_aggregation() UPDATE report_aggregations SET - last_prep_resp = $1, state = $2, public_extensions = $3, public_share = $4, + last_verify_resp = $1, state = $2, public_extensions = $3, public_share = $4, leader_private_extensions = $5, leader_input_share = $6, - helper_encrypted_input_share = $7, leader_prep_transition = $8, - leader_prep_state = $9, prepare_init = $10, - 
require_taskbind_extension = $11, helper_prep_state = $12, - prepare_continue = $13, error_code = $14, updated_at = $15, + helper_encrypted_input_share = $7, leader_verify_transition = $8, + leader_verify_state = $9, verify_init = $10, + require_taskbind_extension = $11, helper_verify_state = $12, + verify_continue = $13, error_code = $14, updated_at = $15, updated_by = $16 FROM aggregation_jobs WHERE report_aggregations.aggregation_job_id = aggregation_jobs.id @@ -2921,7 +2921,7 @@ WHERE report_aggregations.aggregation_job_id = aggregation_jobs.id self.execute( &stmt, &[ - /* last_prep_resp */ &encoded_last_prep_resp, + /* last_verify_resp */ &encoded_last_verify_resp, /* state */ &report_aggregation.state().state_code(), /* public_extensions */ &encoded_state_values.public_extensions, /* public_share */ &encoded_state_values.public_share, @@ -2930,14 +2930,14 @@ WHERE report_aggregations.aggregation_job_id = aggregation_jobs.id /* leader_input_share */ &encoded_state_values.leader_input_share, /* helper_encrypted_input_share */ &encoded_state_values.helper_encrypted_input_share, - /* leader_prep_transition */ - &encoded_state_values.leader_prep_continuation, - /* leader_prep_state */ &encoded_state_values.leader_prep_state, - /* prepare_init */ &encoded_state_values.prepare_init, + /* leader_verify_transition */ + &encoded_state_values.leader_verify_continuation, + /* leader_verify_state */ &encoded_state_values.leader_verify_state, + /* verify_init */ &encoded_state_values.verify_init, /* require_taskbind_extension */ &encoded_state_values.require_taskbind_extension, - /* helper_prep_state */ &encoded_state_values.helper_prep_state, - /* prepare_continue */ &encoded_state_values.prepare_continue, + /* helper_verify_state */ &encoded_state_values.helper_verify_state, + /* verify_continue */ &encoded_state_values.verify_continue, /* error_code */ &encoded_state_values.report_error, /* updated_at */ &now, /* updated_by */ &self.name, diff --git 
a/aggregator_core/src/datastore/models.rs b/aggregator_core/src/datastore/models.rs index fc424fdf2..40ec5bfee 100644 --- a/aggregator_core/src/datastore/models.rs +++ b/aggregator_core/src/datastore/models.rs @@ -20,9 +20,9 @@ use janus_core::{ }; use janus_messages::{ AggregateShareId, AggregationJobId, AggregationJobStep, BatchId, CollectionJobId, Duration, - Extension, HpkeCiphertext, HpkeConfigId, Interval, PrepareContinue, PrepareInit, PrepareResp, - Query, ReportError, ReportId, ReportIdChecksum, ReportMetadata, Role, TaskId, Time, - TimePrecision, + Extension, HpkeCiphertext, HpkeConfigId, Interval, Query, ReportError, ReportId, + ReportIdChecksum, ReportMetadata, Role, TaskId, Time, TimePrecision, VerifyContinue, + VerifyInit, VerifyResp, batch_mode::{BatchMode, LeaderSelected, TimeInterval}, }; use postgres_protocol::types::{ @@ -374,7 +374,7 @@ pub struct AggregationJob> /// corresponds to the AGGREGATION_JOB_STATE enum in the schema. /// /// These are implementation-specific states used for Janus's internal state management. -/// DAP §4.6 [dap-16] defines aggregation job completion in terms of individual report -/// preparation states (Continued, FinishedWithOutbound, Finished, Rejected), not job-level +/// DAP §4.6 [dap-16] defines aggregation job completion in terms of individual +/// report verification states (Continued, FinishedWithOutbound, Finished, Rejected), not job-level /// states. This enum provides operational states for managing the lifecycle of aggregation /// jobs within Janus. #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, ToSql, FromSql)] @@ -798,9 +798,9 @@ impl AcquiredCollectionJob { /// ReportAggregation represents a the state of a single client report's ongoing aggregation.
#[derive(Clone, Debug)] -// PartialEq and Eq are gated on the `test-util` feature as we do not wish to compare preparation +// PartialEq and Eq are gated on the `test-util` feature as we do not wish to compare verification // states in non-test code, since doing so would require a constant-time comparison to avoid risking -// leaking information about the preparation state. +// leaking information about the verification state. #[cfg_attr(feature = "test-util", derive(Educe))] #[cfg_attr(feature = "test-util", educe(PartialEq, Eq))] pub struct ReportAggregation> { @@ -809,7 +809,7 @@ pub struct ReportAggregation, + last_verify_resp: Option, state: ReportAggregationState, } @@ -821,7 +821,7 @@ impl> ReportAggregation, + last_verify_resp: Option, state: ReportAggregationState, ) -> Self { Self { @@ -830,7 +830,7 @@ impl> ReportAggregation> ReportAggregation Option<&PrepareResp> { - self.last_prep_resp.as_ref() + /// Returns the last verification response returned by the Helper, if any. + pub fn last_verify_resp(&self) -> Option<&VerifyResp> { + self.last_verify_resp.as_ref() } /// Returns a new [`ReportAggregation`] corresponding to this report aggregation updated to - /// have the given last preparation response. - pub fn with_last_prep_resp(self, last_prep_resp: Option) -> Self { + /// have the given last verification response. + pub fn with_last_verify_resp(self, last_verify_resp: Option) -> Self { Self { - last_prep_resp, + last_verify_resp, ..self } } @@ -928,7 +928,7 @@ pub enum ReportAggregationState> ReportAggregationSta } /// Returns the encoded values for the various messages which might be included in a - /// ReportAggregationState. The order of returned values is preparation state, preparation + /// ReportAggregationState. The order of returned values is verification state, verification /// message, output share, transition error. 
pub(super) fn encoded_values_from_state( &self, @@ -1048,40 +1048,40 @@ impl> ReportAggregationSta ReportAggregationState::LeaderContinue { continuation } | ReportAggregationState::LeaderPollContinue { continuation } => { EncodedReportAggregationStateValues { - leader_prep_continuation: Some(continuation.get_encoded()?), + leader_verify_continuation: Some(continuation.get_encoded()?), ..Default::default() } } - ReportAggregationState::LeaderPollInit { prepare_state } => { + ReportAggregationState::LeaderPollInit { verify_state } => { EncodedReportAggregationStateValues { - leader_prep_state: Some(prepare_state.get_encoded()?), + leader_verify_state: Some(verify_state.get_encoded()?), ..Default::default() } } ReportAggregationState::HelperInitProcessing { - prepare_init, + verify_init, require_taskbind_extension, } => EncodedReportAggregationStateValues { - prepare_init: Some(prepare_init.get_encoded()?), + verify_init: Some(verify_init.get_encoded()?), require_taskbind_extension: Some(*require_taskbind_extension), ..Default::default() }, - ReportAggregationState::HelperContinue { prepare_state } => { + ReportAggregationState::HelperContinue { verify_state } => { EncodedReportAggregationStateValues { - helper_prep_state: Some(prepare_state.get_encoded()?), + helper_verify_state: Some(verify_state.get_encoded()?), ..Default::default() } } ReportAggregationState::HelperContinueProcessing { - prepare_state, - prepare_continue, + verify_state, + verify_continue, } => EncodedReportAggregationStateValues { - helper_prep_state: Some(prepare_state.get_encoded()?), - prepare_continue: Some(prepare_continue.get_encoded()?), + helper_verify_state: Some(verify_state.get_encoded()?), + verify_continue: Some(verify_continue.get_encoded()?), ..Default::default() }, @@ -1107,20 +1107,20 @@ pub(super) struct EncodedReportAggregationStateValues { pub(super) helper_encrypted_input_share: Option>, // State for LeaderContinue or LeaderPollContinue. 
- pub(super) leader_prep_continuation: Option>, + pub(super) leader_verify_continuation: Option>, // State for LeaderPollInit. - pub(super) leader_prep_state: Option>, + pub(super) leader_verify_state: Option>, // State for HelperInitProcessing. - pub(super) prepare_init: Option>, + pub(super) verify_init: Option>, pub(super) require_taskbind_extension: Option, // State for HelperContinue & HelperContinueProcessing. - pub(super) helper_prep_state: Option>, + pub(super) helper_verify_state: Option>, // State for HelperContinueProcessing. - pub(super) prepare_continue: Option>, + pub(super) verify_continue: Option>, // State for Failed. pub(super) report_error: Option, @@ -1151,8 +1151,8 @@ pub(super) enum ReportAggregationStateCode { } // This trait implementation is gated on the `test-util` feature as we do not wish to compare -// preparation states in non-test code, since doing so would require a constant-time comparison to -// avoid risking leaking information about the preparation state. +// verification states in non-test code, since doing so would require a constant-time comparison to +// avoid risking leaking information about the verification state. 
#[cfg(feature = "test-util")] impl> PartialEq for ReportAggregationState @@ -1193,10 +1193,10 @@ impl> PartialEq ( Self::LeaderPollInit { - prepare_state: lhs_leader_state, + verify_state: lhs_leader_state, }, Self::LeaderPollInit { - prepare_state: rhs_leader_state, + verify_state: rhs_leader_state, }, ) => lhs_leader_state == rhs_leader_state, @@ -1211,37 +1211,37 @@ impl> PartialEq ( Self::HelperInitProcessing { - prepare_init: lhs_prepare_init, + verify_init: lhs_verify_init, require_taskbind_extension: lhs_require_taskbind_extension, }, Self::HelperInitProcessing { - prepare_init: rhs_prepare_init, + verify_init: rhs_verify_init, require_taskbind_extension: rhs_require_taskbind_extension, }, ) => { - lhs_prepare_init == rhs_prepare_init + lhs_verify_init == rhs_verify_init && lhs_require_taskbind_extension == rhs_require_taskbind_extension } ( Self::HelperContinue { - prepare_state: lhs_state, + verify_state: lhs_state, }, Self::HelperContinue { - prepare_state: rhs_state, + verify_state: rhs_state, }, ) => lhs_state == rhs_state, ( Self::HelperContinueProcessing { - prepare_state: lhs_state, - prepare_continue: lhs_prepare_continue, + verify_state: lhs_state, + verify_continue: lhs_verify_continue, }, Self::HelperContinueProcessing { - prepare_state: rhs_state, - prepare_continue: rhs_prepare_continue, + verify_state: rhs_state, + verify_continue: rhs_verify_continue, }, - ) => lhs_state == rhs_state && lhs_prepare_continue == rhs_prepare_continue, + ) => lhs_state == rhs_state && lhs_verify_continue == rhs_verify_continue, ( Self::Failed { @@ -1258,8 +1258,8 @@ impl> PartialEq } // This trait implementation is gated on the `test-util` feature as we do not wish to compare -// preparation states in non-test code, since doing so would require a constant-time comparison to -// avoid risking leaking information about the preparation state. 
+// verification states in non-test code, since doing so would require a constant-time comparison to +// avoid risking leaking information about the verification state. #[cfg(feature = "test-util")] impl> Eq for ReportAggregationState @@ -1367,7 +1367,7 @@ pub struct BatchAggregation> #[educe(Debug)] pub enum BatchAggregationState> { Aggregating { - /// The aggregate over all the input shares that have been prepared so far by this + /// The aggregate over all the input shares that have been verified so far by this /// aggregator. Will only be None if there are no reports. #[educe(Debug(ignore))] aggregate_share: Option, @@ -1577,7 +1577,7 @@ pub enum BatchAggregationState, @@ -1793,7 +1793,7 @@ pub struct CollectionJob, - /// The VDAF aggregation parameter used to prepare and aggregate input shares. + /// The VDAF aggregation parameter used to verify and aggregate input shares. #[educe(Debug(ignore))] aggregation_parameter: A::AggregationParam, /// The batch interval covered by the collection job. 
@@ -2004,7 +2004,7 @@ pub struct AggregateShareJob Self { - self.inner.lock().unwrap().vdaf_prep_error = value; + pub fn with_vdaf_verify_error(self, value: u64) -> Self { + self.inner.lock().unwrap().vdaf_verify_error = value; self } @@ -405,7 +405,7 @@ impl TaskAggregationCounter { report_dropped: u64, hpke_unknown_config_id: u64, hpke_decrypt_failure: u64, - vdaf_prep_error: u64, + vdaf_verify_error: u64, task_not_started: u64, task_expired: u64, invalid_message: u64, @@ -415,7 +415,7 @@ impl TaskAggregationCounter { helper_report_dropped: u64, helper_hpke_unknown_config_id: u64, helper_hpke_decrypt_failure: u64, - helper_vdaf_prep_error: u64, + helper_vdaf_verify_error: u64, helper_task_not_started: u64, helper_task_expired: u64, helper_invalid_message: u64, @@ -431,7 +431,7 @@ impl TaskAggregationCounter { report_dropped, hpke_unknown_config_id, hpke_decrypt_failure, - vdaf_prep_error, + vdaf_verify_error, task_not_started, task_expired, invalid_message, @@ -441,7 +441,7 @@ impl TaskAggregationCounter { helper_report_dropped, helper_hpke_unknown_config_id, helper_hpke_decrypt_failure, - helper_vdaf_prep_error, + helper_vdaf_verify_error, helper_task_not_started, helper_task_expired, helper_invalid_message, @@ -475,7 +475,7 @@ SELECT COALESCE(SUM(report_dropped)::BIGINT, 0) AS report_dropped, COALESCE(SUM(hpke_unknown_config_id)::BIGINT, 0) AS hpke_unknown_config_id, COALESCE(SUM(hpke_decrypt_failure)::BIGINT, 0) AS hpke_decrypt_failure, - COALESCE(SUM(vdaf_prep_error)::BIGINT, 0) AS vdaf_prep_error, + COALESCE(SUM(vdaf_verify_error)::BIGINT, 0) AS vdaf_verify_error, COALESCE(SUM(task_not_started)::BIGINT, 0) AS task_not_started, COALESCE(SUM(task_expired)::BIGINT, 0) AS task_expired, COALESCE(SUM(invalid_message)::BIGINT, 0) AS invalid_message, @@ -485,7 +485,7 @@ SELECT COALESCE(SUM(helper_report_dropped)::BIGINT, 0) AS helper_report_dropped, COALESCE(SUM(helper_hpke_unknown_config_id)::BIGINT, 0) AS helper_hpke_unknown_config_id, 
COALESCE(SUM(helper_hpke_decrypt_failure)::BIGINT, 0) AS helper_hpke_decrypt_failure, - COALESCE(SUM(helper_vdaf_prep_error)::BIGINT, 0) AS helper_vdaf_prep_error, + COALESCE(SUM(helper_vdaf_verify_error)::BIGINT, 0) AS helper_vdaf_verify_error, COALESCE(SUM(helper_task_not_started)::BIGINT, 0) AS helper_task_not_started, COALESCE(SUM(helper_task_expired)::BIGINT, 0) AS helper_task_expired, COALESCE(SUM(helper_invalid_message)::BIGINT, 0) AS helper_invalid_message, @@ -507,7 +507,7 @@ WHERE task_id = $1", row.get_bigint_and_convert("report_dropped")?, row.get_bigint_and_convert("hpke_unknown_config_id")?, row.get_bigint_and_convert("hpke_decrypt_failure")?, - row.get_bigint_and_convert("vdaf_prep_error")?, + row.get_bigint_and_convert("vdaf_verify_error")?, row.get_bigint_and_convert("task_not_started")?, row.get_bigint_and_convert("task_expired")?, row.get_bigint_and_convert("invalid_message")?, @@ -517,7 +517,7 @@ WHERE task_id = $1", row.get_bigint_and_convert("helper_report_dropped")?, row.get_bigint_and_convert("helper_hpke_unknown_config_id")?, row.get_bigint_and_convert("helper_hpke_decrypt_failure")?, - row.get_bigint_and_convert("helper_vdaf_prep_error")?, + row.get_bigint_and_convert("helper_vdaf_verify_error")?, row.get_bigint_and_convert("helper_task_not_started")?, row.get_bigint_and_convert("helper_task_expired")?, row.get_bigint_and_convert("helper_invalid_message")?, @@ -550,10 +550,10 @@ WHERE task_id = $1", "-- increment_task_aggregation_counter() INSERT INTO task_aggregation_counters (task_id, ord, success, duplicate_extension, public_share_encode_failure, batch_collected, report_replayed, report_dropped, -hpke_unknown_config_id, hpke_decrypt_failure, vdaf_prep_error, task_not_started, task_expired, +hpke_unknown_config_id, hpke_decrypt_failure, vdaf_verify_error, task_not_started, task_expired, invalid_message, report_too_early, helper_batch_collected, helper_report_replayed, helper_report_dropped, helper_hpke_unknown_config_id, 
helper_hpke_decrypt_failure, -helper_vdaf_prep_error, helper_task_not_started, helper_task_expired, helper_invalid_message, +helper_vdaf_verify_error, helper_task_not_started, helper_task_expired, helper_invalid_message, helper_report_too_early) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25) @@ -566,7 +566,7 @@ ON CONFLICT (task_id, ord) DO UPDATE SET report_dropped = task_aggregation_counters.report_dropped + $8, hpke_unknown_config_id = task_aggregation_counters.hpke_unknown_config_id + $9, hpke_decrypt_failure = task_aggregation_counters.hpke_decrypt_failure + $10, - vdaf_prep_error = task_aggregation_counters.vdaf_prep_error + $11, + vdaf_verify_error = task_aggregation_counters.vdaf_verify_error + $11, task_not_started = task_aggregation_counters.task_not_started + $12, task_expired = task_aggregation_counters.task_expired + $13, invalid_message = task_aggregation_counters.invalid_message + $14, @@ -576,7 +576,7 @@ ON CONFLICT (task_id, ord) DO UPDATE SET helper_report_dropped = task_aggregation_counters.helper_report_dropped + $18, helper_hpke_unknown_config_id = task_aggregation_counters.helper_hpke_unknown_config_id + $19, helper_hpke_decrypt_failure = task_aggregation_counters.helper_hpke_decrypt_failure + $20, - helper_vdaf_prep_error = task_aggregation_counters.helper_vdaf_prep_error + $21, + helper_vdaf_verify_error = task_aggregation_counters.helper_vdaf_verify_error + $21, helper_task_not_started = task_aggregation_counters.helper_task_not_started + $22, helper_task_expired = task_aggregation_counters.helper_task_expired + $23, helper_invalid_message = task_aggregation_counters.helper_invalid_message + $24, @@ -600,7 +600,7 @@ ON CONFLICT (task_id, ord) DO UPDATE SET /* hpke_unknown_config_id */ &i64::try_from(inner.hpke_unknown_config_id)?, /* hpke_decrypt_failure */ &i64::try_from(inner.hpke_decrypt_failure)?, - /* vdaf_prep_error */ &i64::try_from(inner.vdaf_prep_error)?, 
+ /* vdaf_verify_error */ &i64::try_from(inner.vdaf_verify_error)?, /* task_not_started */ &i64::try_from(inner.task_not_started)?, /* task_expired */ &i64::try_from(inner.task_expired)?, /* invalid_message */ &i64::try_from(inner.invalid_message)?, @@ -615,8 +615,8 @@ ON CONFLICT (task_id, ord) DO UPDATE SET &i64::try_from(inner.helper_hpke_unknown_config_id)?, /* helper_hpke_decrypt_failure */ &i64::try_from(inner.helper_hpke_decrypt_failure)?, - /* helper_vdaf_prep_error */ - &i64::try_from(inner.helper_vdaf_prep_error)?, + /* helper_vdaf_verify_error */ + &i64::try_from(inner.helper_vdaf_verify_error)?, /* helper_task_not_started */ &i64::try_from(inner.helper_task_not_started)?, /* helper_task_expired */ &i64::try_from(inner.helper_task_expired)?, @@ -641,7 +641,7 @@ ON CONFLICT (task_id, ord) DO UPDATE SET self.inner.lock().unwrap().success += 1 } - /// Increments the appropriate counter based on the prepare failure. + /// Increments the appropriate counter based on the verify failure. 
pub fn increment_with_report_error(&self, error: ReportError) { match error { ReportError::BatchCollected => self.inner.lock().unwrap().batch_collected += 1, @@ -651,16 +651,16 @@ ON CONFLICT (task_id, ord) DO UPDATE SET self.inner.lock().unwrap().hpke_unknown_config_id += 1 } ReportError::HpkeDecryptError => self.inner.lock().unwrap().hpke_decrypt_failure += 1, - ReportError::VdafPrepError => self.inner.lock().unwrap().vdaf_prep_error += 1, + ReportError::VdafVerifyError => self.inner.lock().unwrap().vdaf_verify_error += 1, ReportError::TaskNotStarted => self.inner.lock().unwrap().task_not_started += 1, ReportError::TaskExpired => self.inner.lock().unwrap().task_expired += 1, ReportError::InvalidMessage => self.inner.lock().unwrap().invalid_message += 1, ReportError::ReportTooEarly => self.inner.lock().unwrap().report_too_early += 1, - _ => tracing::debug!(?error, "unexpected prepare error"), + _ => tracing::debug!(?error, "unexpected verify error"), } } - /// Increments the appropriate counter based on the helper prepare failure. + /// Increments the appropriate counter based on the helper verify failure. 
pub fn increment_with_helper_report_error(&self, helper_error: ReportError) { match helper_error { ReportError::BatchCollected => self.inner.lock().unwrap().helper_batch_collected += 1, @@ -672,12 +672,14 @@ ON CONFLICT (task_id, ord) DO UPDATE SET ReportError::HpkeDecryptError => { self.inner.lock().unwrap().helper_hpke_decrypt_failure += 1 } - ReportError::VdafPrepError => self.inner.lock().unwrap().helper_vdaf_prep_error += 1, + ReportError::VdafVerifyError => { + self.inner.lock().unwrap().helper_vdaf_verify_error += 1 + } ReportError::TaskNotStarted => self.inner.lock().unwrap().helper_task_not_started += 1, ReportError::TaskExpired => self.inner.lock().unwrap().helper_task_expired += 1, ReportError::InvalidMessage => self.inner.lock().unwrap().helper_invalid_message += 1, ReportError::ReportTooEarly => self.inner.lock().unwrap().helper_report_too_early += 1, - _ => tracing::debug!(?helper_error, "unexpected prepare error from helper"), + _ => tracing::debug!(?helper_error, "unexpected verify error from helper"), } } } diff --git a/aggregator_core/src/datastore/tests.rs b/aggregator_core/src/datastore/tests.rs index 4485c56aa..80c7b5db3 100644 --- a/aggregator_core/src/datastore/tests.rs +++ b/aggregator_core/src/datastore/tests.rs @@ -25,8 +25,8 @@ use janus_core::{ use janus_messages::{ AggregateShareAad, AggregationJobId, AggregationJobStep, BatchId, BatchSelector, CollectionJobId, Duration, Extension, ExtensionType, HpkeCiphertext, HpkeConfigId, Interval, - PrepareContinue, PrepareInit, PrepareResp, PrepareStepResult, Query, ReportError, ReportId, - ReportIdChecksum, ReportMetadata, ReportShare, Role, TaskId, Time, TimePrecision, + Query, ReportError, ReportId, ReportIdChecksum, ReportMetadata, ReportShare, Role, TaskId, + Time, TimePrecision, VerifyContinue, VerifyInit, VerifyResp, VerifyStepResult, batch_mode::{BatchMode, LeaderSelected, TimeInterval}, }; use postgres_types::Timestamp; @@ -2668,7 +2668,7 @@ async fn 
roundtrip_report_aggregation(ephemeral_datastore: EphemeralDatastore) { ( Role::Leader, ReportAggregationState::LeaderContinue { - continuation: vdaf_transcript.leader_prepare_transitions[1] + continuation: vdaf_transcript.leader_verify_transitions[1] .continuation .clone() .unwrap(), @@ -2677,13 +2677,13 @@ async fn roundtrip_report_aggregation(ephemeral_datastore: EphemeralDatastore) { ( Role::Leader, ReportAggregationState::LeaderPollInit { - prepare_state: *vdaf_transcript.leader_prepare_transitions[0].prepare_state(), + verify_state: *vdaf_transcript.leader_verify_transitions[0].verify_state(), }, ), ( Role::Leader, ReportAggregationState::LeaderPollContinue { - continuation: vdaf_transcript.leader_prepare_transitions[1] + continuation: vdaf_transcript.leader_verify_transitions[1] .continuation .clone() .unwrap(), @@ -2692,7 +2692,7 @@ async fn roundtrip_report_aggregation(ephemeral_datastore: EphemeralDatastore) { ( Role::Helper, ReportAggregationState::HelperInitProcessing { - prepare_init: PrepareInit::new( + verify_init: VerifyInit::new( ReportShare::new( ReportMetadata::new( report_id, @@ -2706,7 +2706,7 @@ async fn roundtrip_report_aggregation(ephemeral_datastore: EphemeralDatastore) { Vec::from("payload"), ), ), - vdaf_transcript.leader_prepare_transitions[0] + vdaf_transcript.leader_verify_transitions[0] .message() .unwrap() .clone(), @@ -2717,16 +2717,16 @@ async fn roundtrip_report_aggregation(ephemeral_datastore: EphemeralDatastore) { ( Role::Helper, ReportAggregationState::HelperContinue { - prepare_state: *vdaf_transcript.helper_prepare_transitions[0].prepare_state(), + verify_state: *vdaf_transcript.helper_verify_transitions[0].verify_state(), }, ), ( Role::Helper, ReportAggregationState::HelperContinueProcessing { - prepare_state: *vdaf_transcript.helper_prepare_transitions[0].prepare_state(), - prepare_continue: PrepareContinue::new( + verify_state: *vdaf_transcript.helper_verify_transitions[0].verify_state(), + verify_continue: 
VerifyContinue::new( report_id, - vdaf_transcript.leader_prepare_transitions[1] + vdaf_transcript.leader_verify_transitions[1] .message() .unwrap() .clone(), @@ -2738,13 +2738,13 @@ async fn roundtrip_report_aggregation(ephemeral_datastore: EphemeralDatastore) { ( Role::Leader, ReportAggregationState::Failed { - report_error: ReportError::VdafPrepError, + report_error: ReportError::VdafVerifyError, }, ), ( Role::Helper, ReportAggregationState::Failed { - report_error: ReportError::VdafPrepError, + report_error: ReportError::VdafVerifyError, }, ), ] @@ -2777,9 +2777,9 @@ async fn roundtrip_report_aggregation(ephemeral_datastore: EphemeralDatastore) { report_id, START_TIME, ord.try_into().unwrap(), - Some(PrepareResp::new( + Some(VerifyResp::new( report_id, - PrepareStepResult::Continue { + VerifyStepResult::Continue { message: PingPongMessage::Continue { verifier_message: format!("verifier_message_{ord}").into(), verifier_share: format!("verifier_share_{ord}").into(), @@ -2867,9 +2867,9 @@ WHERE client_report_id = $1", *want_report_aggregation.report_id(), *want_report_aggregation.time(), want_report_aggregation.ord(), - Some(PrepareResp::new( + Some(VerifyResp::new( report_id, - PrepareStepResult::Continue { + VerifyStepResult::Continue { message: PingPongMessage::Continue { verifier_message: format!("updated_verifier_message_{ord}").into(), verifier_share: format!("updated_verifier_share_{ord}").into(), @@ -3018,7 +3018,7 @@ async fn report_aggregation_not_found(ephemeral_datastore: EphemeralDatastore) { 0, None, ReportAggregationState::Failed { - report_error: ReportError::VdafPrepError, + report_error: ReportError::VdafVerifyError, }, )) .await @@ -3095,12 +3095,11 @@ async fn get_report_aggregations_for_aggregation_job(ephemeral_datastore: Epheme ), }, ReportAggregationState::HelperContinue { - prepare_state: *vdaf_transcript.helper_prepare_transitions[0] - .prepare_state(), + verify_state: *vdaf_transcript.helper_verify_transitions[0].verify_state(), }, 
ReportAggregationState::Finished, ReportAggregationState::Failed { - report_error: ReportError::VdafPrepError, + report_error: ReportError::VdafVerifyError, }, ] .iter() @@ -3117,7 +3116,7 @@ async fn get_report_aggregations_for_aggregation_job(ephemeral_datastore: Epheme report_id, START_TIME, ord.try_into().unwrap(), - Some(PrepareResp::new(report_id, PrepareStepResult::Finished)), + Some(VerifyResp::new(report_id, VerifyStepResult::Finished)), state.clone(), ); tx.put_report_aggregation(&report_aggregation) @@ -6515,7 +6514,7 @@ async fn roundtrip_outstanding_batch(ephemeral_datastore: EphemeralDatastore) { None, // Counted among max_size. ReportAggregationState::LeaderContinue { - continuation: transcript.helper_prepare_transitions[0] + continuation: transcript.helper_verify_transitions[0] .continuation .clone() .unwrap(), @@ -6529,7 +6528,7 @@ async fn roundtrip_outstanding_batch(ephemeral_datastore: EphemeralDatastore) { 2, None, ReportAggregationState::Failed { - report_error: ReportError::VdafPrepError, + report_error: ReportError::VdafVerifyError, }, // Not counted among min_size or max_size. ); @@ -6568,7 +6567,7 @@ async fn roundtrip_outstanding_batch(ephemeral_datastore: EphemeralDatastore) { 2, None, ReportAggregationState::Failed { - report_error: ReportError::VdafPrepError, + report_error: ReportError::VdafVerifyError, }, // Not counted among min_size or max_size. ); diff --git a/aggregator_core/src/task.rs b/aggregator_core/src/task.rs index d5cf1c3e9..df3a09c87 100644 --- a/aggregator_core/src/task.rs +++ b/aggregator_core/src/task.rs @@ -198,7 +198,7 @@ impl CommonTaskParameters { }) } - /// Returns the [`VerifyKey`] used by this aggregator to prepare report shares with other + /// Returns the [`VerifyKey`] used by this aggregator to verify report shares with other /// aggregators. 
/// /// # Errors @@ -360,7 +360,7 @@ impl AggregatorTask { batch_size >= self.common_parameters.min_batch_size } - /// Returns the [`VerifyKey`] used by this aggregator to prepare report shares with other + /// Returns the [`VerifyKey`] used by this aggregator to verify report shares with other /// aggregators. /// /// # Errors @@ -974,7 +974,7 @@ pub mod test_util { &self.collector_auth_token } - /// Returns the [`VerifyKey`] used by this aggregator to prepare report shares with other + /// Returns the [`VerifyKey`] used by this aggregator to verify report shares with other /// aggregators. /// /// # Errors diff --git a/core/src/test_util/mod.rs b/core/src/test_util/mod.rs index 7ae00b68d..f6390feeb 100644 --- a/core/src/test_util/mod.rs +++ b/core/src/test_util/mod.rs @@ -19,7 +19,7 @@ pub mod runtime; pub mod testcontainers; #[derive(Clone, Debug)] -pub struct PrepareTransition +pub struct VerifyTransition where V: vdaf::Aggregator, { @@ -27,11 +27,11 @@ where pub state: PingPongState, } -impl PrepareTransition +impl VerifyTransition where V: vdaf::Aggregator, { - pub fn prepare_state(&self) -> &V::VerifyState { + pub fn verify_state(&self) -> &V::VerifyState { assert_matches!(self.state, PingPongState::Continued(Continued{ ref verifier_state, .. }) => verifier_state) @@ -63,12 +63,12 @@ where /// The leader's states and messages computed throughout the protocol run. Indexed by the /// aggregation job step. #[allow(clippy::type_complexity)] - pub leader_prepare_transitions: Vec>, + pub leader_verify_transitions: Vec>, /// The helper's states and messages computed throughout the protocol run. Indexed by the /// aggregation job step. #[allow(clippy::type_complexity)] - pub helper_prepare_transitions: Vec>, + pub helper_verify_transitions: Vec>, /// The leader's computed output share. 
pub leader_output_share: V::OutputShare, @@ -98,10 +98,10 @@ where { let ctx = vdaf_application_context(task_id); - let mut leader_prepare_transitions = Vec::new(); - let mut helper_prepare_transitions = Vec::new(); + let mut leader_verify_transitions = Vec::new(); + let mut helper_verify_transitions = Vec::new(); - // Shard inputs into input shares, and initialize the initial PrepareTransitions. + // Shard inputs into input shares, and initialize the initial VerifyTransitions. let (public_share, input_shares) = vdaf.shard(&ctx, measurement, report_id.as_ref()).unwrap(); let leader_state = vdaf @@ -115,7 +115,7 @@ where ) .unwrap(); - leader_prepare_transitions.push(PrepareTransition { + leader_verify_transitions.push(VerifyTransition { continuation: None, state: PingPongState::Continued(leader_state.clone()), }); @@ -133,7 +133,7 @@ where .unwrap(); let helper_state = helper_transition.evaluate(&ctx, vdaf).unwrap(); - helper_prepare_transitions.push(PrepareTransition { + helper_verify_transitions.push(VerifyTransition { continuation: Some(helper_transition), state: helper_state, }); @@ -145,12 +145,12 @@ where for role in [Role::Leader, Role::Helper] { let (curr_state, last_peer_message) = match role { Role::Leader => ( - leader_prepare_transitions.last().unwrap().state.clone(), - helper_prepare_transitions.last().unwrap().message(), + leader_verify_transitions.last().unwrap().state.clone(), + helper_verify_transitions.last().unwrap().message(), ), Role::Helper => ( - helper_prepare_transitions.last().unwrap().state.clone(), - leader_prepare_transitions.last().unwrap().message(), + helper_verify_transitions.last().unwrap().state.clone(), + leader_verify_transitions.last().unwrap().message(), ), _ => panic!(), }; @@ -180,11 +180,11 @@ where let state = continuation.evaluate(&ctx, vdaf).unwrap(); match role { - Role::Leader => leader_prepare_transitions.push(PrepareTransition { + Role::Leader => leader_verify_transitions.push(VerifyTransition { continuation: 
Some(continuation), state, }), - Role::Helper => helper_prepare_transitions.push(PrepareTransition { + Role::Helper => helper_verify_transitions.push(VerifyTransition { continuation: Some(continuation), state, }), @@ -216,8 +216,8 @@ where public_share, leader_input_share: input_shares[0].clone(), helper_input_share: input_shares[1].clone(), - leader_prepare_transitions, - helper_prepare_transitions, + leader_verify_transitions, + helper_verify_transitions, leader_output_share: leader_output_share.unwrap(), helper_output_share: helper_output_share.unwrap(), leader_aggregate_share, diff --git a/core/src/vdaf.rs b/core/src/vdaf.rs index db8ba74dc..18b6fa25c 100644 --- a/core/src/vdaf.rs +++ b/core/src/vdaf.rs @@ -108,14 +108,14 @@ pub enum VdafInstance { #[cfg(feature = "test-util")] #[cfg_attr(docsrs, doc(cfg(feature = "test-util")))] Fake { rounds: u32 }, - /// A fake, no-op VDAF that always fails during initialization of input preparation. + /// A fake, no-op VDAF that always fails during initialization of input verification. #[cfg(feature = "test-util")] #[cfg_attr(docsrs, doc(cfg(feature = "test-util")))] - FakeFailsPrepInit, - /// A fake, no-op VDAF that always fails when stepping input preparation. + FakeFailsVerifyInit, + /// A fake, no-op VDAF that always fails when stepping input verification. #[cfg(feature = "test-util")] #[cfg_attr(docsrs, doc(cfg(feature = "test-util")))] - FakeFailsPrepStep, + FakeFailsVerifyStep, } impl VdafInstance { @@ -124,8 +124,8 @@ impl VdafInstance { match self { #[cfg(feature = "test-util")] VdafInstance::Fake { .. } - | VdafInstance::FakeFailsPrepInit - | VdafInstance::FakeFailsPrepStep => 0, + | VdafInstance::FakeFailsVerifyInit + | VdafInstance::FakeFailsVerifyStep => 0, VdafInstance::Prio3SumVecField64MultiproofHmacSha256Aes128 { .. } => { VERIFY_KEY_LENGTH_PRIO3_HMACSHA256_AES128 @@ -340,10 +340,10 @@ macro_rules! 
vdaf_dispatch_impl_test_util { $body } - ::janus_core::vdaf::VdafInstance::FakeFailsPrepInit => { + ::janus_core::vdaf::VdafInstance::FakeFailsVerifyInit => { let $vdaf = ::prio::vdaf::dummy::Vdaf::new(1).with_verify_init_fn(|_| { ::std::result::Result::Err(::prio::vdaf::VdafError::Uncategorized( - "FakeFailsPrepInit failed at prep_init".to_string(), + "FakeFailsVerifyInit failed at verify_init".to_string(), )) }); type $Vdaf = ::prio::vdaf::dummy::Vdaf; @@ -353,10 +353,10 @@ macro_rules! vdaf_dispatch_impl_test_util { $body } - ::janus_core::vdaf::VdafInstance::FakeFailsPrepStep => { + ::janus_core::vdaf::VdafInstance::FakeFailsVerifyStep => { let $vdaf = ::prio::vdaf::dummy::Vdaf::new(1).with_verify_next_fn(|_| { ::std::result::Result::Err(::prio::vdaf::VdafError::Uncategorized( - "FakeFailsPrepStep failed at prep_step".to_string(), + "FakeFailsVerifyStep failed at verify_step".to_string(), )) }); type $Vdaf = ::prio::vdaf::dummy::Vdaf; @@ -386,8 +386,8 @@ macro_rules! vdaf_dispatch_impl { } ::janus_core::vdaf::VdafInstance::Fake { .. 
} - | ::janus_core::vdaf::VdafInstance::FakeFailsPrepInit - | ::janus_core::vdaf::VdafInstance::FakeFailsPrepStep => { + | ::janus_core::vdaf::VdafInstance::FakeFailsVerifyInit + | ::janus_core::vdaf::VdafInstance::FakeFailsVerifyStep => { ::janus_core::vdaf_dispatch_impl_test_util!(impl match test_util $vdaf_instance, ($vdaf, $Vdaf, $VERIFY_KEY_LEN, $dp_strategy, $DpStrategy) => $body) } @@ -649,17 +649,17 @@ mod tests { ], ); assert_tokens( - &VdafInstance::FakeFailsPrepInit, + &VdafInstance::FakeFailsVerifyInit, &[Token::UnitVariant { name: "VdafInstance", - variant: "FakeFailsPrepInit", + variant: "FakeFailsVerifyInit", }], ); assert_tokens( - &VdafInstance::FakeFailsPrepStep, + &VdafInstance::FakeFailsVerifyStep, &[Token::UnitVariant { name: "VdafInstance", - variant: "FakeFailsPrepStep", + variant: "FakeFailsVerifyStep", }], ); } diff --git a/db/00000000000001_initial_schema.up.sql b/db/00000000000001_initial_schema.up.sql index 240f5370d..5df15705e 100644 --- a/db/00000000000001_initial_schema.up.sql +++ b/db/00000000000001_initial_schema.up.sql @@ -195,7 +195,7 @@ CREATE TABLE task_aggregation_counters( report_dropped BIGINT NOT NULL DEFAULT 0, hpke_unknown_config_id BIGINT NOT NULL DEFAULT 0, hpke_decrypt_failure BIGINT NOT NULL DEFAULT 0, - vdaf_prep_error BIGINT NOT NULL DEFAULT 0, + vdaf_verify_error BIGINT NOT NULL DEFAULT 0, task_not_started BIGINT NOT NULL DEFAULT 0, task_expired BIGINT NOT NULL DEFAULT 0, invalid_message BIGINT NOT NULL DEFAULT 0, @@ -207,7 +207,7 @@ CREATE TABLE task_aggregation_counters( helper_report_dropped BIGINT NOT NULL DEFAULT 0, helper_hpke_unknown_config_id BIGINT NOT NULL DEFAULT 0, helper_hpke_decrypt_failure BIGINT NOT NULL DEFAULT 0, - helper_vdaf_prep_error BIGINT NOT NULL DEFAULT 0, + helper_vdaf_verify_error BIGINT NOT NULL DEFAULT 0, helper_task_not_started BIGINT NOT NULL DEFAULT 0, helper_task_expired BIGINT NOT NULL DEFAULT 0, helper_invalid_message BIGINT NOT NULL DEFAULT 0, @@ -296,8 +296,8 @@ CREATE TYPE 
REPORT_AGGREGATION_STATE AS ENUM( 'CONTINUE_PROCESSING', -- the aggregator is processing an aggregation continuation request asynchronously (helper only) 'POLL_INIT', -- the aggregator is polling for completion of a previous initialization operation (leader only) 'POLL_CONTINUE', -- the aggregator is polling for completion of a previous continuation operation (leader only) - 'FINISHED', -- the aggregator has completed the preparation process successfully - 'FAILED' -- the aggregator has completed the preparation process unsuccessfully + 'FINISHED', -- the aggregator has completed the verification process successfully + 'FAILED' -- the aggregator has completed the verification process unsuccessfully ); -- An aggregation attempt for a single client report. An aggregation job logically contains a number @@ -311,7 +311,7 @@ CREATE TABLE report_aggregations( client_report_id BYTEA NOT NULL, -- the client report ID this report aggregation is associated with -- the client timestamp this report aggregation is associated with client_timestamp TIMESTAMP WITH TIME ZONE NOT NULL, - last_prep_resp BYTEA, -- the last PrepareResp message sent to the Leader, to assist in replay (opaque DAP message, populated for Helper only) + last_verify_resp BYTEA, -- the last VerifyResp message sent to the Leader, to assist in replay (opaque DAP message, populated for Helper only) state REPORT_AGGREGATION_STATE NOT NULL, -- the current state of this report aggregation -- Additional data for state LeaderInit. @@ -322,20 +322,20 @@ CREATE TABLE report_aggregations( helper_encrypted_input_share BYTEA, -- encoded HPKE ciphertext of helper input share (opaque DAP message) -- Additional data for state LeaderContinue or LeaderPollContinue - leader_prep_transition BYTEA, -- the current VDAF prepare transition (opaque VDAF message) + leader_verify_transition BYTEA, -- the current VDAF verify transition (opaque VDAF message) -- Additional data for state LeaderPollInit. 
- leader_prep_state BYTEA, -- the current prepare state (opaque VDAF message) + leader_verify_state BYTEA, -- the current verify state (opaque VDAF message) -- Additional data for state HelperInitProcessing. - prepare_init BYTEA, -- the preparation initialization message received from the Leader (opaque DAP message) + verify_init BYTEA, -- the verification initialization message received from the Leader (opaque DAP message) require_taskbind_extension BOOLEAN, -- is the taskprov extension required? -- Additional data for state HelperContinue & HelperContinueProcessing. - helper_prep_state BYTEA, -- the current VDAF prepare state (opaque VDAF message) + helper_verify_state BYTEA, -- the current VDAF verify state (opaque VDAF message) -- Additional data for state HelperContinueProcessing. - prepare_continue BYTEA, -- the preparation continuation message received from the Leader (opaque VDAF message) + verify_continue BYTEA, -- the verification continuation message received from the Leader (opaque DAP message) -- Additional data for state Failed. error_code SMALLINT, -- error code corresponding to a DAP ReportShareError value @@ -393,7 +393,7 @@ CREATE INDEX batch_aggregations_gc_time ON batch_aggregations(task_id, UPPER(COA -- Specifies the possible state of a collection job. 
CREATE TYPE COLLECTION_JOB_STATE AS ENUM( 'START', -- this collection job is waiting for reports to be aggregated - 'POLL', -- this collection job is waiting for the helper to complete preparing its aggregate share + 'POLL', -- this collection job is waiting for the helper to complete computing its aggregate share 'FINISHED', -- this collection job has run successfully and is ready to be retrieved by the collector 'ABANDONED', -- this collection job has been abandoned & will never be run again 'DELETED' -- this collection job has been deleted diff --git a/integration_tests/tests/integration/simulation/bad_client.rs b/integration_tests/tests/integration/simulation/bad_client.rs index a4502b68b..b76887f0b 100644 --- a/integration_tests/tests/integration/simulation/bad_client.rs +++ b/integration_tests/tests/integration/simulation/bad_client.rs @@ -496,7 +496,7 @@ fn shard_encoded_measurement_correct() { let verify_key: [u8; VERIFY_KEY_LENGTH_PRIO3] = random(); let ctx = vdaf_application_context(&task_id); - let (leader_prepare_state, leader_prepare_share) = vdaf + let (leader_verify_state, leader_verify_share) = vdaf .verify_init( &verify_key, &ctx, @@ -507,7 +507,7 @@ fn shard_encoded_measurement_correct() { &input_shares[0], ) .unwrap(); - let (helper_prepare_state, helper_prepare_share) = vdaf + let (helper_verify_state, helper_verify_share) = vdaf .verify_init( &verify_key, &ctx, @@ -518,14 +518,14 @@ fn shard_encoded_measurement_correct() { &input_shares[1], ) .unwrap(); - let prepare_message = vdaf - .verifier_shares_to_message(&ctx, &(), [leader_prepare_share, helper_prepare_share]) + let verify_message = vdaf + .verifier_shares_to_message(&ctx, &(), [leader_verify_share, helper_verify_share]) .unwrap(); let leader_transition = vdaf - .verify_next(&ctx, leader_prepare_state, prepare_message.clone()) + .verify_next(&ctx, leader_verify_state, verify_message.clone()) .unwrap(); let helper_transition = vdaf - .verify_next(&ctx, helper_prepare_state, 
prepare_message) + .verify_next(&ctx, helper_verify_state, verify_message) .unwrap(); let leader_output_share = assert_matches!(leader_transition, VerifyTransition::Finish(output_share) => output_share); diff --git a/messages/src/lib.rs b/messages/src/lib.rs index 9b8a7f93d..c73945d09 100644 --- a/messages/src/lib.rs +++ b/messages/src/lib.rs @@ -2047,16 +2047,16 @@ impl Decode for ReportShare { } } -/// DAP protocol message representing information required to initialize preparation of a report for -/// aggregation. +/// DAP protocol message representing information required to initialize verification of a report +/// for aggregation. #[derive(Clone, Debug, PartialEq, Eq)] -pub struct PrepareInit { +pub struct VerifyInit { report_share: ReportShare, message: PingPongMessage, } -impl PrepareInit { - /// Constructs a new preparation initialization message from its components. +impl VerifyInit { + /// Constructs a new verification initialization message from its components. pub fn new(report_share: ReportShare, message: PingPongMessage) -> Self { Self { report_share, @@ -2064,18 +2064,18 @@ impl PrepareInit { } } - /// Gets the report share associated with this prep init. + /// Gets the report share associated with this verify init. pub fn report_share(&self) -> &ReportShare { &self.report_share } - /// Gets the message associated with this prep init. + /// Gets the message associated with this verify init. 
pub fn message(&self) -> &PingPongMessage { &self.message } } -impl Encode for PrepareInit { +impl Encode for VerifyInit { fn encode(&self, bytes: &mut Vec<u8>) -> Result<(), CodecError> { self.report_share.encode(bytes)?; let encoded_message = self.message.get_encoded()?; @@ -2087,7 +2087,7 @@ } } -impl Decode for PrepareInit { +impl Decode for VerifyInit { fn decode(bytes: &mut Cursor<&[u8]>) -> Result<Self, CodecError> { let report_share = ReportShare::decode(bytes)?; let message_bytes = decode_u32_items(&(), bytes)?; @@ -2100,31 +2100,31 @@ } } -/// DAP protocol message representing the response to a preparation step in a VDAF evaluation. +/// DAP protocol message representing the response to a verification step in a VDAF evaluation. #[derive(Clone, Debug, PartialEq, Eq)] -pub struct PrepareResp { +pub struct VerifyResp { report_id: ReportId, - result: PrepareStepResult, + result: VerifyStepResult, } -impl PrepareResp { - /// Constructs a new prepare step from its components. - pub fn new(report_id: ReportId, result: PrepareStepResult) -> Self { +impl VerifyResp { + /// Constructs a new verify step from its components. + pub fn new(report_id: ReportId, result: VerifyStepResult) -> Self { Self { report_id, result } } - /// Gets the report ID associated with this prepare step. + /// Gets the report ID associated with this verify step. pub fn report_id(&self) -> &ReportId { &self.report_id } - /// Gets the result associated with this prepare step. - pub fn result(&self) -> &PrepareStepResult { + /// Gets the result associated with this verify step. 
+ pub fn result(&self) -> &VerifyStepResult { &self.result } } -impl Encode for PrepareResp { +impl Encode for VerifyResp { fn encode(&self, bytes: &mut Vec<u8>) -> Result<(), CodecError> { self.report_id.encode(bytes)?; self.result.encode(bytes) } @@ -2135,20 +2135,20 @@ } } -impl Decode for PrepareResp { +impl Decode for VerifyResp { fn decode(bytes: &mut Cursor<&[u8]>) -> Result<Self, CodecError> { let report_id = ReportId::decode(bytes)?; - let result = PrepareStepResult::decode(bytes)?; + let result = VerifyStepResult::decode(bytes)?; Ok(Self { report_id, result }) } } -/// DAP protocol message representing result-type-specific data associated with a preparation step -/// in a VDAF evaluation. Included in a PrepareResp message. +/// DAP protocol message representing result-type-specific data associated with a verification step +/// in a VDAF evaluation. Included in a VerifyResp message. #[derive(Clone, Educe, PartialEq, Eq)] #[educe(Debug)] -pub enum PrepareStepResult { +pub enum VerifyStepResult { Continue { #[educe(Debug(ignore))] message: PingPongMessage, @@ -2157,9 +2157,9 @@ Reject(ReportError), } -impl Encode for PrepareStepResult { +impl Encode for VerifyStepResult { fn encode(&self, bytes: &mut Vec<u8>) -> Result<(), CodecError> { - // The encoding includes an implicit discriminator byte, called PrepareStepResult in the + // The encoding includes an implicit discriminator byte, called VerifyStepResult in the // DAP spec. 
match self { Self::Continue { message } => { @@ -2183,13 +2183,13 @@ } } -impl Decode for PrepareStepResult { +impl Decode for VerifyStepResult { fn decode(bytes: &mut Cursor<&[u8]>) -> Result<Self, CodecError> { let val = u8::decode(bytes)?; Ok(match val { 0 => { - let prep_msg_bytes = decode_u32_items(&(), bytes)?; - let message = PingPongMessage::get_decoded(&prep_msg_bytes)?; + let verify_msg_bytes = decode_u32_items(&(), bytes)?; + let message = PingPongMessage::get_decoded(&verify_msg_bytes)?; Self::Continue { message } } 1 => Self::Finished, @@ -2199,7 +2199,7 @@ } } -/// DAP protocol message representing an error while preparing a report share for aggregation. +/// DAP protocol message representing an error while verifying a report share for aggregation. #[derive(Clone, Copy, Debug, PartialEq, Eq, TryFromPrimitive)] #[repr(u8)] pub enum ReportError { @@ -2209,7 +2209,7 @@ ReportDropped = 3, HpkeUnknownConfigId = 4, HpkeDecryptError = 5, - VdafPrepError = 6, + VdafVerifyError = 6, TaskExpired = 7, InvalidMessage = 8, ReportTooEarly = 9, @@ -2235,32 +2235,32 @@ } } -/// DAP protocol message representing a request to continue preparation of a report share for +/// DAP protocol message representing a request to continue verification of a report share for /// aggregation. #[derive(Clone, Debug, PartialEq, Eq)] -pub struct PrepareContinue { +pub struct VerifyContinue { report_id: ReportId, message: PingPongMessage, } -impl PrepareContinue { - /// Constructs a new prepare continue from its components. +impl VerifyContinue { - /// Constructs a new verify continue from its components. pub fn new(report_id: ReportId, message: PingPongMessage) -> Self { Self { report_id, message } } - /// Gets the report ID associated with this prepare continue. + /// Gets the report ID associated with this verify continue. 
pub fn report_id(&self) -> &ReportId { &self.report_id } - /// Gets the message associated with this prepare continue. + /// Gets the message associated with this verify continue. pub fn message(&self) -> &PingPongMessage { &self.message } } -impl Encode for PrepareContinue { +impl Encode for VerifyContinue { fn encode(&self, bytes: &mut Vec<u8>) -> Result<(), CodecError> { self.report_id.encode(bytes)?; let encoded_message = self.message.get_encoded()?; @@ -2272,7 +2272,7 @@ } } -impl Decode for PrepareContinue { +impl Decode for VerifyContinue { fn decode(bytes: &mut Cursor<&[u8]>) -> Result<Self, CodecError> { let report_id = ReportId::decode(bytes)?; let message_bytes = decode_u32_items(&(), bytes)?; @@ -2351,7 +2351,7 @@ pub struct AggregationJobInitializeReq { #[educe(Debug(ignore))] aggregation_parameter: Vec<u8>, partial_batch_selector: PartialBatchSelector, - prepare_inits: Vec<PrepareInit>, + verify_inits: Vec<VerifyInit>, } impl AggregationJobInitializeReq { @@ -2359,12 +2359,12 @@ pub fn new( aggregation_parameter: Vec<u8>, partial_batch_selector: PartialBatchSelector, - prepare_inits: Vec<PrepareInit>, + verify_inits: Vec<VerifyInit>, ) -> Self { Self { aggregation_parameter, partial_batch_selector, - prepare_inits, + verify_inits, } } @@ -2378,10 +2378,10 @@ &self.partial_batch_selector } - /// Gets the preparation initialization messages associated with this aggregate initialization + /// Gets the verification initialization messages associated with this aggregate initialization /// request. 
- pub fn prepare_inits(&self) -> &[PrepareInit] { - &self.prepare_inits + pub fn verify_inits(&self) -> &[VerifyInit] { + &self.verify_inits } } @@ -2393,15 +2393,15 @@ impl Encode for AggregationJobInitializeReq { fn encode(&self, bytes: &mut Vec<u8>) -> Result<(), CodecError> { encode_u32_items(bytes, &(), &self.aggregation_parameter)?; self.partial_batch_selector.encode(bytes)?; - encode_u32_items(bytes, &(), &self.prepare_inits) + encode_u32_items(bytes, &(), &self.verify_inits) } fn encoded_len(&self) -> Option<usize> { let mut length = 4 + self.aggregation_parameter.len(); length += self.partial_batch_selector.encoded_len()?; length += 4; - for prepare_init in &self.prepare_inits { - length += prepare_init.encoded_len()?; + for verify_init in &self.verify_inits { + length += verify_init.encoded_len()?; } Some(length) } @@ -2411,12 +2411,12 @@ impl Decode for AggregationJobInitializeReq { fn decode(bytes: &mut Cursor<&[u8]>) -> Result<Self, CodecError> { let aggregation_parameter = decode_u32_items(&(), bytes)?; let partial_batch_selector = PartialBatchSelector::decode(bytes)?; - let prepare_inits = decode_u32_items(&(), bytes)?; + let verify_inits = decode_u32_items(&(), bytes)?; Ok(Self { aggregation_parameter, partial_batch_selector, - prepare_inits, + verify_inits, }) } } @@ -2482,15 +2482,15 @@ #[derive(Clone, Debug, PartialEq, Eq)] pub struct AggregationJobContinueReq { step: AggregationJobStep, - prepare_continues: Vec<PrepareContinue>, + verify_continues: Vec<VerifyContinue>, } impl AggregationJobContinueReq { /// Constructs a new aggregate continuation response from its components. 
- pub fn new(step: AggregationJobStep, prepare_continues: Vec<PrepareContinue>) -> Self { + pub fn new(step: AggregationJobStep, verify_continues: Vec<VerifyContinue>) -> Self { Self { step, - prepare_continues, + verify_continues, } } @@ -2499,10 +2499,10 @@ impl AggregationJobContinueReq { self.step } - /// Gets the preparation continuation messages associated with this aggregate continuation + /// Gets the verification continuation messages associated with this aggregate continuation /// request. - pub fn prepare_continues(&self) -> &[PrepareContinue] { - &self.prepare_continues + pub fn verify_continues(&self) -> &[VerifyContinue] { + &self.verify_continues } } @@ -2513,14 +2513,14 @@ impl MediaType for AggregationJobContinueReq { impl Encode for AggregationJobContinueReq { fn encode(&self, bytes: &mut Vec<u8>) -> Result<(), CodecError> { self.step.encode(bytes)?; - encode_u32_items(bytes, &(), &self.prepare_continues) + encode_u32_items(bytes, &(), &self.verify_continues) } fn encoded_len(&self) -> Option<usize> { let mut length = self.step.encoded_len()?; length += 4; - for prepare_continue in self.prepare_continues.iter() { - length += prepare_continue.encoded_len()?; + for verify_continue in self.verify_continues.iter() { + length += verify_continue.encoded_len()?; } Some(length) } @@ -2529,8 +2529,8 @@ impl Decode for AggregationJobContinueReq { fn decode(bytes: &mut Cursor<&[u8]>) -> Result<Self, CodecError> { let step = AggregationJobStep::decode(bytes)?; - let prepare_continues = decode_u32_items(&(), bytes)?; - Ok(Self::new(step, prepare_continues)) + let verify_continues = decode_u32_items(&(), bytes)?; + Ok(Self::new(step, verify_continues)) } } @@ -2538,7 +2538,7 @@ impl Decode for AggregationJobContinueReq { /// continuation request. 
#[derive(Clone, Debug, PartialEq, Eq)] pub struct AggregationJobResp { - pub prepare_resps: Vec<PrepareResp>, + pub verify_resps: Vec<VerifyResp>, } impl MediaType for AggregationJobResp { @@ -2548,13 +2548,13 @@ impl Encode for AggregationJobResp { fn encode(&self, bytes: &mut Vec<u8>) -> Result<(), CodecError> { - encode_u32_items(bytes, &(), &self.prepare_resps) + encode_u32_items(bytes, &(), &self.verify_resps) } fn encoded_len(&self) -> Option<usize> { let mut len = 4; - for prepare_resp in &self.prepare_resps { - len += prepare_resp.encoded_len()?; + for verify_resp in &self.verify_resps { + len += verify_resp.encoded_len()?; } Some(len) } @@ -2563,7 +2563,7 @@ impl Encode for AggregationJobResp { impl Decode for AggregationJobResp { fn decode(bytes: &mut Cursor<&[u8]>) -> Result<Self, CodecError> { Ok(Self { - prepare_resps: decode_u32_items(&(), bytes)?, + verify_resps: decode_u32_items(&(), bytes)?, }) } } diff --git a/messages/src/problem_type.rs b/messages/src/problem_type.rs index f5211e61f..016672b3e 100644 --- a/messages/src/problem_type.rs +++ b/messages/src/problem_type.rs @@ -67,7 +67,7 @@ impl DapProblemType { "Leader and helper disagree on reports aggregated in a batch." } DapProblemType::StepMismatch => { - "The leader and helper are not on the same step of VDAF preparation." + "The leader and helper are not on the same step of VDAF verification." } DapProblemType::BatchOverlap => { "The queried batch overlaps with a previously queried batch." 
diff --git a/messages/src/tests/aggregation.rs b/messages/src/tests/aggregation.rs index fb326cf91..80a95ff92 100644 --- a/messages/src/tests/aggregation.rs +++ b/messages/src/tests/aggregation.rs @@ -3,8 +3,8 @@ use prio::topology::ping_pong::PingPongMessage; use crate::{ AggregationJobContinueReq, AggregationJobInitializeReq, AggregationJobResp, AggregationJobStep, BatchId, Extension, ExtensionType, HpkeCiphertext, HpkeConfigId, LeaderSelected, - PartialBatchSelector, PrepareContinue, PrepareInit, PrepareResp, PrepareStepResult, - ReportError, ReportId, ReportMetadata, ReportShare, Time, TimePrecision, roundtrip_encoding, + PartialBatchSelector, ReportError, ReportId, ReportMetadata, ReportShare, Time, TimePrecision, + VerifyContinue, VerifyInit, VerifyResp, VerifyStepResult, roundtrip_encoding, }; const TEST_TIME_PRECISION: TimePrecision = TimePrecision::from_seconds(1); @@ -114,10 +114,10 @@ fn roundtrip_report_share() { } #[test] -fn roundtrip_prepare_init() { +fn roundtrip_verify_init() { roundtrip_encoding(&[ ( - PrepareInit { + VerifyInit { report_share: ReportShare { metadata: ReportMetadata::new( ReportId::from([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), @@ -179,7 +179,7 @@ fn roundtrip_prepare_init() { ), ), ( - PrepareInit { + VerifyInit { report_share: ReportShare { metadata: ReportMetadata::new( ReportId::from([16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]), @@ -252,12 +252,12 @@ fn roundtrip_prepare_init() { } #[test] -fn roundtrip_prepare_resp() { +fn roundtrip_verify_resp() { roundtrip_encoding(&[ ( - PrepareResp { + VerifyResp { report_id: ReportId::from([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), - result: PrepareStepResult::Continue { + result: VerifyStepResult::Continue { message: PingPongMessage::Continue { verifier_message: Vec::from("012345"), verifier_share: Vec::from("6789"), @@ -266,7 +266,7 @@ fn roundtrip_prepare_resp() { }, concat!( "0102030405060708090A0B0C0D0E0F10", // report_id - "00", // 
prepare_step_result + "00", // verify_step_result concat!( // message "00000013", // ping pong message length @@ -283,23 +283,23 @@ fn roundtrip_prepare_resp() { ), ), ( - PrepareResp { + VerifyResp { report_id: ReportId::from([16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]), - result: PrepareStepResult::Finished, + result: VerifyStepResult::Finished, }, concat!( "100F0E0D0C0B0A090807060504030201", // report_id - "01", // prepare_step_result + "01", // verify_step_result ), ), ( - PrepareResp { + VerifyResp { report_id: ReportId::from([255; 16]), - result: PrepareStepResult::Reject(ReportError::VdafPrepError), + result: VerifyStepResult::Reject(ReportError::VdafVerifyError), }, concat!( "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", // report_id - "02", // prepare_step_result + "02", // verify_step_result "06", // report_share_error ), ), @@ -315,7 +315,7 @@ fn roundtrip_report_share_error() { (ReportError::ReportDropped, "03"), (ReportError::HpkeUnknownConfigId, "04"), (ReportError::HpkeDecryptError, "05"), - (ReportError::VdafPrepError, "06"), + (ReportError::VdafVerifyError, "06"), (ReportError::TaskExpired, "07"), (ReportError::InvalidMessage, "08"), (ReportError::ReportTooEarly, "09"), @@ -330,8 +330,8 @@ fn roundtrip_aggregation_job_initialize_req() { AggregationJobInitializeReq { aggregation_parameter: Vec::from("012345"), partial_batch_selector: PartialBatchSelector::new_time_interval(), - prepare_inits: Vec::from([ - PrepareInit { + verify_inits: Vec::from([ + VerifyInit { report_share: ReportShare { metadata: ReportMetadata::new( ReportId::from([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), @@ -349,7 +349,7 @@ fn roundtrip_aggregation_job_initialize_req() { verifier_share: Vec::from("012345"), }, }, - PrepareInit { + VerifyInit { report_share: ReportShare { metadata: ReportMetadata::new( ReportId::from([16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]), @@ -382,7 +382,7 @@ fn roundtrip_aggregation_job_initialize_req() { "", // opaque data ), 
concat!( - // prepare_inits + // verify_inits "00000082", // length concat!( concat!( @@ -486,8 +486,8 @@ fn roundtrip_aggregation_job_initialize_req() { partial_batch_selector: PartialBatchSelector::new_leader_selected(BatchId::from( [2u8; 32], )), - prepare_inits: Vec::from([ - PrepareInit { + verify_inits: Vec::from([ + VerifyInit { report_share: ReportShare { metadata: ReportMetadata::new( ReportId::from([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), @@ -505,7 +505,7 @@ fn roundtrip_aggregation_job_initialize_req() { verifier_share: Vec::from("012345"), }, }, - PrepareInit { + VerifyInit { report_share: ReportShare { metadata: ReportMetadata::new( ReportId::from([16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]), @@ -538,7 +538,7 @@ fn roundtrip_aggregation_job_initialize_req() { "0202020202020202020202020202020202020202020202020202020202020202", // opaque data ), concat!( - // prepare_inits + // verify_inits "00000082", // length concat!( concat!( @@ -641,8 +641,8 @@ fn roundtrip_aggregation_job_continue_req() { roundtrip_encoding(&[( AggregationJobContinueReq { step: AggregationJobStep(42405), - prepare_continues: Vec::from([ - PrepareContinue { + verify_continues: Vec::from([ + VerifyContinue { report_id: ReportId::from([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, ]), @@ -650,7 +650,7 @@ fn roundtrip_aggregation_job_continue_req() { verifier_share: Vec::from("012345"), }, }, - PrepareContinue { + VerifyContinue { report_id: ReportId::from([ 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, ]), @@ -663,7 +663,7 @@ fn roundtrip_aggregation_job_continue_req() { concat!( "A5A5", // step concat!( - // prepare_steps + // verify_steps "0000003e", // length concat!( "0102030405060708090A0B0C0D0E0F10", // report_id @@ -698,32 +698,32 @@ fn roundtrip_aggregation_job_continue_req() { fn roundtrip_aggregation_job_resp() { roundtrip_encoding(&[( AggregationJobResp { - prepare_resps: Vec::from([ - PrepareResp { + verify_resps: Vec::from([ 
+ VerifyResp { report_id: ReportId::from([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, ]), - result: PrepareStepResult::Continue { + result: VerifyStepResult::Continue { message: PingPongMessage::Continue { verifier_message: Vec::from("01234"), verifier_share: Vec::from("56789"), }, }, }, - PrepareResp { + VerifyResp { report_id: ReportId::from([ 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, ]), - result: PrepareStepResult::Finished, + result: VerifyStepResult::Finished, }, ]), }, concat!( - // prepare_steps + // verify_steps "00000039", // length concat!( "0102030405060708090A0B0C0D0E0F10", // report_id - "00", // prepare_step_result + "00", // verify_step_result concat!( "00000013", // ping pong message length "01", // ping pong message type @@ -741,7 +741,7 @@ fn roundtrip_aggregation_job_resp() { ), concat!( "100F0E0D0C0B0A090807060504030201", // report_id - "01", // prepare_step_result + "01", // verify_step_result ) ), )])