From 4adfc1fbb59d21006642393e3ed1d38d4e414e1d Mon Sep 17 00:00:00 2001
From: Andrei Sandu
Date: Tue, 7 Feb 2023 11:30:06 +0000
Subject: [PATCH 001/105] Add compact vrf modulo assignment

Signed-off-by: Andrei Sandu
---
 node/primitives/src/approval.rs | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/node/primitives/src/approval.rs b/node/primitives/src/approval.rs
index 809dc57bcbc7..e43ec3f75d68 100644
--- a/node/primitives/src/approval.rs
+++ b/node/primitives/src/approval.rs
@@ -75,6 +75,16 @@ pub enum AssignmentCertKind {
 		/// The core index chosen in this cert.
 		core_index: CoreIndex,
 	},
+	/// Multiple assignment stories based on the VRF that authorized the relay-chain block where the
+	/// candidate was included combined with a sample number.
+	///
+	/// The context used to produce bytes is [`RELAY_VRF_MODULO_CONTEXT`]
+	RelayVRFModuloCompact {
+		/// The number of samples.
+		sample: u32,
+		/// The assigned cores.
+		core_indices: Vec<CoreIndex>,
+	},
 }
 
 /// A certification of assignment.

From 71bf07b303a869581cc0e32f4bb6a7879a9f875f Mon Sep 17 00:00:00 2001
From: Andrei Sandu
Date: Tue, 7 Feb 2023 11:32:43 +0000
Subject: [PATCH 002/105] compute all assignments from a single vrf output

Signed-off-by: Andrei Sandu
---
 node/core/approval-voting/src/criteria.rs | 85 +++++++++++++++++++++--
 1 file changed, 80 insertions(+), 5 deletions(-)

diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs
index 6707fc5672aa..696ba8935a66 100644
--- a/node/core/approval-voting/src/criteria.rs
+++ b/node/core/approval-voting/src/criteria.rs
@@ -97,6 +97,52 @@ fn relay_vrf_modulo_transcript(relay_vrf_story: RelayVRFStory, sample: u32) -> T
 	t
 }
 
+/// A hard upper bound on num_cores * target_checkers / num_validators
+const MAX_MODULO_SAMPLES: usize = 40;
+
+use std::convert::AsMut;
+
+fn clone_into_array<A, T>(slice: &[T]) -> A
+where
+	A: Default + AsMut<[T]>,
+	T: Clone,
+{
+	let mut a = A::default();
+	<A as AsMut<[T]>>::as_mut(&mut a).clone_from_slice(slice);
+	a
+}
+
+struct BigArray(pub [u8; MAX_MODULO_SAMPLES * 4]);
+
+impl Default for BigArray {
+	fn default() -> Self {
+		BigArray([0u8; MAX_MODULO_SAMPLES * 4])
+	}
+}
+
+impl AsMut<[u8]> for BigArray {
+	fn as_mut(&mut self) -> &mut [u8] {
+		self.0.as_mut()
+	}
+}
+
+/// Return all core indices we are assigned to.
+fn relay_vrf_modulo_cores(
+	vrf_in_out: &VRFInOut,
+	// Configuration - `relay_vrf_modulo_samples`.
+	num_samples: u32,
+	// Configuration - `n_cores`.
+	max_cores: u32,
+) -> Vec<CoreIndex> {
+	vrf_in_out
+		.make_bytes::<BigArray>(approval_types::CORE_RANDOMNESS_CONTEXT)
+		.0
+		.chunks_exact(4)
+		.take(num_samples as usize)
+		.map(move |sample| CoreIndex(u32::from_le_bytes(clone_into_array(&sample)) % max_cores))
+		.collect::<Vec<CoreIndex>>()
+}
+
 fn relay_vrf_modulo_core(vrf_in_out: &VRFInOut, n_cores: u32) -> CoreIndex {
 	let bytes: [u8; 4] = vrf_in_out.make_bytes(approval_types::CORE_RANDOMNESS_CONTEXT);
 
@@ -133,6 +179,12 @@ fn assigned_core_transcript(core_index: CoreIndex) -> Transcript {
 	t
 }
 
+fn assigned_cores_transcript(core_indices: &Vec<CoreIndex>) -> Transcript {
+	let mut t = Transcript::new(approval_types::ASSIGNED_CORE_CONTEXT);
+	core_indices.using_encoded(|s| t.append_message(b"cores", s));
+	t
+}
+
 /// Information about the world assignments are being produced in.
#[derive(Clone)] pub(crate) struct Config { @@ -497,15 +549,38 @@ pub(crate) fn check_assignment_cert( } let &(ref vrf_output, ref vrf_proof) = &assignment.vrf; - match assignment.kind { + match &assignment.kind { + AssignmentCertKind::RelayVRFModuloCompact { sample, core_indices } => { + if *sample >= config.relay_vrf_modulo_samples { + return Err(InvalidAssignment(Reason::SampleOutOfBounds)) + } + + let (vrf_in_out, _) = public + .vrf_verify_extra( + relay_vrf_modulo_transcript(relay_vrf_story, *sample), + &vrf_output.0, + &vrf_proof.0, + assigned_cores_transcript(core_indices), + ) + .map_err(|_| InvalidAssignment(Reason::VRFModuloOutputMismatch))?; + + // ensure that the `vrf_in_out` actually gives us the claimed core. + if relay_vrf_modulo_cores(&vrf_in_out, *sample, config.n_cores) + .contains(&claimed_core_index) + { + Ok(0) + } else { + Err(InvalidAssignment(Reason::VRFModuloCoreIndexMismatch)) + } + }, AssignmentCertKind::RelayVRFModulo { sample } => { - if sample >= config.relay_vrf_modulo_samples { + if *sample >= config.relay_vrf_modulo_samples { return Err(InvalidAssignment(Reason::SampleOutOfBounds)) } let (vrf_in_out, _) = public .vrf_verify_extra( - relay_vrf_modulo_transcript(relay_vrf_story, sample), + relay_vrf_modulo_transcript(relay_vrf_story, *sample), &vrf_output.0, &vrf_proof.0, assigned_core_transcript(claimed_core_index), @@ -520,13 +595,13 @@ pub(crate) fn check_assignment_cert( } }, AssignmentCertKind::RelayVRFDelay { core_index } => { - if core_index != claimed_core_index { + if *core_index != claimed_core_index { return Err(InvalidAssignment(Reason::VRFDelayCoreIndexMismatch)) } let (vrf_in_out, _) = public .vrf_verify( - relay_vrf_delay_transcript(relay_vrf_story, core_index), + relay_vrf_delay_transcript(relay_vrf_story, *core_index), &vrf_output.0, &vrf_proof.0, ) From b1aabc950c47ae0df2971991345640cbe3ade0f0 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 7 Feb 2023 14:33:14 +0000 Subject: [PATCH 003/105] impl compute/check of `RelayVRFModuloCompact` Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/criteria.rs | 174 +++++++++++++--------- node/core/approval-voting/src/import.rs | 2 +- node/core/approval-voting/src/lib.rs | 2 +- 3 files changed, 105 insertions(+), 73 deletions(-) diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index 696ba8935a66..9eddfa81940d 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -227,7 +227,7 @@ pub(crate) trait AssignmentCriteria { fn check_assignment_cert( &self, - claimed_core_index: CoreIndex, + claimed_core_index: Vec, validator_index: ValidatorIndex, config: &Config, relay_vrf_story: RelayVRFStory, @@ -251,7 +251,7 @@ impl AssignmentCriteria for RealAssignmentCriteria { fn check_assignment_cert( &self, - claimed_core_index: CoreIndex, + claimed_core_index: Vec, validator_index: ValidatorIndex, config: &Config, relay_vrf_story: RelayVRFStory, @@ -341,16 +341,19 @@ pub(crate) fn compute_assignments( let mut assignments = HashMap::new(); - // First run `RelayVRFModulo` for each sample. + // TODO: support all vrf modulo assignment kinds. + // For now we only do compact. compute_relay_vrf_modulo_assignments( &assignments_key, index, config, relay_vrf_story.clone(), - leaving_cores.iter().cloned(), + leaving_cores.clone(), &mut assignments, ); + //TODO: Add assignment into `assignments` per core map. + // Then run `RelayVRFDelay` once for the whole block. 
compute_relay_vrf_delay_assignments( &assignments_key, @@ -369,58 +372,61 @@ fn compute_relay_vrf_modulo_assignments( validator_index: ValidatorIndex, config: &Config, relay_vrf_story: RelayVRFStory, - leaving_cores: impl IntoIterator + Clone, + leaving_cores: Vec<(CandidateHash, CoreIndex)>, assignments: &mut HashMap, ) { - for rvm_sample in 0..config.relay_vrf_modulo_samples { - let mut core = CoreIndex::default(); - - let maybe_assignment = { - // Extra scope to ensure borrowing instead of moving core - // into closure. - let core = &mut core; - assignments_key.vrf_sign_extra_after_check( - relay_vrf_modulo_transcript(relay_vrf_story.clone(), rvm_sample), - |vrf_in_out| { - *core = relay_vrf_modulo_core(&vrf_in_out, config.n_cores); - if let Some((candidate_hash, _)) = - leaving_cores.clone().into_iter().find(|(_, c)| c == core) - { - gum::trace!( - target: LOG_TARGET, - ?candidate_hash, - ?core, - ?validator_index, - tranche = 0, - "RelayVRFModulo Assignment." - ); - - Some(assigned_core_transcript(*core)) - } else { - None - } - }, - ) - }; + let mut assigned_cores = Vec::new(); + // for rvm_sample in 0..config.relay_vrf_modulo_samples { + let maybe_assignment = { + let assigned_cores = &mut assigned_cores; + assignments_key.vrf_sign_extra_after_check( + relay_vrf_modulo_transcript(relay_vrf_story.clone(), config.relay_vrf_modulo_samples - 1), + |vrf_in_out| { + *assigned_cores = relay_vrf_modulo_cores( + &vrf_in_out, + config.relay_vrf_modulo_samples, + config.n_cores, + ) + .into_iter() + .filter(|core| { + leaving_cores.iter().map(|(_, core)| core).collect::>().contains(&core) + }) + .collect::>(); + + if !assigned_cores.is_empty() { + gum::trace!( + target: LOG_TARGET, + ?assigned_cores, + ?validator_index, + tranche = 0, + "RelayVRFModulo Assignment." + ); + + Some(assigned_cores_transcript(assigned_cores)) + } else { + None + } + }, + ) + }; - if let Some((vrf_in_out, vrf_proof, _)) = maybe_assignment { - // Sanity: `core` is always initialized to non-default here, as the closure above - // has been executed. - let cert = AssignmentCert { - kind: AssignmentCertKind::RelayVRFModulo { sample: rvm_sample }, - vrf: ( - approval_types::VRFOutput(vrf_in_out.to_output()), - approval_types::VRFProof(vrf_proof), - ), - }; + if let Some(assignment) = maybe_assignment.map(|(vrf_in_out, vrf_proof, _)| { + let cert = AssignmentCert { + kind: AssignmentCertKind::RelayVRFModuloCompact { + sample: config.relay_vrf_modulo_samples - 1, + core_indices: assigned_cores.clone(), + }, + vrf: ( + approval_types::VRFOutput(vrf_in_out.to_output()), + approval_types::VRFProof(vrf_proof), + ), + }; - // All assignments of type RelayVRFModulo have tranche 0. - assignments.entry(core).or_insert(OurAssignment { - cert, - tranche: 0, - validator_index, - triggered: false, - }); + // All assignments of type RelayVRFModulo have tranche 0. + OurAssignment { cert, tranche: 0, validator_index, triggered: false } + }) { + for core_index in assigned_cores { + assignments.insert(core_index, assignment.clone()); } } } @@ -518,7 +524,7 @@ pub(crate) enum InvalidAssignmentReason { /// This function does not check whether the core is actually a valid assignment or not. That should be done /// outside the scope of this function. 
pub(crate) fn check_assignment_cert( - claimed_core_index: CoreIndex, + claimed_core_index: Vec, validator_index: ValidatorIndex, config: &Config, relay_vrf_story: RelayVRFStory, @@ -535,8 +541,10 @@ pub(crate) fn check_assignment_cert( let public = schnorrkel::PublicKey::from_bytes(validator_public.as_slice()) .map_err(|_| InvalidAssignment(Reason::InvalidAssignmentKey))?; - if claimed_core_index.0 >= config.n_cores { - return Err(InvalidAssignment(Reason::CoreIndexOutOfBounds)) + for claimed_cores in &claimed_core_index { + if claimed_cores.0 >= config.n_cores { + return Err(InvalidAssignment(Reason::CoreIndexOutOfBounds)) + } } // Check that the validator was not part of the backing group @@ -564,9 +572,12 @@ pub(crate) fn check_assignment_cert( ) .map_err(|_| InvalidAssignment(Reason::VRFModuloOutputMismatch))?; - // ensure that the `vrf_in_out` actually gives us the claimed core. - if relay_vrf_modulo_cores(&vrf_in_out, *sample, config.n_cores) - .contains(&claimed_core_index) + let got_cores = relay_vrf_modulo_cores(&vrf_in_out, *sample + 1, config.n_cores); + println!("Claimed cores: {:?}", &claimed_core_index); + println!("Claimed cores: {:?}", &got_cores); + + // ensure that the `vrf_in_out` actually gives us the claimed cores. + if got_cores == claimed_core_index { Ok(0) } else { @@ -583,19 +594,19 @@ pub(crate) fn check_assignment_cert( relay_vrf_modulo_transcript(relay_vrf_story, *sample), &vrf_output.0, &vrf_proof.0, - assigned_core_transcript(claimed_core_index), + assigned_core_transcript(claimed_core_index[0]), ) .map_err(|_| InvalidAssignment(Reason::VRFModuloOutputMismatch))?; // ensure that the `vrf_in_out` actually gives us the claimed core. - if relay_vrf_modulo_core(&vrf_in_out, config.n_cores) == claimed_core_index { + if relay_vrf_modulo_core(&vrf_in_out, config.n_cores) == claimed_core_index[0] { Ok(0) } else { Err(InvalidAssignment(Reason::VRFModuloCoreIndexMismatch)) } }, AssignmentCertKind::RelayVRFDelay { core_index } => { - if *core_index != claimed_core_index { + if *core_index != claimed_core_index[0] { return Err(InvalidAssignment(Reason::VRFDelayCoreIndexMismatch)) } @@ -788,7 +799,7 @@ mod tests { } struct MutatedAssignment { - core: CoreIndex, + cores: Vec, cert: AssignmentCert, group: GroupIndex, own_group: GroupIndex, @@ -838,7 +849,17 @@ mod tests { let mut counted = 0; for (core, assignment) in assignments { let mut mutated = MutatedAssignment { - core, + cores: match assignment.cert.kind.clone() { + AssignmentCertKind::RelayVRFModuloCompact { sample: _ , core_indices } => { + core_indices + }, + AssignmentCertKind::RelayVRFModulo { sample: _ } => { + vec![core] + }, + AssignmentCertKind::RelayVRFDelay { core_index } => { + vec![core_index] + }, + }, group: group_for_core(core.0 as _), cert: assignment.cert, own_group: GroupIndex(0), @@ -854,16 +875,15 @@ mod tests { counted += 1; let is_good = check_assignment_cert( - mutated.core, + mutated.cores, mutated.val_index, &mutated.config, relay_vrf_story.clone(), &mutated.cert, mutated.group, - ) - .is_ok(); + ).is_ok(); - assert_eq!(expected, is_good) + assert_eq!(expected, is_good); } assert!(counted > 0); @@ -877,7 +897,7 @@ mod tests { #[test] fn check_rejects_claimed_core_out_of_bounds() { check_mutated_assignments(200, 100, 25, |m| { - m.core.0 += 100; + m.cores[0].0 += 100; Some(false) }); } @@ -919,6 +939,10 @@ mod tests { m.cert.vrf = garbage_vrf(); Some(false) }, + AssignmentCertKind::RelayVRFModuloCompact { .. 
} => { + m.cert.vrf = garbage_vrf(); + Some(false) + }, _ => None, // skip everything else. } }); @@ -932,6 +956,10 @@ mod tests { m.config.relay_vrf_modulo_samples = sample; Some(false) }, + AssignmentCertKind::RelayVRFModuloCompact { sample , core_indices: _} => { + m.config.relay_vrf_modulo_samples = sample; + Some(false) + }, _ => None, // skip everything else. } }); @@ -942,7 +970,9 @@ mod tests { check_mutated_assignments(200, 100, 25, |m| { match m.cert.kind.clone() { AssignmentCertKind::RelayVRFDelay { .. } => { - m.core = CoreIndex((m.core.0 + 1) % 100); + for core in &mut m.cores { + core.0 = (core.0 + 1) % 100; + } Some(false) }, _ => None, // skip everything else. @@ -954,8 +984,10 @@ mod tests { fn check_rejects_modulo_core_wrong() { check_mutated_assignments(200, 100, 25, |m| { match m.cert.kind.clone() { - AssignmentCertKind::RelayVRFModulo { .. } => { - m.core = CoreIndex((m.core.0 + 1) % 100); + AssignmentCertKind::RelayVRFModulo { .. } | AssignmentCertKind::RelayVRFModuloCompact { .. } => { + for core in &mut m.cores { + core.0 = (core.0 + 1) % 100; + } Some(false) }, _ => None, // skip everything else. diff --git a/node/core/approval-voting/src/import.rs b/node/core/approval-voting/src/import.rs index f36b79c7a4e1..e5e3d50c6e81 100644 --- a/node/core/approval-voting/src/import.rs +++ b/node/core/approval-voting/src/import.rs @@ -690,7 +690,7 @@ pub(crate) mod tests { fn check_assignment_cert( &self, - _claimed_core_index: polkadot_primitives::CoreIndex, + _claimed_core_index: Vec, _validator_index: polkadot_primitives::ValidatorIndex, _config: &criteria::Config, _relay_vrf_story: polkadot_node_primitives::approval::RelayVRFStory, diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index 900d3107b034..2d34704168bc 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -1738,7 +1738,7 @@ fn check_and_import_assignment( }; let res = state.assignment_criteria.check_assignment_cert( - claimed_core_index, + vec![claimed_core_index], assignment.validator, &criteria::Config::from(session_info), block_entry.relay_vrf_story(), From 8b881ee53d7b31928dd0da1be2164a4df7a564d7 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 7 Feb 2023 14:33:45 +0000 Subject: [PATCH 004/105] fmt and missed file Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/criteria.rs | 21 ++++++++++++--------- node/core/approval-voting/src/tests.rs | 2 +- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index 9eddfa81940d..1d815d706ba8 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -380,7 +380,10 @@ fn compute_relay_vrf_modulo_assignments( let maybe_assignment = { let assigned_cores = &mut assigned_cores; assignments_key.vrf_sign_extra_after_check( - relay_vrf_modulo_transcript(relay_vrf_story.clone(), config.relay_vrf_modulo_samples - 1), + relay_vrf_modulo_transcript( + relay_vrf_story.clone(), + config.relay_vrf_modulo_samples - 1, + ), |vrf_in_out| { *assigned_cores = relay_vrf_modulo_cores( &vrf_in_out, @@ -577,8 +580,7 @@ pub(crate) fn check_assignment_cert( println!("Claimed cores: {:?}", &got_cores); // ensure that the `vrf_in_out` actually gives us the claimed cores. 
- if got_cores == claimed_core_index - { + if got_cores == claimed_core_index { Ok(0) } else { Err(InvalidAssignment(Reason::VRFModuloCoreIndexMismatch)) @@ -850,9 +852,8 @@ mod tests { for (core, assignment) in assignments { let mut mutated = MutatedAssignment { cores: match assignment.cert.kind.clone() { - AssignmentCertKind::RelayVRFModuloCompact { sample: _ , core_indices } => { - core_indices - }, + AssignmentCertKind::RelayVRFModuloCompact { sample: _, core_indices } => + core_indices, AssignmentCertKind::RelayVRFModulo { sample: _ } => { vec![core] }, @@ -881,7 +882,8 @@ mod tests { relay_vrf_story.clone(), &mutated.cert, mutated.group, - ).is_ok(); + ) + .is_ok(); assert_eq!(expected, is_good); } @@ -956,7 +958,7 @@ mod tests { m.config.relay_vrf_modulo_samples = sample; Some(false) }, - AssignmentCertKind::RelayVRFModuloCompact { sample , core_indices: _} => { + AssignmentCertKind::RelayVRFModuloCompact { sample, core_indices: _ } => { m.config.relay_vrf_modulo_samples = sample; Some(false) }, @@ -984,7 +986,8 @@ mod tests { fn check_rejects_modulo_core_wrong() { check_mutated_assignments(200, 100, 25, |m| { match m.cert.kind.clone() { - AssignmentCertKind::RelayVRFModulo { .. } | AssignmentCertKind::RelayVRFModuloCompact { .. } => { + AssignmentCertKind::RelayVRFModulo { .. } | + AssignmentCertKind::RelayVRFModuloCompact { .. } => { for core in &mut m.cores { core.0 = (core.0 + 1) % 100; } diff --git a/node/core/approval-voting/src/tests.rs b/node/core/approval-voting/src/tests.rs index ee13db7bcf54..a8359f58205e 100644 --- a/node/core/approval-voting/src/tests.rs +++ b/node/core/approval-voting/src/tests.rs @@ -246,7 +246,7 @@ where fn check_assignment_cert( &self, - _claimed_core_index: polkadot_primitives::CoreIndex, + _claimed_core_index: Vec, validator_index: ValidatorIndex, _config: &criteria::Config, _relay_vrf_story: polkadot_node_primitives::approval::RelayVRFStory, From 81e62ec95c2a1ba037212e18dbd40136134e8f19 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Wed, 8 Feb 2023 11:07:40 +0000 Subject: [PATCH 005/105] check_and_import_assignment: multiple candidates - tuned test params to test single and multiple assignments Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/criteria.rs | 89 ++++++----- node/core/approval-voting/src/import.rs | 2 +- node/core/approval-voting/src/lib.rs | 185 +++++++++++++--------- node/core/approval-voting/src/tests.rs | 10 +- 4 files changed, 167 insertions(+), 119 deletions(-) diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index 1d815d706ba8..933326f300f2 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -186,7 +186,7 @@ fn assigned_cores_transcript(core_indices: &Vec) -> Transcript { } /// Information about the world assignments are being produced in. -#[derive(Clone)] +#[derive(Clone, Debug)] pub(crate) struct Config { /// The assignment public keys for validators. assignment_keys: Vec, @@ -232,7 +232,8 @@ pub(crate) trait AssignmentCriteria { config: &Config, relay_vrf_story: RelayVRFStory, assignment: &AssignmentCert, - backing_group: GroupIndex, + // Backing groups for each assigned core `CoreIndex`. 
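+	// The order of these groups is expected to match the order of the claimed core indices.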
+ backing_groups: Vec, ) -> Result; } @@ -256,7 +257,7 @@ impl AssignmentCriteria for RealAssignmentCriteria { config: &Config, relay_vrf_story: RelayVRFStory, assignment: &AssignmentCert, - backing_group: GroupIndex, + backing_groups: Vec, ) -> Result { check_assignment_cert( claimed_core_index, @@ -264,7 +265,7 @@ impl AssignmentCriteria for RealAssignmentCriteria { config, relay_vrf_story, assignment, - backing_group, + backing_groups, ) } } @@ -513,6 +514,7 @@ pub(crate) enum InvalidAssignmentReason { VRFModuloOutputMismatch, VRFDelayCoreIndexMismatch, VRFDelayOutputMismatch, + InvalidArguments, } /// Checks the crypto of an assignment cert. Failure conditions: @@ -527,12 +529,12 @@ pub(crate) enum InvalidAssignmentReason { /// This function does not check whether the core is actually a valid assignment or not. That should be done /// outside the scope of this function. pub(crate) fn check_assignment_cert( - claimed_core_index: Vec, + claimed_core_indices: Vec, validator_index: ValidatorIndex, config: &Config, relay_vrf_story: RelayVRFStory, assignment: &AssignmentCert, - backing_group: GroupIndex, + backing_groups: Vec, ) -> Result { use InvalidAssignmentReason as Reason; @@ -544,19 +546,24 @@ pub(crate) fn check_assignment_cert( let public = schnorrkel::PublicKey::from_bytes(validator_public.as_slice()) .map_err(|_| InvalidAssignment(Reason::InvalidAssignmentKey))?; - for claimed_cores in &claimed_core_index { - if claimed_cores.0 >= config.n_cores { - return Err(InvalidAssignment(Reason::CoreIndexOutOfBounds)) - } + // Check that we have all backing groups for claimed cores. + if claimed_core_indices.is_empty() && claimed_core_indices.len() != backing_groups.len() { + return Err(InvalidAssignment(Reason::InvalidArguments)) } // Check that the validator was not part of the backing group // and not already assigned. - let is_in_backing = - is_in_backing_group(&config.validator_groups, validator_index, backing_group); + for (claimed_core, backing_group) in claimed_core_indices.iter().zip(backing_groups.iter()) { + if claimed_core.0 >= config.n_cores { + return Err(InvalidAssignment(Reason::CoreIndexOutOfBounds)) + } + + let is_in_backing = + is_in_backing_group(&config.validator_groups, validator_index, *backing_group); - if is_in_backing { - return Err(InvalidAssignment(Reason::IsInBackingGroup)) + if is_in_backing { + return Err(InvalidAssignment(Reason::IsInBackingGroup)) + } } let &(ref vrf_output, ref vrf_proof) = &assignment.vrf; @@ -575,12 +582,10 @@ pub(crate) fn check_assignment_cert( ) .map_err(|_| InvalidAssignment(Reason::VRFModuloOutputMismatch))?; - let got_cores = relay_vrf_modulo_cores(&vrf_in_out, *sample + 1, config.n_cores); - println!("Claimed cores: {:?}", &claimed_core_index); - println!("Claimed cores: {:?}", &got_cores); + let resulting_cores = relay_vrf_modulo_cores(&vrf_in_out, *sample + 1, config.n_cores); - // ensure that the `vrf_in_out` actually gives us the claimed cores. - if got_cores == claimed_core_index { + // Ensure that the `vrf_in_out` actually gives us the claimed cores. 
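+			// Note: this is a strict equality check, so the claimed core indices must match
+			// the recomputed ones exactly, including their order.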
+ if resulting_cores == claimed_core_indices { Ok(0) } else { Err(InvalidAssignment(Reason::VRFModuloCoreIndexMismatch)) @@ -596,19 +601,19 @@ pub(crate) fn check_assignment_cert( relay_vrf_modulo_transcript(relay_vrf_story, *sample), &vrf_output.0, &vrf_proof.0, - assigned_core_transcript(claimed_core_index[0]), + assigned_core_transcript(claimed_core_indices[0]), ) .map_err(|_| InvalidAssignment(Reason::VRFModuloOutputMismatch))?; // ensure that the `vrf_in_out` actually gives us the claimed core. - if relay_vrf_modulo_core(&vrf_in_out, config.n_cores) == claimed_core_index[0] { + if relay_vrf_modulo_core(&vrf_in_out, config.n_cores) == claimed_core_indices[0] { Ok(0) } else { Err(InvalidAssignment(Reason::VRFModuloCoreIndexMismatch)) } }, AssignmentCertKind::RelayVRFDelay { core_index } => { - if *core_index != claimed_core_index[0] { + if *core_index != claimed_core_indices[0] { return Err(InvalidAssignment(Reason::VRFDelayCoreIndexMismatch)) } @@ -729,7 +734,7 @@ mod tests { ]), n_cores: 2, zeroth_delay_tranche_width: 10, - relay_vrf_modulo_samples: 3, + relay_vrf_modulo_samples: 10, n_delay_tranches: 40, }, vec![(c_a, CoreIndex(0), GroupIndex(1)), (c_b, CoreIndex(1), GroupIndex(0))], @@ -764,7 +769,7 @@ mod tests { ]), n_cores: 2, zeroth_delay_tranche_width: 10, - relay_vrf_modulo_samples: 3, + relay_vrf_modulo_samples: 10, n_delay_tranches: 40, }, vec![(c_a, CoreIndex(0), GroupIndex(0)), (c_b, CoreIndex(1), GroupIndex(1))], @@ -791,7 +796,7 @@ mod tests { validator_groups: Default::default(), n_cores: 0, zeroth_delay_tranche_width: 10, - relay_vrf_modulo_samples: 3, + relay_vrf_modulo_samples: 10, n_delay_tranches: 40, }, vec![], @@ -800,10 +805,11 @@ mod tests { assert!(assignments.is_empty()); } + #[derive(Debug)] struct MutatedAssignment { cores: Vec, cert: AssignmentCert, - group: GroupIndex, + groups: Vec, own_group: GroupIndex, val_index: ValidatorIndex, config: Config, @@ -828,7 +834,7 @@ mod tests { validator_groups: basic_groups(n_validators, n_cores), n_cores: n_cores as u32, zeroth_delay_tranche_width: 10, - relay_vrf_modulo_samples: 3, + relay_vrf_modulo_samples: 15, n_delay_tranches: 40, }; @@ -850,24 +856,25 @@ mod tests { let mut counted = 0; for (core, assignment) in assignments { - let mut mutated = MutatedAssignment { - cores: match assignment.cert.kind.clone() { - AssignmentCertKind::RelayVRFModuloCompact { sample: _, core_indices } => - core_indices, - AssignmentCertKind::RelayVRFModulo { sample: _ } => { - vec![core] - }, - AssignmentCertKind::RelayVRFDelay { core_index } => { - vec![core_index] - }, + let cores = match assignment.cert.kind.clone() { + AssignmentCertKind::RelayVRFModuloCompact { sample: _, core_indices } => + core_indices, + AssignmentCertKind::RelayVRFModulo { sample: _ } => { + vec![core] + }, + AssignmentCertKind::RelayVRFDelay { core_index } => { + vec![core_index] }, - group: group_for_core(core.0 as _), + }; + + let mut mutated = MutatedAssignment { + cores: cores.clone(), + groups: cores.clone().into_iter().map(|core| group_for_core(core.0 as _)).collect(), cert: assignment.cert, own_group: GroupIndex(0), val_index: ValidatorIndex(0), config: config.clone(), }; - let expected = match f(&mut mutated) { None => continue, Some(e) => e, @@ -881,7 +888,7 @@ mod tests { &mutated.config, relay_vrf_story.clone(), &mutated.cert, - mutated.group, + mutated.groups, ) .is_ok(); @@ -907,7 +914,7 @@ mod tests { #[test] fn check_rejects_in_backing_group() { check_mutated_assignments(200, 100, 25, |m| { - m.group = m.own_group; + m.groups[0] = 
m.own_group; Some(false) }); } diff --git a/node/core/approval-voting/src/import.rs b/node/core/approval-voting/src/import.rs index e5e3d50c6e81..2d8ea3d91671 100644 --- a/node/core/approval-voting/src/import.rs +++ b/node/core/approval-voting/src/import.rs @@ -695,7 +695,7 @@ pub(crate) mod tests { _config: &criteria::Config, _relay_vrf_story: polkadot_node_primitives::approval::RelayVRFStory, _assignment: &polkadot_node_primitives::approval::AssignmentCert, - _backing_group: polkadot_primitives::GroupIndex, + _backing_groups: Vec, ) -> Result { Ok(0) } diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index 2d34704168bc..deac22655966 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -1207,9 +1207,9 @@ async fn handle_from_overseer( vec![Action::Conclude] }, FromOrchestra::Communication { msg } => match msg { - ApprovalVotingMessage::CheckAndImportAssignment(a, claimed_core, res) => { + ApprovalVotingMessage::CheckAndImportAssignment(a, claimed_cores, res) => { let (check_outcome, actions) = - check_and_import_assignment(state, db, a, claimed_core)?; + check_and_import_assignment(state, db, a, claimed_cores)?; let _ = res.send(check_outcome); actions @@ -1673,7 +1673,7 @@ fn check_and_import_assignment( state: &State, db: &mut OverlayedBackend<'_, impl Backend>, assignment: IndirectAssignmentCert, - candidate_index: CandidateIndex, + candidate_indices: Vec, ) -> SubsystemResult<(AssignmentCheckResult, Vec)> { let tick_now = state.clock.tick_now(); @@ -1699,32 +1699,36 @@ fn check_and_import_assignment( )), }; - let (claimed_core_index, assigned_candidate_hash) = - match block_entry.candidate(candidate_index as usize) { - Some((c, h)) => (*c, *h), + // The Compact VRF modulo assignment cert has multiple core assignments. + let mut backing_groups = Vec::new(); + let mut claimed_core_indices = Vec::new(); + let mut assigned_candidate_hashes = Vec::new(); + + for candidate_index in candidate_indices.iter() { + let (claimed_core_index, assigned_candidate_hash) = + match block_entry.candidate(*candidate_index as usize) { + Some((c, h)) => (*c, *h), + None => + return Ok(( + AssignmentCheckResult::Bad(AssignmentCheckError::InvalidCandidateIndex( + *candidate_index, + )), + Vec::new(), + )), // no candidate at core. + }; + + let mut candidate_entry = match db.load_candidate_entry(&assigned_candidate_hash)? { + Some(c) => c, None => return Ok(( - AssignmentCheckResult::Bad(AssignmentCheckError::InvalidCandidateIndex( - candidate_index, + AssignmentCheckResult::Bad(AssignmentCheckError::InvalidCandidate( + *candidate_index, + assigned_candidate_hash, )), Vec::new(), - )), // no candidate at core. - }; - - let mut candidate_entry = match db.load_candidate_entry(&assigned_candidate_hash)? { - Some(c) => c, - None => - return Ok(( - AssignmentCheckResult::Bad(AssignmentCheckError::InvalidCandidate( - candidate_index, - assigned_candidate_hash, )), - Vec::new(), - )), - }; + }; - let res = { - // import the assignment. 
let approval_entry = match candidate_entry.approval_entry_mut(&assignment.block_hash) { Some(a) => a, None => @@ -1737,40 +1741,95 @@ fn check_and_import_assignment( )), }; - let res = state.assignment_criteria.check_assignment_cert( - vec![claimed_core_index], - assignment.validator, - &criteria::Config::from(session_info), - block_entry.relay_vrf_story(), - &assignment.cert, - approval_entry.backing_group(), - ); + backing_groups.push(approval_entry.backing_group()); + claimed_core_indices.push(claimed_core_index); + assigned_candidate_hashes.push(assigned_candidate_hash); + } - let tranche = match res { - Err(crate::criteria::InvalidAssignment(reason)) => - return Ok(( - AssignmentCheckResult::Bad(AssignmentCheckError::InvalidCert( - assignment.validator, - format!("{:?}", reason), - )), - Vec::new(), + // Check the assignment certificate. + let res = state.assignment_criteria.check_assignment_cert( + claimed_core_indices.clone(), + assignment.validator, + &criteria::Config::from(session_info), + block_entry.relay_vrf_story(), + &assignment.cert, + backing_groups, + ); + + let tranche = match res { + Err(crate::criteria::InvalidAssignment(reason)) => + return Ok(( + AssignmentCheckResult::Bad(AssignmentCheckError::InvalidCert( + assignment.validator, + format!("{:?}", reason), )), - Ok(tranche) => { - let current_tranche = - state.clock.tranche_now(state.slot_duration_millis, block_entry.slot()); + Vec::new(), + )), + Ok(tranche) => { + let current_tranche = + state.clock.tranche_now(state.slot_duration_millis, block_entry.slot()); - let too_far_in_future = current_tranche + TICK_TOO_FAR_IN_FUTURE as DelayTranche; + let too_far_in_future = current_tranche + TICK_TOO_FAR_IN_FUTURE as DelayTranche; - if tranche >= too_far_in_future { - return Ok((AssignmentCheckResult::TooFarInFuture, Vec::new())) - } + if tranche >= too_far_in_future { + return Ok((AssignmentCheckResult::TooFarInFuture, Vec::new())) + } - tranche - }, - }; + tranche + }, + }; + + let mut actions = Vec::new(); + let res = { + let mut is_duplicate = false; + // Import the assignments for all cores in the cert. + for (assigned_candidate_hash, candidate_index) in + assigned_candidate_hashes.iter().zip(candidate_indices) + { + let mut candidate_entry = match db.load_candidate_entry(&assigned_candidate_hash)? { + Some(c) => c, + None => + return Ok(( + AssignmentCheckResult::Bad(AssignmentCheckError::InvalidCandidate( + candidate_index, + *assigned_candidate_hash, + )), + Vec::new(), + )), + }; - let is_duplicate = approval_entry.is_assigned(assignment.validator); - approval_entry.import_assignment(tranche, assignment.validator, tick_now); + let approval_entry = match candidate_entry.approval_entry_mut(&assignment.block_hash) { + Some(a) => a, + None => + return Ok(( + AssignmentCheckResult::Bad(AssignmentCheckError::Internal( + assignment.block_hash, + *assigned_candidate_hash, + )), + Vec::new(), + )), + }; + is_duplicate |= approval_entry.is_assigned(assignment.validator); + approval_entry.import_assignment(tranche, assignment.validator, tick_now); + + // We've imported a new assignment, so we need to schedule a wake-up for when that might no-show. 
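+			// A compact cert can claim several cores, so one wake-up is scheduled for each
+			// candidate imported in this loop.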
+ if let Some((approval_entry, status)) = + state.approval_status(&block_entry, &candidate_entry) + { + actions.extend(schedule_wakeup_action( + approval_entry, + block_entry.block_hash(), + block_entry.block_number(), + *assigned_candidate_hash, + status.block_tick, + tick_now, + status.required_tranches, + )); + } + + // We also write the candidate entry as it now contains the new candidate. + db.write_candidate_entry(candidate_entry.into()); + } if is_duplicate { AssignmentCheckResult::AcceptedDuplicate @@ -1778,33 +1837,15 @@ fn check_and_import_assignment( gum::trace!( target: LOG_TARGET, validator = assignment.validator.0, - candidate_hash = ?assigned_candidate_hash, - para_id = ?candidate_entry.candidate_receipt().descriptor.para_id, - "Imported assignment.", + candidate_hashes = ?assigned_candidate_hashes, + assigned_cores = ?claimed_core_indices, + "Imported assignments for multiple cores.", ); AssignmentCheckResult::Accepted } }; - let mut actions = Vec::new(); - - // We've imported a new approval, so we need to schedule a wake-up for when that might no-show. - if let Some((approval_entry, status)) = state.approval_status(&block_entry, &candidate_entry) { - actions.extend(schedule_wakeup_action( - approval_entry, - block_entry.block_hash(), - block_entry.block_number(), - assigned_candidate_hash, - status.block_tick, - tick_now, - status.required_tranches, - )); - } - - // We also write the candidate entry as it now contains the new candidate. - db.write_candidate_entry(candidate_entry.into()); - Ok((res, actions)) } diff --git a/node/core/approval-voting/src/tests.rs b/node/core/approval-voting/src/tests.rs index a8359f58205e..584ae0410f6e 100644 --- a/node/core/approval-voting/src/tests.rs +++ b/node/core/approval-voting/src/tests.rs @@ -251,7 +251,7 @@ where _config: &criteria::Config, _relay_vrf_story: polkadot_node_primitives::approval::RelayVRFStory, _assignment: &polkadot_node_primitives::approval::AssignmentCert, - _backing_group: polkadot_primitives::GroupIndex, + _backing_groups: Vec, ) -> Result { self.1(validator_index) } @@ -625,7 +625,7 @@ async fn check_and_import_assignment( validator, cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }), }, - candidate_index, + vec![candidate_index], tx, ), }, @@ -1113,7 +1113,7 @@ fn blank_subsystem_act_on_bad_block() { sample: 0, }), }, - 0u32, + vec![0u32], tx, ), }, @@ -1781,7 +1781,7 @@ fn linear_import_act_on_leaf() { sample: 0, }), }, - 0u32, + vec![0u32], tx, ), }, @@ -1851,7 +1851,7 @@ fn forkful_import_at_same_height_act_on_leaf() { sample: 0, }), }, - 0u32, + vec![0u32], tx, ), }, From 82c2b5f88d76cddea6e026b9a522a7607188a556 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 10 Feb 2023 12:51:33 +0000 Subject: [PATCH 006/105] integration step 1: subsystem message changes Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/lib.rs | 49 +++++++++++++++++++++++--- node/core/approval-voting/src/tests.rs | 22 +++++++++--- node/subsystem-types/src/messages.rs | 4 +-- 3 files changed, 64 insertions(+), 11 deletions(-) diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index deac22655966..62e83fea3623 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -24,7 +24,8 @@ use polkadot_node_jaeger as jaeger; use polkadot_node_primitives::{ approval::{ - BlockApprovalMeta, DelayTranche, IndirectAssignmentCert, IndirectSignedApprovalVote, + AssignmentCert, AssignmentCertKind, BlockApprovalMeta, DelayTranche, + 
IndirectAssignmentCert, IndirectSignedApprovalVote, }, ValidationResult, APPROVAL_EXECUTION_TIMEOUT, }; @@ -428,7 +429,8 @@ struct ApprovalVoteRequest { #[derive(Default)] struct Wakeups { - // Tick -> [(Relay Block, Candidate Hash)] + // Tick -> [(Relay Block, Vec of Candidate Hash)] + // For Compact modulo VRF wakeups we want to wake-up once for all candidates wakeups: BTreeMap>, reverse_wakeups: HashMap<(Hash, CandidateHash), Tick>, block_numbers: BTreeMap>, @@ -972,9 +974,23 @@ async fn handle_actions( let block_hash = indirect_cert.block_hash; let validator_index = indirect_cert.validator; + // Find all candidates indices for the certificate claimed cores. + let block_entry = match overlayed_db.load_block_entry(&block_hash)? { + Some(b) => b, + None => { + gum::warn!(target: LOG_TARGET, ?block_hash, "Missing block entry"); + + continue + }, + }; + + // Get all candidate indices in case this is a compact module vrf assignment. + let candidate_indices = + cores_to_candidate_indices(&block_entry, candidate_index, &indirect_cert.cert); + ctx.send_unbounded_message(ApprovalDistributionMessage::DistributeAssignment( indirect_cert, - candidate_index, + candidate_indices, )); match approvals_cache.get(&candidate_hash) { @@ -1032,6 +1048,29 @@ async fn handle_actions( Ok(conclude) } +fn cores_to_candidate_indices( + block_entry: &BlockEntry, + candidate_index: CandidateIndex, + cert: &AssignmentCert, +) -> Vec { + let mut candidate_indices = Vec::new(); + match &cert.kind { + AssignmentCertKind::RelayVRFModuloCompact { sample: _, core_indices } => { + for cert_core_index in core_indices { + if let Some(candidate_index) = block_entry + .candidates() + .iter() + .position(|(core_index, _)| core_index == cert_core_index) + { + candidate_indices.push(candidate_index as _) + } + } + }, + _ => candidate_indices.push(candidate_index as _), + } + candidate_indices +} + fn distribution_messages_for_activation( db: &OverlayedBackend<'_, impl Backend>, ) -> SubsystemResult> { @@ -1086,7 +1125,7 @@ fn distribution_messages_for_activation( validator: assignment.validator_index(), cert: assignment.cert().clone(), }, - i as _, + cores_to_candidate_indices(&block_entry, i as _, assignment.cert()), )); }, (Some(assignment), Some(approval_sig)) => { @@ -1096,7 +1135,7 @@ fn distribution_messages_for_activation( validator: assignment.validator_index(), cert: assignment.cert().clone(), }, - i as _, + cores_to_candidate_indices(&block_entry, i as _, assignment.cert()), )); messages.push(ApprovalDistributionMessage::DistributeApproval( diff --git a/node/core/approval-voting/src/tests.rs b/node/core/approval-voting/src/tests.rs index 584ae0410f6e..b5cc00974fdd 100644 --- a/node/core/approval-voting/src/tests.rs +++ b/node/core/approval-voting/src/tests.rs @@ -2303,6 +2303,20 @@ fn subsystem_validate_approvals_cache() { } .into(), ); + + let _ = assignments.insert( + CoreIndex(0), + approval_db::v1::OurAssignment { + cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModuloCompact { + sample: 0, + core_indices: vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)], + }), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); assignments }, |_| Ok(0), @@ -2406,9 +2420,9 @@ pub async fn handle_double_assignment_import( overseer_recv(virtual_overseer).await, AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( _, - c_index, + c_indices, )) => { - assert_eq!(candidate_index, c_index); + assert_eq!(vec![candidate_index], c_indices); } ); @@ -2421,9 
+2435,9 @@ pub async fn handle_double_assignment_import(
     for msg in vec![first_message, second_message].into_iter() {
         match msg {
             AllMessages::ApprovalDistribution(
-                ApprovalDistributionMessage::DistributeAssignment(_, c_index),
+                ApprovalDistributionMessage::DistributeAssignment(_, c_indices),
             ) => {
-                assert_eq!(candidate_index, c_index);
+                assert_eq!(vec![candidate_index], c_indices);
             },
             AllMessages::CandidateValidation(
                 CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, timeout, tx),
diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs
index 506e37d2cc92..f159a5f80482 100644
--- a/node/subsystem-types/src/messages.rs
+++ b/node/subsystem-types/src/messages.rs
@@ -770,7 +770,7 @@ pub enum ApprovalVotingMessage {
 	/// Should not be sent unless the block hash is known.
 	CheckAndImportAssignment(
 		IndirectAssignmentCert,
-		CandidateIndex,
+		Vec<CandidateIndex>,
 		oneshot::Sender<AssignmentCheckResult>,
 	),
 	/// Check if the approval vote is valid and can be accepted by our view of the
@@ -805,7 +805,7 @@ pub enum ApprovalDistributionMessage {
 	NewBlocks(Vec<BlockApprovalMeta>),
 	/// Distribute an assignment cert from the local validator. The cert is assumed
 	/// to be valid, relevant, and for the given relay-parent and validator index.
-	DistributeAssignment(IndirectAssignmentCert, CandidateIndex),
+	DistributeAssignment(IndirectAssignmentCert, Vec<CandidateIndex>),
 	/// Distribute an approval vote for the local validator. The approval vote is assumed to be
 	/// valid, relevant, and the corresponding approval already issued.
 	/// If not, the subsystem is free to drop the message.

From d9d56810f531dc62519391c18054d8029dabd73b Mon Sep 17 00:00:00 2001
From: Andrei Sandu
Date: Fri, 10 Feb 2023 12:56:23 +0000
Subject: [PATCH 007/105] Approval distribution WIP

Signed-off-by: Andrei Sandu
---
 node/network/approval-distribution/src/lib.rs | 173 +++++++++++-------
 node/network/protocol/src/lib.rs | 2 +-
 2 files changed, 105 insertions(+), 70 deletions(-)

diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs
index 3c6ed8661e0e..a86e483de5f9 100644
--- a/node/network/approval-distribution/src/lib.rs
+++ b/node/network/approval-distribution/src/lib.rs
@@ -197,6 +197,8 @@ struct Knowledge {
 	// When there is an entry with `MessageKind::Assignment`, the assignment is known.
 	// When there is an entry with `MessageKind::Approval`, the assignment and approval are known.
 	known_messages: HashMap<MessageSubject, MessageKind>,
+	// // A mapping from tranche0 compact VRF assignments which claim multiple candidates.
+	// known_compact_vrf_assignments: HashMap<(Hash, ValidatorIndex), Vec<CandidateIndex>>,
 }
 
 impl Knowledge {
@@ -209,6 +211,16 @@ impl Knowledge {
 		}
 	}
 
+	// Insert multiple messages of the same kind. This is only used for the multiple core
+	// assignments in a compact VRF modulo assignment.
+ fn insert_many(&mut self, messages: Vec, kind: MessageKind) -> bool { + messages + .into_iter() + .map(|message| self.insert(message, kind)) + .collect::>() + .sum() + } + fn insert(&mut self, message: MessageSubject, kind: MessageKind) -> bool { match self.known_messages.entry(message) { hash_map::Entry::Vacant(vacant) => { @@ -262,22 +274,29 @@ struct BlockEntry { #[derive(Debug)] enum ApprovalState { - Assigned(AssignmentCert), - Approved(AssignmentCert, ValidatorSignature), + Assigned(AssignmentCert, Vec), + Approved(AssignmentCert, Vec, ValidatorSignature), } impl ApprovalState { fn assignment_cert(&self) -> &AssignmentCert { match *self { - ApprovalState::Assigned(ref cert) => cert, - ApprovalState::Approved(ref cert, _) => cert, + ApprovalState::Assigned(ref cert, _) => cert, + ApprovalState::Approved(ref cert, _, _) => cert, + } + } + + fn candidate_indices(&self) -> &Vec { + match *self { + ApprovalState::Assigned(_, ref candidate_indices) => candidate_indices, + ApprovalState::Approved(_, ref candidate_indices, _) => candidate_indices, } } fn approval_signature(&self) -> Option { match *self { - ApprovalState::Assigned(_) => None, - ApprovalState::Approved(_, ref sig) => Some(sig.clone()), + ApprovalState::Assigned(_, _) => None, + ApprovalState::Approved(_, _, ref sig) => Some(sig.clone()), } } } @@ -549,23 +568,28 @@ impl State { num = assignments.len(), "Processing assignments from a peer", ); - for (assignment, claimed_index) in assignments.into_iter() { + for (assignment, claimed_indices) in assignments.into_iter() { if let Some(pending) = self.pending_known.get_mut(&assignment.block_hash) { - let message_subject = MessageSubject( - assignment.block_hash, - claimed_index, - assignment.validator, - ); + // We decompose the assignments as we track them individually. + for claimed_index in claimed_indices { + let message_subject = MessageSubject( + assignment.block_hash, + claimed_index, + assignment.validator, + ); - gum::trace!( - target: LOG_TARGET, - %peer_id, - ?message_subject, - "Pending assignment", - ); + gum::trace!( + target: LOG_TARGET, + %peer_id, + ?message_subject, + "Pending assignment", + ); - pending - .push((peer_id, PendingMessage::Assignment(assignment, claimed_index))); + pending.push(( + peer_id, + PendingMessage::Assignment(assignment, claimed_index), + )); + } continue } @@ -703,7 +727,7 @@ impl State { metrics: &Metrics, source: MessageSource, assignment: IndirectAssignmentCert, - claimed_candidate_index: CandidateIndex, + claimed_candidate_indices: Vec, rng: &mut R, ) where R: CryptoRng + Rng, @@ -731,7 +755,8 @@ impl State { }; // compute metadata on the assignment. 
- let message_subject = MessageSubject(block_hash, claimed_candidate_index, validator_index); + let message_subject = + MessageSubject(block_hash, claimed_candidate_indices, validator_index); let message_kind = MessageKind::Assignment; if let Some(peer_id) = source.peer_id() { @@ -778,7 +803,7 @@ impl State { ctx.send_message(ApprovalVotingMessage::CheckAndImportAssignment( assignment.clone(), - claimed_candidate_index, + claimed_candidate_indices, tx, )) .await; @@ -874,28 +899,52 @@ impl State { t.local_grid_neighbors().required_routing_by_index(validator_index, local) }); - let message_state = match entry.candidates.get_mut(claimed_candidate_index as usize) { - Some(candidate_entry) => { - // set the approval state for validator_index to Assigned - // unless the approval state is set already - candidate_entry.messages.entry(validator_index).or_insert_with(|| MessageState { - required_routing, - local, - random_routing: Default::default(), - approval_state: ApprovalState::Assigned(assignment.cert.clone()), - }) - }, - None => { - gum::warn!( - target: LOG_TARGET, - hash = ?block_hash, - ?claimed_candidate_index, - "Expected a candidate entry on import_and_circulate_assignment", - ); + let assignments = vec![(assignment, claimed_candidate_indices)]; + let n_peers_total = self.peer_views.len(); + let source_peer = source.peer_id(); - return - }, - }; + // Loop over all candidates in this assignment. + // TODO: Track message state separately for compact vrf assignments. + + // Note: at this point, we haven't received the message from any peers + // other than the source peer, and we just got it, so we haven't sent it + // to any peers either. + + let mut route_random = None; + for claimed_candidate_index in claimed_candidate_indices { + let message_state = match entry.candidates.get_mut(claimed_candidate_index as usize) { + Some(candidate_entry) => { + // set the approval state for validator_index to Assigned + // unless the approval state is set already + candidate_entry.messages.entry(validator_index).or_insert_with(|| { + MessageState { + required_routing, + local, + random_routing: Default::default(), + approval_state: ApprovalState::Assigned(assignment.cert.clone()), + } + }) + }, + None => { + gum::warn!( + target: LOG_TARGET, + hash = ?block_hash, + ?claimed_candidate_index, + "Expected a candidate entry on import_and_circulate_assignment", + ); + + return + }, + }; + + if route_random.is_none() { + route_random = message_state.random_routing.sample(n_peers_total, rng); + } + + if let Some(true) = route_random { + message_state.random_routing.inc_sent(); + } + } // Dispatch the message to all peers in the routing set which // know the block. @@ -903,10 +952,6 @@ impl State { // If the topology isn't known yet (race with networking subsystems) // then messages will be sent when we get it. - let assignments = vec![(assignment, claimed_candidate_index)]; - let n_peers_total = self.peer_views.len(); - let source_peer = source.peer_id(); - let mut peer_filter = move |peer| { if Some(peer) == source_peer.as_ref() { return false @@ -918,17 +963,6 @@ impl State { { return true } - - // Note: at this point, we haven't received the message from any peers - // other than the source peer, and we just got it, so we haven't sent it - // to any peers either. 
- let route_random = message_state.random_routing.sample(n_peers_total, rng); - - if route_random { - message_state.random_routing.inc_sent(); - } - - route_random }; let peers = entry.known_by.keys().filter(|p| peer_filter(p)).cloned().collect::>(); @@ -945,7 +979,7 @@ impl State { gum::trace!( target: LOG_TARGET, ?block_hash, - ?claimed_candidate_index, + ?claimed_candidate_indices, local = source.peer_id().is_none(), num_peers = peers.len(), "Sending an assignment to peers", @@ -1340,7 +1374,7 @@ impl State { validator: *validator, cert: message_state.approval_state.assignment_cert().clone(), }, - candidate_index, + message_state.approval_state.candidate_indices().clone(), ); let approval_message = @@ -1540,6 +1574,7 @@ async fn adjust_required_routing_and_propagate { state.handle_new_blocks(ctx, metrics, metas, rng).await; }, - ApprovalDistributionMessage::DistributeAssignment(cert, candidate_index) => { + ApprovalDistributionMessage::DistributeAssignment(cert, candidate_indices) => { gum::debug!( target: LOG_TARGET, - "Distributing our assignment on candidate (block={}, index={})", + "Distributing our assignment on candidate (block={}, indices={:?})", cert.block_hash, - candidate_index, + candidate_indices, ); state @@ -1695,7 +1730,7 @@ impl ApprovalDistribution { &metrics, MessageSource::Local, cert, - candidate_index, + candidate_indices, rng, ) .await; @@ -1765,7 +1800,7 @@ pub const MAX_APPROVAL_BATCH_SIZE: usize = ensure_size_not_zero( /// of assignments and can `select!` other tasks. pub(crate) async fn send_assignments_batched( sender: &mut impl overseer::ApprovalDistributionSenderTrait, - assignments: Vec<(IndirectAssignmentCert, CandidateIndex)>, + assignments: Vec<(IndirectAssignmentCert, Vec)>, peer: PeerId, ) { let mut batches = assignments.into_iter().peekable(); diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index 63024e0fd3f6..7625fddd346a 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -489,7 +489,7 @@ pub mod v1 { /// /// Actually checking the assignment may yield a different result. #[codec(index = 0)] - Assignments(Vec<(IndirectAssignmentCert, CandidateIndex)>), + Assignments(Vec<(IndirectAssignmentCert, Vec)>), /// Approvals for candidates in some recent, unfinalized block. #[codec(index = 1)] Approvals(Vec), From 596a3474ba92c7583b685b63803d6efad789f82b Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 10 Feb 2023 13:27:41 +0000 Subject: [PATCH 008/105] approval-dist: fixed compilation errors Signed-off-by: Andrei Sandu --- node/network/approval-distribution/src/lib.rs | 86 +++++++++++-------- 1 file changed, 49 insertions(+), 37 deletions(-) diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index a86e483de5f9..cfefdf135ba6 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -217,8 +217,7 @@ impl Knowledge { messages .into_iter() .map(|message| self.insert(message, kind)) - .collect::>() - .sum() + .fold(false, |result, is_ok| result && is_ok) } fn insert(&mut self, message: MessageSubject, kind: MessageKind) -> bool { @@ -336,7 +335,7 @@ impl MessageSource { } enum PendingMessage { - Assignment(IndirectAssignmentCert, CandidateIndex), + Assignment(IndirectAssignmentCert, Vec), Approval(IndirectSignedApprovalVote), } @@ -491,6 +490,7 @@ impl State { for (peer_id, message) in to_import { match message { + // TODO: We need to be able to get all claimed candidates here. 
PendingMessage::Assignment(assignment, claimed_index) => { self.import_and_circulate_assignment( ctx, @@ -568,28 +568,22 @@ impl State { num = assignments.len(), "Processing assignments from a peer", ); - for (assignment, claimed_indices) in assignments.into_iter() { + for (assignment, claimed_candidate_indices) in assignments.into_iter() { if let Some(pending) = self.pending_known.get_mut(&assignment.block_hash) { - // We decompose the assignments as we track them individually. - for claimed_index in claimed_indices { - let message_subject = MessageSubject( - assignment.block_hash, - claimed_index, - assignment.validator, - ); - - gum::trace!( - target: LOG_TARGET, - %peer_id, - ?message_subject, - "Pending assignment", - ); + gum::trace!( + target: LOG_TARGET, + %peer_id, + ?claimed_candidate_indices, + "Pending assignment", + ); - pending.push(( - peer_id, - PendingMessage::Assignment(assignment, claimed_index), - )); - } + pending.push(( + peer_id, + PendingMessage::Assignment( + assignment.clone(), + claimed_candidate_indices, + ), + )); continue } @@ -599,7 +593,7 @@ impl State { metrics, MessageSource::Peer(peer_id), assignment, - claimed_index, + claimed_candidate_indices, rng, ) .await; @@ -754,9 +748,9 @@ impl State { }, }; - // compute metadata on the assignment. + // compute metadata on first candidate in the assignment. let message_subject = - MessageSubject(block_hash, claimed_candidate_indices, validator_index); + MessageSubject(block_hash, claimed_candidate_indices[0], validator_index); let message_kind = MessageKind::Assignment; if let Some(peer_id) = source.peer_id() { @@ -765,8 +759,18 @@ impl State { hash_map::Entry::Occupied(mut peer_knowledge) => { let peer_knowledge = peer_knowledge.get_mut(); if peer_knowledge.contains(&message_subject, message_kind) { - // wasn't included before - if !peer_knowledge.received.insert(message_subject.clone(), message_kind) { + // Decompose to per candidate knowledge of assignment. + let mut message_subjects = Vec::new(); + for candidate_index in claimed_candidate_indices.clone() { + message_subjects.push(MessageSubject( + block_hash, + candidate_index, + validator_index, + )); + } + + // Checks if known already. + if !peer_knowledge.received.insert_many(message_subjects, message_kind) { gum::debug!( target: LOG_TARGET, ?peer_id, @@ -803,7 +807,7 @@ impl State { ctx.send_message(ApprovalVotingMessage::CheckAndImportAssignment( assignment.clone(), - claimed_candidate_indices, + claimed_candidate_indices.clone(), tx, )) .await; @@ -899,7 +903,7 @@ impl State { t.local_grid_neighbors().required_routing_by_index(validator_index, local) }); - let assignments = vec![(assignment, claimed_candidate_indices)]; + let assignments = vec![(assignment.clone(), claimed_candidate_indices.clone())]; let n_peers_total = self.peer_views.len(); let source_peer = source.peer_id(); @@ -911,9 +915,11 @@ impl State { // to any peers either. 
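		// The random-routing sample below is taken only once and then shared across all
		// candidates claimed by this assignment.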
let mut route_random = None; - for claimed_candidate_index in claimed_candidate_indices { + for claimed_candidate_index in claimed_candidate_indices.clone() { let message_state = match entry.candidates.get_mut(claimed_candidate_index as usize) { Some(candidate_entry) => { + let claimed_candidate_indices = claimed_candidate_indices.clone(); + let assignment = assignment.clone(); // set the approval state for validator_index to Assigned // unless the approval state is set already candidate_entry.messages.entry(validator_index).or_insert_with(|| { @@ -921,7 +927,10 @@ impl State { required_routing, local, random_routing: Default::default(), - approval_state: ApprovalState::Assigned(assignment.cert.clone()), + approval_state: ApprovalState::Assigned( + assignment.cert, + claimed_candidate_indices, + ), } }) }, @@ -938,7 +947,7 @@ impl State { }; if route_random.is_none() { - route_random = message_state.random_routing.sample(n_peers_total, rng); + route_random = Some(message_state.random_routing.sample(n_peers_total, rng)); } if let Some(true) = route_random { @@ -952,7 +961,7 @@ impl State { // If the topology isn't known yet (race with networking subsystems) // then messages will be sent when we get it. - let mut peer_filter = move |peer| { + let peer_filter = move |peer| { if Some(peer) == source_peer.as_ref() { return false } @@ -963,6 +972,8 @@ impl State { { return true } + + false }; let peers = entry.known_by.keys().filter(|p| peer_filter(p)).cloned().collect::>(); @@ -1142,7 +1153,7 @@ impl State { // it should be in assigned state already match candidate_entry.messages.remove(&validator_index) { Some(MessageState { - approval_state: ApprovalState::Assigned(cert), + approval_state: ApprovalState::Assigned(cert, candidate_indices), required_routing, local, random_routing, @@ -1152,6 +1163,7 @@ impl State { MessageState { approval_state: ApprovalState::Approved( cert, + candidate_indices, vote.signature.clone(), ), required_routing, @@ -1291,8 +1303,8 @@ impl State { let sigs = candidate_entry.messages.iter().filter_map(|(validator_index, message_state)| { match &message_state.approval_state { - ApprovalState::Approved(_, sig) => Some((*validator_index, sig.clone())), - ApprovalState::Assigned(_) => None, + ApprovalState::Approved(_, _, sig) => Some((*validator_index, sig.clone())), + ApprovalState::Assigned(_, _) => None, } }); all_sigs.extend(sigs); From c87502bf3fe474578f107184194ae7a001e85719 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 10 Feb 2023 13:38:55 +0000 Subject: [PATCH 009/105] test fixups, WIP Signed-off-by: Andrei Sandu --- .../approval-distribution/src/tests.rs | 45 ++++++++++--------- 1 file changed, 24 insertions(+), 21 deletions(-) diff --git a/node/network/approval-distribution/src/tests.rs b/node/network/approval-distribution/src/tests.rs index 459b9d4899fb..bbaa2d744729 100644 --- a/node/network/approval-distribution/src/tests.rs +++ b/node/network/approval-distribution/src/tests.rs @@ -323,7 +323,7 @@ fn try_import_the_same_assignment() { // send the assignment related to `hash` let validator_index = ValidatorIndex(0); let cert = fake_assignment_cert(hash, validator_index); - let assignments = vec![(cert.clone(), 0u32)]; + let assignments = vec![(cert.clone(), vec![0u32])]; let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments.clone()); send_message_from_peer(overseer, &peer_a, msg).await; @@ -335,10 +335,11 @@ fn try_import_the_same_assignment() { overseer_recv(overseer).await, 
AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( assignment, - 0u32, + claimed_candidate_indices, tx, )) => { assert_eq!(assignment, cert); + assert_eq!(claimed_candidate_indices, vec![0u32]); tx.send(AssignmentCheckResult::Accepted).unwrap(); } ); @@ -410,7 +411,7 @@ fn spam_attack_results_in_negative_reputation_change() { .map(|candidate_index| { let validator_index = ValidatorIndex(candidate_index as u32); let cert = fake_assignment_cert(hash_b, validator_index); - (cert, candidate_index as u32) + (cert, vec![candidate_index as u32]) }) .collect(); @@ -492,7 +493,7 @@ fn peer_sending_us_the_same_we_just_sent_them_is_ok() { let cert = fake_assignment_cert(hash, validator_index); overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert.clone(), candidate_index), + ApprovalDistributionMessage::DistributeAssignment(cert.clone(), vec![candidate_index]), ) .await; @@ -522,7 +523,7 @@ fn peer_sending_us_the_same_we_just_sent_them_is_ok() { // but if someone else is sending it the same assignment // the peer could send us it as well - let assignments = vec![(cert, candidate_index)]; + let assignments = vec![(cert, vec![candidate_index])]; let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments); send_message_from_peer(overseer, peer, msg.clone()).await; @@ -570,7 +571,7 @@ fn import_approval_happy_path() { let cert = fake_assignment_cert(hash, validator_index); overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert, candidate_index), + ApprovalDistributionMessage::DistributeAssignment(cert, vec![candidate_index]), ) .await; @@ -668,7 +669,7 @@ fn import_approval_bad() { expect_reputation_change(overseer, &peer_b, COST_UNEXPECTED_MESSAGE).await; // now import an assignment from peer_b - let assignments = vec![(cert.clone(), candidate_index)]; + let assignments = vec![(cert.clone(), vec![candidate_index])]; let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments); send_message_from_peer(overseer, &peer_b, msg).await; @@ -676,11 +677,11 @@ fn import_approval_bad() { overseer_recv(overseer).await, AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( assignment, - i, + candidate_indices, tx, )) => { assert_eq!(assignment, cert); - assert_eq!(i, candidate_index); + assert_eq!(candidate_indices[0], candidate_index); tx.send(AssignmentCheckResult::Accepted).unwrap(); } ); @@ -825,9 +826,11 @@ fn update_peer_view() { let cert_a = fake_assignment_cert(hash_a, ValidatorIndex(0)); let cert_b = fake_assignment_cert(hash_b, ValidatorIndex(0)); - overseer_send(overseer, ApprovalDistributionMessage::DistributeAssignment(cert_a, 0)).await; + overseer_send(overseer, ApprovalDistributionMessage::DistributeAssignment(cert_a, vec![0])) + .await; - overseer_send(overseer, ApprovalDistributionMessage::DistributeAssignment(cert_b, 0)).await; + overseer_send(overseer, ApprovalDistributionMessage::DistributeAssignment(cert_b, vec![0])) + .await; // connect a peer setup_peer_with_view(overseer, peer, view![hash_a]).await; @@ -879,7 +882,7 @@ fn update_peer_view() { overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert_c.clone(), 0), + ApprovalDistributionMessage::DistributeAssignment(cert_c.clone(), vec![0]), ) .await; @@ -963,7 +966,7 @@ fn import_remotely_then_locally() { let validator_index = ValidatorIndex(0); let candidate_index = 0u32; let cert = fake_assignment_cert(hash, validator_index); - let assignments = vec![(cert.clone(), 
candidate_index)]; + let assignments = vec![(cert.clone(), vec![candidate_index])]; let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments.clone()); send_message_from_peer(overseer, peer, msg).await; @@ -972,11 +975,11 @@ fn import_remotely_then_locally() { overseer_recv(overseer).await, AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( assignment, - i, + candidate_indices, tx, )) => { assert_eq!(assignment, cert); - assert_eq!(i, candidate_index); + assert_eq!(candidate_indices[0], candidate_index); tx.send(AssignmentCheckResult::Accepted).unwrap(); } ); @@ -986,7 +989,7 @@ fn import_remotely_then_locally() { // import the same assignment locally overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert, candidate_index), + ApprovalDistributionMessage::DistributeAssignment(cert, vec![candidate_index]), ) .await; @@ -1058,7 +1061,7 @@ fn sends_assignments_even_when_state_is_approved() { overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert.clone(), candidate_index), + ApprovalDistributionMessage::DistributeAssignment(cert.clone(), vec![candidate_index]), ) .await; @@ -1236,7 +1239,7 @@ fn propagates_locally_generated_assignment_to_both_dimensions() { overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert.clone(), candidate_index), + ApprovalDistributionMessage::DistributeAssignment(cert.clone(), vec![candidate_index]), ) .await; @@ -1484,7 +1487,7 @@ fn propagates_to_required_after_connect() { overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert.clone(), candidate_index), + ApprovalDistributionMessage::DistributeAssignment(cert.clone(), vec![candidate_index]), ) .await; @@ -1609,7 +1612,7 @@ fn sends_to_more_peers_after_getting_topology() { overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert.clone(), candidate_index), + ApprovalDistributionMessage::DistributeAssignment(cert.clone(), vec![candidate_index]), ) .await; @@ -1768,7 +1771,7 @@ fn originator_aggression_l1() { overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert.clone(), candidate_index), + ApprovalDistributionMessage::DistributeAssignment(cert.clone(), vec![candidate_index]), ) .await; From cc9f63ad813e02379116fd5b1591f181b8d6038a Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Mon, 13 Feb 2023 13:15:42 +0000 Subject: [PATCH 010/105] itertools Signed-off-by: Andrei Sandu --- Cargo.lock | 5 +++-- node/core/approval-voting/Cargo.toml | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1e038afdec62..826e10e74d7b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3406,9 +3406,9 @@ checksum = "11b0d96e660696543b251e58030cf9787df56da39dab19ad60eae7353040917e" [[package]] name = "itertools" -version = "0.10.3" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" dependencies = [ "either", ] @@ -6775,6 +6775,7 @@ dependencies = [ "derive_more", "futures", "futures-timer", + "itertools", "kvdb", "kvdb-memorydb", "lru 0.9.0", diff --git a/node/core/approval-voting/Cargo.toml b/node/core/approval-voting/Cargo.toml index 5264c9f335cb..50ef6873f20c 100644 --- a/node/core/approval-voting/Cargo.toml +++ b/node/core/approval-voting/Cargo.toml @@ -16,6 +16,7 @@ schnorrkel = "0.9.1" kvdb = "0.13.0" 
derive_more = "0.99.17" thiserror = "1.0.31" +itertools = "0.10.5" polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } From eb86e562b8c1c309aeb21d45d2bf10fcd085b2a3 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Mon, 13 Feb 2023 13:16:12 +0000 Subject: [PATCH 011/105] Allow claimed cores to be a subset of vrf output assignments Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/criteria.rs | 31 ++++++++++++++++++++--- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index 933326f300f2..b35758046175 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -30,6 +30,7 @@ use sp_application_crypto::ByteArray; use merlin::Transcript; use schnorrkel::vrf::VRFInOut; +use itertools::Itertools; use std::collections::{hash_map::Entry, HashMap}; use super::LOG_TARGET; @@ -140,6 +141,7 @@ fn relay_vrf_modulo_cores( .chunks_exact(4) .take(num_samples as usize) .map(move |sample| CoreIndex(u32::from_le_bytes(clone_into_array(&sample)) % max_cores)) + .unique() .collect::>() } @@ -403,7 +405,7 @@ fn compute_relay_vrf_modulo_assignments( ?assigned_cores, ?validator_index, tranche = 0, - "RelayVRFModulo Assignment." + "RelayVRFModuloCompact Assignment." ); Some(assigned_cores_transcript(assigned_cores)) @@ -584,10 +586,24 @@ pub(crate) fn check_assignment_cert( let resulting_cores = relay_vrf_modulo_cores(&vrf_in_out, *sample + 1, config.n_cores); - // Ensure that the `vrf_in_out` actually gives us the claimed cores. - if resulting_cores == claimed_core_indices { + // TODO: Enforce that all claimable cores are claimed. Currently validators can opt out of checking specific cores. + // This is similar to how validator can opt out and not send assignments in the first place. + // However it can happen that malicious nodes modify the assignment and remove some of the claimed cores from it, + // but this shouldnt be a problem as we will eventually receive the original assignment assuming 1/3 malicious. + // + // Ensure that the `vrf_in_out` actually includes all of the claimed cores. + if claimed_core_indices + .iter() + .fold(true, |cores_match, core| cores_match & resulting_cores.contains(core)) + { Ok(0) } else { + gum::debug!( + target: LOG_TARGET, + ?resulting_cores, + ?claimed_core_indices, + "Assignment claimed cores mismatch", + ); Err(InvalidAssignment(Reason::VRFModuloCoreIndexMismatch)) } }, @@ -605,10 +621,17 @@ pub(crate) fn check_assignment_cert( ) .map_err(|_| InvalidAssignment(Reason::VRFModuloOutputMismatch))?; + let core = relay_vrf_modulo_core(&vrf_in_out, config.n_cores); // ensure that the `vrf_in_out` actually gives us the claimed core. 
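// Illustrative sketch (not part of the patch): the subset check in the `RelayVRFModuloCompact`
// branch above (the `fold` over `claimed_core_indices`) is equivalent to the perhaps more familiar
//
//     let all_claimed_cores_assigned = claimed_core_indices
//         .iter()
//         .all(|core| resulting_cores.contains(core));
//
// i.e. every claimed core must appear among the cores derived from the VRF output (deduplicated by
// `.unique()` above), while cores the validator chose not to claim are simply ignored.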
- if relay_vrf_modulo_core(&vrf_in_out, config.n_cores) == claimed_core_indices[0] { + if core == claimed_core_indices[0] { Ok(0) } else { + gum::debug!( + target: LOG_TARGET, + ?core, + ?claimed_core_indices, + "Assignment claimed cores mismatch", + ); Err(InvalidAssignment(Reason::VRFModuloCoreIndexMismatch)) } }, From 4d421d289b1e5c10aa706bc3fbe6cf98f7abdddb Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Mon, 13 Feb 2023 13:19:00 +0000 Subject: [PATCH 012/105] Revert approval distribution changes Signed-off-by: Andrei Sandu --- node/network/approval-distribution/src/lib.rs | 193 +++++++----------- .../approval-distribution/src/tests.rs | 45 ++-- 2 files changed, 94 insertions(+), 144 deletions(-) diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index cfefdf135ba6..3c6ed8661e0e 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -197,8 +197,6 @@ struct Knowledge { // When there is an entry with `MessageKind::Assignment`, the assignment is known. // When there is an entry with `MessageKind::Approval`, the assignment and approval are known. known_messages: HashMap, - // // A mapping from tranche0 compact VRF assignments which claim multiple candidates. - // known_compact_vrf_assignments: HashMap<(Hash, ValidatorIndex), Vec>, } impl Knowledge { @@ -211,15 +209,6 @@ impl Knowledge { } } - // Insert multiple messages of same kind. This is only used for multiple core assignments in a - // Compact VRF modile assiginments. - fn insert_many(&mut self, messages: Vec, kind: MessageKind) -> bool { - messages - .into_iter() - .map(|message| self.insert(message, kind)) - .fold(false, |result, is_ok| result && is_ok) - } - fn insert(&mut self, message: MessageSubject, kind: MessageKind) -> bool { match self.known_messages.entry(message) { hash_map::Entry::Vacant(vacant) => { @@ -273,29 +262,22 @@ struct BlockEntry { #[derive(Debug)] enum ApprovalState { - Assigned(AssignmentCert, Vec), - Approved(AssignmentCert, Vec, ValidatorSignature), + Assigned(AssignmentCert), + Approved(AssignmentCert, ValidatorSignature), } impl ApprovalState { fn assignment_cert(&self) -> &AssignmentCert { match *self { - ApprovalState::Assigned(ref cert, _) => cert, - ApprovalState::Approved(ref cert, _, _) => cert, - } - } - - fn candidate_indices(&self) -> &Vec { - match *self { - ApprovalState::Assigned(_, ref candidate_indices) => candidate_indices, - ApprovalState::Approved(_, ref candidate_indices, _) => candidate_indices, + ApprovalState::Assigned(ref cert) => cert, + ApprovalState::Approved(ref cert, _) => cert, } } fn approval_signature(&self) -> Option { match *self { - ApprovalState::Assigned(_, _) => None, - ApprovalState::Approved(_, _, ref sig) => Some(sig.clone()), + ApprovalState::Assigned(_) => None, + ApprovalState::Approved(_, ref sig) => Some(sig.clone()), } } } @@ -335,7 +317,7 @@ impl MessageSource { } enum PendingMessage { - Assignment(IndirectAssignmentCert, Vec), + Assignment(IndirectAssignmentCert, CandidateIndex), Approval(IndirectSignedApprovalVote), } @@ -490,7 +472,6 @@ impl State { for (peer_id, message) in to_import { match message { - // TODO: We need to be able to get all claimed candidates here. 
PendingMessage::Assignment(assignment, claimed_index) => { self.import_and_circulate_assignment( ctx, @@ -568,22 +549,23 @@ impl State { num = assignments.len(), "Processing assignments from a peer", ); - for (assignment, claimed_candidate_indices) in assignments.into_iter() { + for (assignment, claimed_index) in assignments.into_iter() { if let Some(pending) = self.pending_known.get_mut(&assignment.block_hash) { + let message_subject = MessageSubject( + assignment.block_hash, + claimed_index, + assignment.validator, + ); + gum::trace!( target: LOG_TARGET, %peer_id, - ?claimed_candidate_indices, + ?message_subject, "Pending assignment", ); - pending.push(( - peer_id, - PendingMessage::Assignment( - assignment.clone(), - claimed_candidate_indices, - ), - )); + pending + .push((peer_id, PendingMessage::Assignment(assignment, claimed_index))); continue } @@ -593,7 +575,7 @@ impl State { metrics, MessageSource::Peer(peer_id), assignment, - claimed_candidate_indices, + claimed_index, rng, ) .await; @@ -721,7 +703,7 @@ impl State { metrics: &Metrics, source: MessageSource, assignment: IndirectAssignmentCert, - claimed_candidate_indices: Vec, + claimed_candidate_index: CandidateIndex, rng: &mut R, ) where R: CryptoRng + Rng, @@ -748,9 +730,8 @@ impl State { }, }; - // compute metadata on first candidate in the assignment. - let message_subject = - MessageSubject(block_hash, claimed_candidate_indices[0], validator_index); + // compute metadata on the assignment. + let message_subject = MessageSubject(block_hash, claimed_candidate_index, validator_index); let message_kind = MessageKind::Assignment; if let Some(peer_id) = source.peer_id() { @@ -759,18 +740,8 @@ impl State { hash_map::Entry::Occupied(mut peer_knowledge) => { let peer_knowledge = peer_knowledge.get_mut(); if peer_knowledge.contains(&message_subject, message_kind) { - // Decompose to per candidate knowledge of assignment. - let mut message_subjects = Vec::new(); - for candidate_index in claimed_candidate_indices.clone() { - message_subjects.push(MessageSubject( - block_hash, - candidate_index, - validator_index, - )); - } - - // Checks if known already. - if !peer_knowledge.received.insert_many(message_subjects, message_kind) { + // wasn't included before + if !peer_knowledge.received.insert(message_subject.clone(), message_kind) { gum::debug!( target: LOG_TARGET, ?peer_id, @@ -807,7 +778,7 @@ impl State { ctx.send_message(ApprovalVotingMessage::CheckAndImportAssignment( assignment.clone(), - claimed_candidate_indices.clone(), + claimed_candidate_index, tx, )) .await; @@ -903,57 +874,28 @@ impl State { t.local_grid_neighbors().required_routing_by_index(validator_index, local) }); - let assignments = vec![(assignment.clone(), claimed_candidate_indices.clone())]; - let n_peers_total = self.peer_views.len(); - let source_peer = source.peer_id(); - - // Loop over all candidates in this assignment. - // TODO: Track message state separately for compact vrf assignments. - - // Note: at this point, we haven't received the message from any peers - // other than the source peer, and we just got it, so we haven't sent it - // to any peers either. 
- - let mut route_random = None; - for claimed_candidate_index in claimed_candidate_indices.clone() { - let message_state = match entry.candidates.get_mut(claimed_candidate_index as usize) { - Some(candidate_entry) => { - let claimed_candidate_indices = claimed_candidate_indices.clone(); - let assignment = assignment.clone(); - // set the approval state for validator_index to Assigned - // unless the approval state is set already - candidate_entry.messages.entry(validator_index).or_insert_with(|| { - MessageState { - required_routing, - local, - random_routing: Default::default(), - approval_state: ApprovalState::Assigned( - assignment.cert, - claimed_candidate_indices, - ), - } - }) - }, - None => { - gum::warn!( - target: LOG_TARGET, - hash = ?block_hash, - ?claimed_candidate_index, - "Expected a candidate entry on import_and_circulate_assignment", - ); - - return - }, - }; - - if route_random.is_none() { - route_random = Some(message_state.random_routing.sample(n_peers_total, rng)); - } + let message_state = match entry.candidates.get_mut(claimed_candidate_index as usize) { + Some(candidate_entry) => { + // set the approval state for validator_index to Assigned + // unless the approval state is set already + candidate_entry.messages.entry(validator_index).or_insert_with(|| MessageState { + required_routing, + local, + random_routing: Default::default(), + approval_state: ApprovalState::Assigned(assignment.cert.clone()), + }) + }, + None => { + gum::warn!( + target: LOG_TARGET, + hash = ?block_hash, + ?claimed_candidate_index, + "Expected a candidate entry on import_and_circulate_assignment", + ); - if let Some(true) = route_random { - message_state.random_routing.inc_sent(); - } - } + return + }, + }; // Dispatch the message to all peers in the routing set which // know the block. @@ -961,7 +903,11 @@ impl State { // If the topology isn't known yet (race with networking subsystems) // then messages will be sent when we get it. - let peer_filter = move |peer| { + let assignments = vec![(assignment, claimed_candidate_index)]; + let n_peers_total = self.peer_views.len(); + let source_peer = source.peer_id(); + + let mut peer_filter = move |peer| { if Some(peer) == source_peer.as_ref() { return false } @@ -973,7 +919,16 @@ impl State { return true } - false + // Note: at this point, we haven't received the message from any peers + // other than the source peer, and we just got it, so we haven't sent it + // to any peers either. 
+ let route_random = message_state.random_routing.sample(n_peers_total, rng); + + if route_random { + message_state.random_routing.inc_sent(); + } + + route_random }; let peers = entry.known_by.keys().filter(|p| peer_filter(p)).cloned().collect::>(); @@ -990,7 +945,7 @@ impl State { gum::trace!( target: LOG_TARGET, ?block_hash, - ?claimed_candidate_indices, + ?claimed_candidate_index, local = source.peer_id().is_none(), num_peers = peers.len(), "Sending an assignment to peers", @@ -1153,7 +1108,7 @@ impl State { // it should be in assigned state already match candidate_entry.messages.remove(&validator_index) { Some(MessageState { - approval_state: ApprovalState::Assigned(cert, candidate_indices), + approval_state: ApprovalState::Assigned(cert), required_routing, local, random_routing, @@ -1163,7 +1118,6 @@ impl State { MessageState { approval_state: ApprovalState::Approved( cert, - candidate_indices, vote.signature.clone(), ), required_routing, @@ -1303,8 +1257,8 @@ impl State { let sigs = candidate_entry.messages.iter().filter_map(|(validator_index, message_state)| { match &message_state.approval_state { - ApprovalState::Approved(_, _, sig) => Some((*validator_index, sig.clone())), - ApprovalState::Assigned(_, _) => None, + ApprovalState::Approved(_, sig) => Some((*validator_index, sig.clone())), + ApprovalState::Assigned(_) => None, } }); all_sigs.extend(sigs); @@ -1386,7 +1340,7 @@ impl State { validator: *validator, cert: message_state.approval_state.assignment_cert().clone(), }, - message_state.approval_state.candidate_indices().clone(), + candidate_index, ); let approval_message = @@ -1586,7 +1540,6 @@ async fn adjust_required_routing_and_propagate { state.handle_new_blocks(ctx, metrics, metas, rng).await; }, - ApprovalDistributionMessage::DistributeAssignment(cert, candidate_indices) => { + ApprovalDistributionMessage::DistributeAssignment(cert, candidate_index) => { gum::debug!( target: LOG_TARGET, - "Distributing our assignment on candidate (block={}, indices={:?})", + "Distributing our assignment on candidate (block={}, index={})", cert.block_hash, - candidate_indices, + candidate_index, ); state @@ -1742,7 +1695,7 @@ impl ApprovalDistribution { &metrics, MessageSource::Local, cert, - candidate_indices, + candidate_index, rng, ) .await; @@ -1812,7 +1765,7 @@ pub const MAX_APPROVAL_BATCH_SIZE: usize = ensure_size_not_zero( /// of assignments and can `select!` other tasks. 
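// Illustrative sketch (not part of the patch): the function below drains the `peekable` iterator in
// bounded chunks, one network message per chunk, conceptually similar to the following (the chunk
// size constant and the `send_assignments` helper are assumed here for illustration, by analogy
// with `MAX_APPROVAL_BATCH_SIZE` above):
//
//     for batch in assignments.chunks(batch_size) {
//         send_assignments(sender, batch.to_vec(), peer).await;
//     }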
pub(crate) async fn send_assignments_batched( sender: &mut impl overseer::ApprovalDistributionSenderTrait, - assignments: Vec<(IndirectAssignmentCert, Vec)>, + assignments: Vec<(IndirectAssignmentCert, CandidateIndex)>, peer: PeerId, ) { let mut batches = assignments.into_iter().peekable(); diff --git a/node/network/approval-distribution/src/tests.rs b/node/network/approval-distribution/src/tests.rs index bbaa2d744729..459b9d4899fb 100644 --- a/node/network/approval-distribution/src/tests.rs +++ b/node/network/approval-distribution/src/tests.rs @@ -323,7 +323,7 @@ fn try_import_the_same_assignment() { // send the assignment related to `hash` let validator_index = ValidatorIndex(0); let cert = fake_assignment_cert(hash, validator_index); - let assignments = vec![(cert.clone(), vec![0u32])]; + let assignments = vec![(cert.clone(), 0u32)]; let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments.clone()); send_message_from_peer(overseer, &peer_a, msg).await; @@ -335,11 +335,10 @@ fn try_import_the_same_assignment() { overseer_recv(overseer).await, AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( assignment, - claimed_candidate_indices, + 0u32, tx, )) => { assert_eq!(assignment, cert); - assert_eq!(claimed_candidate_indices, vec![0u32]); tx.send(AssignmentCheckResult::Accepted).unwrap(); } ); @@ -411,7 +410,7 @@ fn spam_attack_results_in_negative_reputation_change() { .map(|candidate_index| { let validator_index = ValidatorIndex(candidate_index as u32); let cert = fake_assignment_cert(hash_b, validator_index); - (cert, vec![candidate_index as u32]) + (cert, candidate_index as u32) }) .collect(); @@ -493,7 +492,7 @@ fn peer_sending_us_the_same_we_just_sent_them_is_ok() { let cert = fake_assignment_cert(hash, validator_index); overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert.clone(), vec![candidate_index]), + ApprovalDistributionMessage::DistributeAssignment(cert.clone(), candidate_index), ) .await; @@ -523,7 +522,7 @@ fn peer_sending_us_the_same_we_just_sent_them_is_ok() { // but if someone else is sending it the same assignment // the peer could send us it as well - let assignments = vec![(cert, vec![candidate_index])]; + let assignments = vec![(cert, candidate_index)]; let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments); send_message_from_peer(overseer, peer, msg.clone()).await; @@ -571,7 +570,7 @@ fn import_approval_happy_path() { let cert = fake_assignment_cert(hash, validator_index); overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert, vec![candidate_index]), + ApprovalDistributionMessage::DistributeAssignment(cert, candidate_index), ) .await; @@ -669,7 +668,7 @@ fn import_approval_bad() { expect_reputation_change(overseer, &peer_b, COST_UNEXPECTED_MESSAGE).await; // now import an assignment from peer_b - let assignments = vec![(cert.clone(), vec![candidate_index])]; + let assignments = vec![(cert.clone(), candidate_index)]; let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments); send_message_from_peer(overseer, &peer_b, msg).await; @@ -677,11 +676,11 @@ fn import_approval_bad() { overseer_recv(overseer).await, AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( assignment, - candidate_indices, + i, tx, )) => { assert_eq!(assignment, cert); - assert_eq!(candidate_indices[0], candidate_index); + assert_eq!(i, candidate_index); tx.send(AssignmentCheckResult::Accepted).unwrap(); } ); @@ -826,11 +825,9 @@ fn 
update_peer_view() { let cert_a = fake_assignment_cert(hash_a, ValidatorIndex(0)); let cert_b = fake_assignment_cert(hash_b, ValidatorIndex(0)); - overseer_send(overseer, ApprovalDistributionMessage::DistributeAssignment(cert_a, vec![0])) - .await; + overseer_send(overseer, ApprovalDistributionMessage::DistributeAssignment(cert_a, 0)).await; - overseer_send(overseer, ApprovalDistributionMessage::DistributeAssignment(cert_b, vec![0])) - .await; + overseer_send(overseer, ApprovalDistributionMessage::DistributeAssignment(cert_b, 0)).await; // connect a peer setup_peer_with_view(overseer, peer, view![hash_a]).await; @@ -882,7 +879,7 @@ fn update_peer_view() { overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert_c.clone(), vec![0]), + ApprovalDistributionMessage::DistributeAssignment(cert_c.clone(), 0), ) .await; @@ -966,7 +963,7 @@ fn import_remotely_then_locally() { let validator_index = ValidatorIndex(0); let candidate_index = 0u32; let cert = fake_assignment_cert(hash, validator_index); - let assignments = vec![(cert.clone(), vec![candidate_index])]; + let assignments = vec![(cert.clone(), candidate_index)]; let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments.clone()); send_message_from_peer(overseer, peer, msg).await; @@ -975,11 +972,11 @@ fn import_remotely_then_locally() { overseer_recv(overseer).await, AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( assignment, - candidate_indices, + i, tx, )) => { assert_eq!(assignment, cert); - assert_eq!(candidate_indices[0], candidate_index); + assert_eq!(i, candidate_index); tx.send(AssignmentCheckResult::Accepted).unwrap(); } ); @@ -989,7 +986,7 @@ fn import_remotely_then_locally() { // import the same assignment locally overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert, vec![candidate_index]), + ApprovalDistributionMessage::DistributeAssignment(cert, candidate_index), ) .await; @@ -1061,7 +1058,7 @@ fn sends_assignments_even_when_state_is_approved() { overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert.clone(), vec![candidate_index]), + ApprovalDistributionMessage::DistributeAssignment(cert.clone(), candidate_index), ) .await; @@ -1239,7 +1236,7 @@ fn propagates_locally_generated_assignment_to_both_dimensions() { overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert.clone(), vec![candidate_index]), + ApprovalDistributionMessage::DistributeAssignment(cert.clone(), candidate_index), ) .await; @@ -1487,7 +1484,7 @@ fn propagates_to_required_after_connect() { overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert.clone(), vec![candidate_index]), + ApprovalDistributionMessage::DistributeAssignment(cert.clone(), candidate_index), ) .await; @@ -1612,7 +1609,7 @@ fn sends_to_more_peers_after_getting_topology() { overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert.clone(), vec![candidate_index]), + ApprovalDistributionMessage::DistributeAssignment(cert.clone(), candidate_index), ) .await; @@ -1771,7 +1768,7 @@ fn originator_aggression_l1() { overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert.clone(), vec![candidate_index]), + ApprovalDistributionMessage::DistributeAssignment(cert.clone(), candidate_index), ) .await; From 7b6db76a20e2a7afbd673138e49f5aadd36c1157 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Wed, 15 Feb 2023 18:25:30 +0000 Subject: [PATCH 013/105] WIP Signed-off-by: Andrei Sandu 
--- Cargo.lock | 1 + node/network/approval-distribution/Cargo.toml | 1 + node/network/approval-distribution/src/lib.rs | 853 ++++++++++-------- 3 files changed, 461 insertions(+), 394 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 826e10e74d7b..c679cb7b38ff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6430,6 +6430,7 @@ dependencies = [ "assert_matches", "env_logger 0.9.0", "futures", + "itertools", "log", "polkadot-node-metrics", "polkadot-node-network-protocol", diff --git a/node/network/approval-distribution/Cargo.toml b/node/network/approval-distribution/Cargo.toml index 6df854072aa6..3e1069334056 100644 --- a/node/network/approval-distribution/Cargo.toml +++ b/node/network/approval-distribution/Cargo.toml @@ -11,6 +11,7 @@ polkadot-node-primitives = { path = "../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } polkadot-primitives = { path = "../../../primitives" } rand = "0.8" +itertools = "0.10.5" futures = "0.3.21" gum = { package = "tracing-gum", path = "../../gum" } diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index 3c6ed8661e0e..a49d0dd5da9e 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -20,7 +20,9 @@ #![warn(missing_docs)] +use self::metrics::Metrics; use futures::{channel::oneshot, FutureExt as _}; +use itertools::Itertools; use polkadot_node_network_protocol::{ self as net_protocol, grid_topology::{RandomRouting, RequiredRouting, SessionGridTopologies, SessionGridTopology}, @@ -28,7 +30,7 @@ use polkadot_node_network_protocol::{ v1 as protocol_v1, PeerId, UnifiedReputationChange as Rep, Versioned, View, }; use polkadot_node_primitives::approval::{ - AssignmentCert, BlockApprovalMeta, IndirectAssignmentCert, IndirectSignedApprovalVote, + BlockApprovalMeta, IndirectAssignmentCert, IndirectSignedApprovalVote, }; use polkadot_node_subsystem::{ messages::{ @@ -43,8 +45,6 @@ use polkadot_primitives::{ use rand::{CryptoRng, Rng, SeedableRng}; use std::collections::{hash_map, BTreeMap, HashMap, HashSet, VecDeque}; -use self::metrics::Metrics; - mod metrics; #[cfg(test)] @@ -91,64 +91,128 @@ impl RecentlyOutdated { } } -// In case the original gtid topology mechanisms don't work on their own, we need to trade bandwidth -// for protocol liveliness by introducing aggression. -// -// Aggression has 3 levels: -// -// * Aggression Level 0: The basic behaviors described above. -// * Aggression Level 1: The originator of a message sends to all peers. Other peers follow the rules above. -// * Aggression Level 2: All peers send all messages to all their row and column neighbors. -// This means that each validator will, on average, receive each message approximately `2*sqrt(n)` times. -// The aggression level of messages pertaining to a block increases when that block is unfinalized and -// is a child of the finalized block. -// This means that only one block at a time has its messages propagated with aggression > 0. -// -// A note on aggression thresholds: changes in propagation apply only to blocks which are the -// _direct descendants_ of the finalized block which are older than the given threshold, -// not to all blocks older than the threshold. Most likely, a few assignments struggle to -// be propagated in a single block and this holds up all of its descendants blocks. -// Accordingly, we only step on the gas for the block which is most obviously holding up finality. 
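// Worked example (not part of the patch): with thresholds of, say, `l1_threshold = Some(13)`,
// `l2_threshold = Some(28)` and `resend_unfinalized_period = Some(8)`, once the gap between the
// oldest and newest unfinalized block reaches 13 the originators of messages for that oldest block
// switch to `RequiredRouting::All`; at 28 every peer additionally gossips them to its whole grid
// row and column (`RequiredRouting::GridXY`); and every 8 blocks the per-peer "sent" knowledge for
// unfinalized blocks is cleared so those messages are re-sent.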
- -/// Aggression configuration representation -#[derive(Clone)] -struct AggressionConfig { - /// Aggression level 1: all validators send all their own messages to all peers. - l1_threshold: Option, - /// Aggression level 2: level 1 + all validators send all messages to all peers in the X and Y dimensions. - l2_threshold: Option, - /// How often to re-send messages to all targeted recipients. - /// This applies to all unfinalized blocks. - resend_unfinalized_period: Option, +// Contains topology routing information for assignments and approvals. +struct ApprovalRouting { + pub required_routing: RequiredRouting, + pub local: bool, + pub random_routing: RandomRouting, } -impl AggressionConfig { - /// Returns `true` if block is not too old depending on the aggression level - fn is_age_relevant(&self, block_age: BlockNumber) -> bool { - if let Some(t) = self.l1_threshold { - block_age >= t - } else if let Some(t) = self.resend_unfinalized_period { - block_age > 0 && block_age % t == 0 - } else { - false +// This struct is responsible for tracking the full state of an assignment and grid routing information. +struct ApprovalEntry { + // The assignment certificate. + assignment: IndirectAssignmentCert, + // The candidates claimed by the certificate. + candidates: HashSet, + // The approval signatures for each `CandidateIndex` claimed by the assignment certificate. + approvals: HashMap, + // The validator index of the assignment signer. + validator_index: ValidatorIndex, + // Information required for gossiping to other peers using the grid topology. + routing_info: ApprovalRouting, +} + +impl ApprovalEntry { + pub fn new( + assignment: IndirectAssignmentCert, + candidates: Vec, + routing_info: ApprovalRouting, + ) -> ApprovalEntry { + Self { + validator_index: assignment.validator, + assignment, + approvals: HashMap::with_capacity(candidates.len()), + candidates: HashSet::from_iter(candidates.into_iter()), + routing_info, } } -} -impl Default for AggressionConfig { - fn default() -> Self { - AggressionConfig { - l1_threshold: Some(13), - l2_threshold: Some(28), - resend_unfinalized_period: Some(8), + // Create a `MessageSubject` to reference the assignment. + pub fn create_assignment_knowledge(&self, block_hash: Hash) -> (MessageSubject, MessageKind) { + ( + MessageSubject( + block_hash, + self.candidates.iter().cloned().collect::>(), + self.validator_index, + ), + MessageKind::Assignment, + ) + } + + // Create a `MessageSubject` to reference the assignment. + pub fn create_approval_knowledge( + &self, + block_hash: Hash, + candidate_index: CandidateIndex, + ) -> (MessageSubject, MessageKind) { + ( + MessageSubject(block_hash, vec![candidate_index], self.validator_index), + MessageKind::Approval, + ) + } + + // Returns true if an assigned candidate has been approved by the validator. + pub fn is_approved(&self, candidate_index: &CandidateIndex) -> bool { + self.approvals.contains_key(candidate_index) + } + + // Updates routing information and returns the previous information if any. + pub fn routing_info_mut(&mut self) -> &mut ApprovalRouting { + &mut self.routing_info + } + + // Get the routing information. + pub fn routing_info(&self) -> &ApprovalRouting { + &self.routing_info + } + + // Update routing information. + pub fn update_required_routing(&mut self, required_routing: RequiredRouting) { + self.routing_info.required_routing = required_routing; + } + + // Records a new approval. Returns false if the claimed candidate is not found or we already have received the approval. 
+ // TODO: use specific errors instead of `bool`. + pub fn note_approval(&mut self, approval: IndirectSignedApprovalVote) -> bool { + // First do some sanity checks: + // - check validator index matches + // - check claimed candidate + // - check for duplicate approval + if self.validator_index != approval.validator { + return false + } + + if !self.candidates.contains(&approval.candidate_index) || + self.approvals.contains_key(&approval.candidate_index) + { + return false } + + self.approvals.insert(approval.candidate_index, approval).is_none() + } + + // Get the assignment certiticate and claimed candidates. + pub fn get_assignment(&self) -> (IndirectAssignmentCert, Vec) { + (self.assignment.clone(), self.candidates.into_iter().collect::>()) } -} -#[derive(PartialEq)] -enum Resend { - Yes, - No, + // Get an approval for a specific candidate if it exists. + pub fn get_approval( + &self, + candidate_index: CandidateIndex, + ) -> Option { + self.approvals.get(&candidate_index).cloned() + } + + // Get all approvals for all candidates claimed by the assignment. + pub fn get_approvals(&self) -> Vec { + self.approvals.values().cloned().collect::>() + } + + // Get validator index. + pub fn get_validator_index(&self) -> ValidatorIndex { + self.validator_index + } } /// The [`State`] struct is responsible for tracking the overall state of the subsystem. @@ -177,9 +241,6 @@ struct State { /// Tracks recently finalized blocks. recent_outdated_blocks: RecentlyOutdated, - - /// Config for aggression. - aggression_config: AggressionConfig, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -188,8 +249,11 @@ enum MessageKind { Approval, } +// Utility structure to identify assignments and approvals for specific candidates. +// Assignments can span multiple candidates, while approvals refer to only one candidate. +// #[derive(Debug, Clone, Hash, PartialEq, Eq)] -struct MessageSubject(Hash, CandidateIndex, ValidatorIndex); +struct MessageSubject(Hash, pub Vec, ValidatorIndex); #[derive(Debug, Clone, Default)] struct Knowledge { @@ -210,9 +274,11 @@ impl Knowledge { } fn insert(&mut self, message: MessageSubject, kind: MessageKind) -> bool { - match self.known_messages.entry(message) { + let success = match self.known_messages.entry(message) { hash_map::Entry::Vacant(vacant) => { vacant.insert(kind); + // If there are multiple candidates assigned in the message, create + // separate entries for each one. true }, hash_map::Entry::Occupied(mut occupied) => match (*occupied.get(), kind) { @@ -224,6 +290,21 @@ impl Knowledge { true }, }, + }; + + // In case of succesful insertion of multiple candidate assignments create additional + // entries for each assigned candidate. This fakes knowledge of individual assignments, but + // we need to share the same `MessageSubject` with the followup approval. + if kind == MessageKind::Assignment && success && message.1.len() > 1 { + message.1.iter().fold(success, |success, candidate_index| { + success & + self.insert( + MessageSubject(message.0.clone(), vec![*candidate_index], message.2), + kind, + ) + }) + } else { + success } } } @@ -249,56 +330,87 @@ struct BlockEntry { /// This maps to their knowledge of messages. known_by: HashMap, /// The number of the block. - number: BlockNumber, + pub number: BlockNumber, /// The parent hash of the block. - parent_hash: Hash, + pub parent_hash: Hash, /// Our knowledge of messages. - knowledge: Knowledge, + pub knowledge: Knowledge, /// A votes entry for each candidate indexed by [`CandidateIndex`]. 
candidates: Vec, /// The session index of this block. - session: SessionIndex, + pub session: SessionIndex, + /// Approval entries for whole block. These also contain all approvals in the cae of multiple candidates + /// being claimed by assignments. + approval_entries: HashMap<(ValidatorIndex, Vec), ApprovalEntry>, } -#[derive(Debug)] -enum ApprovalState { - Assigned(AssignmentCert), - Approved(AssignmentCert, ValidatorSignature), -} - -impl ApprovalState { - fn assignment_cert(&self) -> &AssignmentCert { - match *self { - ApprovalState::Assigned(ref cert) => cert, - ApprovalState::Approved(ref cert, _) => cert, +impl BlockEntry { + // Returns the peer which currently know this block. + pub fn known_by(&self) -> Vec { + self.known_by.keys().cloned().collect::>() + } + pub fn insert_approval_entry(&mut self, entry: ApprovalEntry) -> &mut ApprovalEntry { + // First map one entry per candidate to the same key we will use in `approval_entries. + // Key is (Validator_index, Vec), which are is the (K,V) pair in `candidate_entry.messages`. + for claimed_candidate_index in entry.candidates { + match self.candidates.get_mut(claimed_candidate_index as usize) { + Some(candidate_entry) => { + candidate_entry + .messages + .entry(entry.get_validator_index()) + .or_insert(entry.candidates.iter().cloned().collect::>()); + }, + None => { + // This should never happen, but if it happens, it means the subsystem is broken. + gum::warn!( + target: LOG_TARGET, + hash = ?entry.assignment.block_hash, + ?claimed_candidate_index, + "Missing candidate entry on `import_and_circulate_assignment`", + ); + }, + }; } + + self.approval_entries + .entry((entry.validator_index, entry.candidates.clone().into_iter().collect::>())) + .or_insert(entry) } - fn approval_signature(&self) -> Option { - match *self { - ApprovalState::Assigned(_) => None, - ApprovalState::Approved(_, ref sig) => Some(sig.clone()), - } + pub fn get_approval_entry( + &self, + candidate_index: CandidateIndex, + validator_index: ValidatorIndex, + ) -> Option<&mut ApprovalEntry> { + self.candidates + .get(candidate_index as usize) + .map_or(None, |candidate_entry| candidate_entry.messages.get(&validator_index)) + .map_or(None, |candidate_indices| { + self.approval_entries.get_mut(&(validator_index, *candidate_indices)) + }) } -} -// routing state bundled with messages for the candidate. Corresponding assignments -// and approvals are stored together and should be routed in the same way, with -// assignments preceding approvals in all cases. -#[derive(Debug)] -struct MessageState { - required_routing: RequiredRouting, - local: bool, - random_routing: RandomRouting, - approval_state: ApprovalState, + // Get all approval entries for a given candidate. + // TODO: Fix this crap + pub fn get_approval_entries(&self, candidate_index: CandidateIndex) -> Vec<&mut ApprovalEntry> { + self.candidates + .get(candidate_index as usize) + .map_or(HashMap::new(), |candidate_entry| candidate_entry.messages) + .map(|messages| { + messages.unique().filter_map(|(validator_index, candidate_indices)| { + self.approval_entries.get_mut(&(*validator_index, *candidate_indices)) + }).collect::>() + }) + } } -/// Information about candidates in the context of a particular block they are included in. -/// In other words, multiple `CandidateEntry`s may exist for the same candidate, -/// if it is included by multiple blocks - this is likely the case when there are forks. +// Information about candidates in the context of a particular block they are included in. 
+// In other words, multiple `CandidateEntry`s may exist for the same candidate, +// if it is included by multiple blocks - this is likely the case when there are forks. #[derive(Debug, Default)] struct CandidateEntry { - messages: HashMap, + // The value represents part of the lookup key in `approval_entries` to fetch the assignment and existing votes. + messages: HashMap>, } #[derive(Debug, Clone, PartialEq)] @@ -317,7 +429,7 @@ impl MessageSource { } enum PendingMessage { - Assignment(IndirectAssignmentCert, CandidateIndex), + Assignment(IndirectAssignmentCert, Vec), Approval(IndirectSignedApprovalVote), } @@ -403,6 +515,7 @@ impl State { knowledge: Knowledge::default(), candidates, session: meta.session, + approval_entries: HashMap::new(), }); self.topologies.inc_session_refs(meta.session); @@ -472,13 +585,13 @@ impl State { for (peer_id, message) in to_import { match message { - PendingMessage::Assignment(assignment, claimed_index) => { + PendingMessage::Assignment(assignment, claimed_indices) => { self.import_and_circulate_assignment( ctx, metrics, MessageSource::Peer(peer_id), assignment, - claimed_index, + claimed_indices, rng, ) .await; @@ -497,7 +610,7 @@ impl State { } } - self.enable_aggression(ctx, Resend::Yes, metrics).await; + // self.enable_aggression(ctx, Resend::Yes, metrics).await; } async fn handle_new_session_topology( @@ -521,10 +634,12 @@ impl State { &self.topologies, |block_entry| block_entry.session == session, |required_routing, local, validator_index| { - if *required_routing == RequiredRouting::PendingTopology { - *required_routing = topology + if required_routing == &RequiredRouting::PendingTopology { + topology .local_grid_neighbors() - .required_routing_by_index(*validator_index, local); + .required_routing_by_index(*validator_index, local) + } else { + *required_routing } }, ) @@ -549,23 +664,24 @@ impl State { num = assignments.len(), "Processing assignments from a peer", ); - for (assignment, claimed_index) in assignments.into_iter() { + for (assignment, claimed_indices) in assignments.into_iter() { if let Some(pending) = self.pending_known.get_mut(&assignment.block_hash) { - let message_subject = MessageSubject( - assignment.block_hash, - claimed_index, - assignment.validator, - ); + let block_hash = &assignment.block_hash; + let validator_index = assignment.validator; gum::trace!( target: LOG_TARGET, %peer_id, - ?message_subject, + ?block_hash, + ?claimed_indices, + ?validator_index, "Pending assignment", ); - pending - .push((peer_id, PendingMessage::Assignment(assignment, claimed_index))); + pending.push(( + peer_id, + PendingMessage::Assignment(assignment, claimed_indices), + )); continue } @@ -575,7 +691,7 @@ impl State { metrics, MessageSource::Peer(peer_id), assignment, - claimed_index, + claimed_indices, rng, ) .await; @@ -590,17 +706,17 @@ impl State { ); for approval_vote in approvals.into_iter() { if let Some(pending) = self.pending_known.get_mut(&approval_vote.block_hash) { - let message_subject = MessageSubject( - approval_vote.block_hash, - approval_vote.candidate_index, - approval_vote.validator, - ); + let block_hash = approval_vote.block_hash; + let candidate_index = approval_vote.candidate_index; + let validator_index = approval_vote.validator; gum::trace!( target: LOG_TARGET, %peer_id, - ?message_subject, - "Pending approval", + ?block_hash, + ?candidate_index, + ?validator_index, + "Pending assignment", ); pending.push((peer_id, PendingMessage::Approval(approval_vote))); @@ -694,7 +810,7 @@ impl State { // If a block was finalized, 
this means we may need to move our aggression // forward to the now oldest block(s). - self.enable_aggression(ctx, Resend::No, metrics).await; + // self.enable_aggression(ctx, Resend::No, metrics).await; } async fn import_and_circulate_assignment( @@ -703,7 +819,7 @@ impl State { metrics: &Metrics, source: MessageSource, assignment: IndirectAssignmentCert, - claimed_candidate_index: CandidateIndex, + claimed_candidate_indices: Vec, rng: &mut R, ) where R: CryptoRng + Rng, @@ -730,9 +846,11 @@ impl State { }, }; - // compute metadata on the assignment. - let message_subject = MessageSubject(block_hash, claimed_candidate_index, validator_index); - let message_kind = MessageKind::Assignment; + // Compute metadata on the assignment. + let (message_subject, message_kind) = ( + MessageSubject(block_hash, claimed_candidate_indices.clone(), validator_index), + MessageKind::Assignment, + ); if let Some(peer_id) = source.peer_id() { // check if our knowledge of the peer already contains this assignment @@ -778,7 +896,7 @@ impl State { ctx.send_message(ApprovalVotingMessage::CheckAndImportAssignment( assignment.clone(), - claimed_candidate_index, + claimed_candidate_indices.clone(), tx, )) .await; @@ -874,28 +992,14 @@ impl State { t.local_grid_neighbors().required_routing_by_index(validator_index, local) }); - let message_state = match entry.candidates.get_mut(claimed_candidate_index as usize) { - Some(candidate_entry) => { - // set the approval state for validator_index to Assigned - // unless the approval state is set already - candidate_entry.messages.entry(validator_index).or_insert_with(|| MessageState { - required_routing, - local, - random_routing: Default::default(), - approval_state: ApprovalState::Assigned(assignment.cert.clone()), - }) - }, - None => { - gum::warn!( - target: LOG_TARGET, - hash = ?block_hash, - ?claimed_candidate_index, - "Expected a candidate entry on import_and_circulate_assignment", - ); + // All the peers that know the relay chain block. + let peers_to_filter = entry.known_by(); - return - }, - }; + let approval_entry = entry.insert_approval_entry(ApprovalEntry::new( + assignment.clone(), + claimed_candidate_indices.clone(), + ApprovalRouting { required_routing, local, random_routing: Default::default() }, + )); // Dispatch the message to all peers in the routing set which // know the block. @@ -903,35 +1007,38 @@ impl State { // If the topology isn't known yet (race with networking subsystems) // then messages will be sent when we get it. - let assignments = vec![(assignment, claimed_candidate_index)]; + let assignments = vec![(assignment, claimed_candidate_indices.clone())]; let n_peers_total = self.peer_views.len(); let source_peer = source.peer_id(); - let mut peer_filter = move |peer| { - if Some(peer) == source_peer.as_ref() { - return false + // Peers that we will send the assignment to. + let mut peers = Vec::new(); + + // Filter destination peers + for peer in peers_to_filter.into_iter() { + if Some(peer) == source_peer { + continue } if let Some(true) = topology .as_ref() - .map(|t| t.local_grid_neighbors().route_to_peer(required_routing, peer)) + .map(|t| t.local_grid_neighbors().route_to_peer(required_routing, &peer)) { - return true + peers.push(peer.clone()); + continue } // Note: at this point, we haven't received the message from any peers // other than the source peer, and we just got it, so we haven't sent it // to any peers either. 
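// Illustrative sketch (not part of the patch): per peer, the grid check above and the sampling
// below reduce to the following decision (the `routed_by_grid_topology` helper is hypothetical):
//
//     let send = routed_by_grid_topology(required_routing, &peer)
//         || approval_entry.routing_info_mut().random_routing.sample(n_peers_total, rng);
//
// with `inc_sent()` recorded only when the random branch fires, so `RandomRouting` can keep track
// of how many peers have already been reached at random.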
- let route_random = message_state.random_routing.sample(n_peers_total, rng); + let route_random = + approval_entry.routing_info().random_routing.sample(n_peers_total, rng); if route_random { - message_state.random_routing.inc_sent(); + approval_entry.routing_info_mut().random_routing.inc_sent(); + peers.push(peer.clone()); } - - route_random - }; - - let peers = entry.known_by.keys().filter(|p| peer_filter(p)).cloned().collect::>(); + } // Add the metadata of the assignment to the knowledge of each peer. for peer in peers.iter() { @@ -945,7 +1052,7 @@ impl State { gum::trace!( target: LOG_TARGET, ?block_hash, - ?claimed_candidate_index, + ?claimed_candidate_indices, local = source.peer_id().is_none(), num_peers = peers.len(), "Sending an assignment to peers", @@ -973,7 +1080,8 @@ impl State { let candidate_index = vote.candidate_index; let entry = match self.blocks.get_mut(&block_hash) { - Some(entry) if entry.candidates.get(candidate_index as usize).is_some() => entry, + Some(entry) if entry.get_approval_entry(candidate_index, validator_index).is_some() => + entry, _ => { if let Some(peer_id) = source.peer_id() { if !self.recent_outdated_blocks.is_recent_outdated(&block_hash) { @@ -985,7 +1093,7 @@ impl State { }; // compute metadata on the assignment. - let message_subject = MessageSubject(block_hash, candidate_index, validator_index); + let message_subject = MessageSubject(block_hash, vec![candidate_index], validator_index); let message_kind = MessageKind::Approval; if let Some(peer_id) = source.peer_id() { @@ -1099,66 +1207,43 @@ impl State { } } + // The entry is created when assignment is imported, so we assume this exists. + let approval_entry = entry.get_approval_entry(candidate_index, validator_index); + if approval_entry.is_none() { + let peer_id = source.peer_id(); + // This indicates a bug in approval-distribution, since we check the knowledge at the begining of the function. + gum::warn!( + target: LOG_TARGET, + ?peer_id, + ?message_subject, + "Unknown approval assignment", + ); + // No rep change as this is caused by an issue + return + } + + let approval_entry = approval_entry.expect("Just checked above; qed"); + // Invariant: to our knowledge, none of the peers except for the `source` know about the approval. 
metrics.on_approval_imported(); - let required_routing = match entry.candidates.get_mut(candidate_index as usize) { - Some(candidate_entry) => { - // set the approval state for validator_index to Approved - // it should be in assigned state already - match candidate_entry.messages.remove(&validator_index) { - Some(MessageState { - approval_state: ApprovalState::Assigned(cert), - required_routing, - local, - random_routing, - }) => { - candidate_entry.messages.insert( - validator_index, - MessageState { - approval_state: ApprovalState::Approved( - cert, - vote.signature.clone(), - ), - required_routing, - local, - random_routing, - }, - ); - - required_routing - }, - Some(_) => { - unreachable!( - "we only insert it after the metadata, checked the metadata above; qed" - ); - }, - None => { - // this would indicate a bug in approval-voting - gum::warn!( - target: LOG_TARGET, - hash = ?block_hash, - ?candidate_index, - ?validator_index, - "Importing an approval we don't have an assignment for", - ); + if !approval_entry.note_approval(vote.clone()) { + // this would indicate a bug in approval-voting: + // - validator index mismatch + // - candidate index mismatch + // - duplicate approval + gum::warn!( + target: LOG_TARGET, + hash = ?block_hash, + ?candidate_index, + ?validator_index, + "Possible bug: Vote import failed: validator/candidate index mismatch or duplicate", + ); - return - }, - } - }, - None => { - gum::warn!( - target: LOG_TARGET, - hash = ?block_hash, - ?candidate_index, - ?validator_index, - "Expected a candidate entry on import_and_circulate_approval", - ); + return + } - return - }, - }; + let required_routing = approval_entry.routing_info().required_routing; // Dispatch a ApprovalDistributionV1Message::Approval(vote) // to all peers required by the topology, with the exception of the source peer. @@ -1242,6 +1327,7 @@ impl State { Some(e) => e, }; + // TODO: fix mapping of candidates to validator index and claimed indices let candidate_entry = match block_entry.candidates.get(index as usize) { None => { gum::debug!( @@ -1254,18 +1340,23 @@ impl State { }, Some(e) => e, }; - let sigs = - candidate_entry.messages.iter().filter_map(|(validator_index, message_state)| { - match &message_state.approval_state { - ApprovalState::Approved(_, sig) => Some((*validator_index, sig.clone())), - ApprovalState::Assigned(_) => None, - } - }); + + let sigs = block_entry + .get_approval_entries(index as usize) + .into_iter() + .map(|approval_entry| { + approval_entry + .get_approvals() + .iter() + .map(|approval| (approval.validator, approval.signature.clone())) + }) + .collect::>(); all_sigs.extend(sigs); } all_sigs } + // TODO: Refactor as in `adjust_required_routing_and_propagate`. async fn unify_with_peer( sender: &mut impl overseer::ApprovalDistributionSenderTrait, metrics: &Metrics, @@ -1285,6 +1376,8 @@ impl State { let view_finalized_number = view.finalized_number; for head in view.into_iter() { let mut block = head; + + // Walk the chain back to last finalized block of the peer view. loop { let entry = match entries.get_mut(&block) { Some(entry) if entry.number > view_finalized_number => entry, @@ -1299,19 +1392,16 @@ impl State { } let peer_knowledge = entry.known_by.entry(peer_id).or_default(); - let topology = topologies.get_topology(entry.session); - // Iterate all messages in all candidates. 
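// Worked example (not part of the patch): under the new knowledge model an assignment claiming
// candidates 1 and 3 is tracked under a single key,
//
//     MessageSubject(block_hash, vec![1, 3], validator)   // MessageKind::Assignment
//
// while the follow-up approvals are tracked per candidate,
//
//     MessageSubject(block_hash, vec![1], validator)      // MessageKind::Approval
//     MessageSubject(block_hash, vec![3], validator)
//
// which is why `Knowledge::insert` also adds per-candidate assignment entries for multi-candidate
// certificates: the later approval lookups need those per-candidate keys to exist.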
- for (candidate_index, validator, message_state) in - entry.candidates.iter_mut().enumerate().flat_map(|(c_i, c)| { - c.messages.iter_mut().map(move |(k, v)| (c_i as _, k, v)) - }) { + // We want to iterate the `approval_entries` of the block entry as these contain all assignments + // that also link all approval votes. + for approval_entry in entry.approval_entries.values_mut() { // Propagate the message to all peers in the required routing set OR // randomly sample peers. { - let random_routing = &mut message_state.random_routing; - let required_routing = message_state.required_routing; + let required_routing = approval_entry.routing_info().required_routing; + let random_routing = &mut approval_entry.routing_info_mut().random_routing; let rng = &mut *rng; let mut peer_filter = move |peer_id| { let in_topology = topology.as_ref().map_or(false, |t| { @@ -1332,39 +1422,24 @@ impl State { } } - let message_subject = MessageSubject(block, candidate_index, *validator); - - let assignment_message = ( - IndirectAssignmentCert { - block_hash: block, - validator: *validator, - cert: message_state.approval_state.assignment_cert().clone(), - }, - candidate_index, - ); - - let approval_message = - message_state.approval_state.approval_signature().map(|signature| { - IndirectSignedApprovalVote { - block_hash: block, - validator: *validator, - candidate_index, - signature, - } - }); + let assignment_message = approval_entry.get_assignment(); + let approval_messages = approval_entry.get_approvals(); + let (assignment_knowledge, message_kind) = + approval_entry.create_assignment_knowledge(block); - if !peer_knowledge.contains(&message_subject, MessageKind::Assignment) { - peer_knowledge - .sent - .insert(message_subject.clone(), MessageKind::Assignment); + // Only send stuff a peer doesn't know in the context of a relay chain block. + if !peer_knowledge.contains(&assignment_knowledge, message_kind) { + peer_knowledge.sent.insert(assignment_knowledge, message_kind); assignments_to_send.push(assignment_message); } - if let Some(approval_message) = approval_message { - if !peer_knowledge.contains(&message_subject, MessageKind::Approval) { - peer_knowledge - .sent - .insert(message_subject.clone(), MessageKind::Approval); + // Filter approval votes. + for approval_message in approval_messages { + let (approval_knowledge, message_kind) = approval_entry + .create_approval_knowledge(block, approval_message.candidate_index); + + if !peer_knowledge.contains(&approval_knowledge, message_kind) { + peer_knowledge.sent.insert(approval_knowledge, message_kind); approvals_to_send.push(approval_message); } } @@ -1397,95 +1472,95 @@ impl State { } } - async fn enable_aggression( - &mut self, - ctx: &mut Context, - resend: Resend, - metrics: &Metrics, - ) { - let min_age = self.blocks_by_number.iter().next().map(|(num, _)| num); - let max_age = self.blocks_by_number.iter().rev().next().map(|(num, _)| num); - let config = self.aggression_config.clone(); - - let (min_age, max_age) = match (min_age, max_age) { - (Some(min), Some(max)) => (min, max), - _ => return, // empty. - }; - - let diff = max_age - min_age; - if !self.aggression_config.is_age_relevant(diff) { - return - } - - adjust_required_routing_and_propagate( - ctx, - &mut self.blocks, - &self.topologies, - |block_entry| { - let block_age = max_age - block_entry.number; - - if resend == Resend::Yes && - config - .resend_unfinalized_period - .as_ref() - .map_or(false, |p| block_age > 0 && block_age % p == 0) - { - // Retry sending to all peers. 
- for (_, knowledge) in block_entry.known_by.iter_mut() { - knowledge.sent = Knowledge::default(); - } - - true - } else { - false - } - }, - |_, _, _| {}, - ) - .await; - - adjust_required_routing_and_propagate( - ctx, - &mut self.blocks, - &self.topologies, - |block_entry| { - // Ramp up aggression only for the very oldest block(s). - // Approval voting can get stuck on a single block preventing - // its descendants from being finalized. Waste minimal bandwidth - // this way. Also, disputes might prevent finality - again, nothing - // to waste bandwidth on newer blocks for. - &block_entry.number == min_age - }, - |required_routing, local, _| { - // It's a bit surprising not to have a topology at this age. - if *required_routing == RequiredRouting::PendingTopology { - gum::debug!( - target: LOG_TARGET, - age = ?diff, - "Encountered old block pending gossip topology", - ); - return - } - - if config.l1_threshold.as_ref().map_or(false, |t| &diff >= t) { - // Message originator sends to everyone. - if local && *required_routing != RequiredRouting::All { - metrics.on_aggression_l1(); - *required_routing = RequiredRouting::All; - } - } - - if config.l2_threshold.as_ref().map_or(false, |t| &diff >= t) { - // Message originator sends to everyone. Everyone else sends to XY. - if !local && *required_routing != RequiredRouting::GridXY { - metrics.on_aggression_l2(); - *required_routing = RequiredRouting::GridXY; - } - } - }, - ) - .await; - } + // async fn enable_aggression( + // &mut self, + // ctx: &mut Context, + // resend: Resend, + // metrics: &Metrics, + // ) { + // let min_age = self.blocks_by_number.iter().next().map(|(num, _)| num); + // let max_age = self.blocks_by_number.iter().rev().next().map(|(num, _)| num); + // let config = self.aggression_config.clone(); + + // let (min_age, max_age) = match (min_age, max_age) { + // (Some(min), Some(max)) => (min, max), + // _ => return, // empty. + // }; + + // let diff = max_age - min_age; + // if !self.aggression_config.is_age_relevant(diff) { + // return + // } + + // adjust_required_routing_and_propagate( + // ctx, + // &mut self.blocks, + // &self.topologies, + // |block_entry| { + // let block_age = max_age - block_entry.number; + + // if resend == Resend::Yes && + // config + // .resend_unfinalized_period + // .as_ref() + // .map_or(false, |p| block_age > 0 && block_age % p == 0) + // { + // // Retry sending to all peers. + // for (_, knowledge) in block_entry.known_by.iter_mut() { + // knowledge.sent = Knowledge::default(); + // } + + // true + // } else { + // false + // } + // }, + // |_, _, _| {}, + // ) + // .await; + + // adjust_required_routing_and_propagate( + // ctx, + // &mut self.blocks, + // &self.topologies, + // |block_entry| { + // // Ramp up aggression only for the very oldest block(s). + // // Approval voting can get stuck on a single block preventing + // // its descendants from being finalized. Waste minimal bandwidth + // // this way. Also, disputes might prevent finality - again, nothing + // // to waste bandwidth on newer blocks for. + // &block_entry.number == min_age + // }, + // |required_routing, local, _| { + // // It's a bit surprising not to have a topology at this age. + // if *required_routing == RequiredRouting::PendingTopology { + // gum::debug!( + // target: LOG_TARGET, + // age = ?diff, + // "Encountered old block pending gossip topology", + // ); + // return + // } + + // if config.l1_threshold.as_ref().map_or(false, |t| &diff >= t) { + // // Message originator sends to everyone. 
+ // if local && *required_routing != RequiredRouting::All { + // metrics.on_aggression_l1(); + // *required_routing = RequiredRouting::All; + // } + // } + + // if config.l2_threshold.as_ref().map_or(false, |t| &diff >= t) { + // // Message originator sends to everyone. Everyone else sends to XY. + // if !local && *required_routing != RequiredRouting::GridXY { + // metrics.on_aggression_l2(); + // *required_routing = RequiredRouting::GridXY; + // } + // } + // }, + // ) + // .await; + // } } // This adjusts the required routing of messages in blocks that pass the block filter @@ -1509,7 +1584,7 @@ async fn adjust_required_routing_and_propagate bool, - RoutingModifier: Fn(&mut RequiredRouting, bool, &ValidatorIndex), + RoutingModifier: Fn(&RequiredRouting, bool, &ValidatorIndex) -> RequiredRouting, { let mut peer_assignments = HashMap::new(); let mut peer_approvals = HashMap::new(); @@ -1521,64 +1596,55 @@ async fn adjust_required_routing_and_propagate t, + None => continue, + }; - if message_state.required_routing.is_empty() { - continue - } + // We just need to iterate the `approval_entries` of the block entry as these contain all assignments + // that also link all approval votes. + for approval_entry in block_entry.approval_entries.values_mut() { + let new_required_routing = routing_modifier( + &approval_entry.routing_info().required_routing, + approval_entry.routing_info().local, + &approval_entry.get_validator_index(), + ); - let topology = match topologies.get_topology(block_entry.session) { - Some(t) => t, - None => continue, - }; + approval_entry.update_required_routing(new_required_routing); - // Propagate the message to all peers in the required routing set. - let message_subject = MessageSubject(*block_hash, candidate_index, *validator); + if approval_entry.routing_info().required_routing.is_empty() { + continue + } - let assignment_message = ( - IndirectAssignmentCert { - block_hash: *block_hash, - validator: *validator, - cert: message_state.approval_state.assignment_cert().clone(), - }, - candidate_index, - ); - let approval_message = - message_state.approval_state.approval_signature().map(|signature| { - IndirectSignedApprovalVote { - block_hash: *block_hash, - validator: *validator, - candidate_index, - signature, - } - }); + let assignment_message = approval_entry.get_assignment(); + let approval_messages = approval_entry.get_approvals(); + let (assignment_knowledge, message_kind) = + approval_entry.create_assignment_knowledge(*block_hash); for (peer, peer_knowledge) in &mut block_entry.known_by { if !topology .local_grid_neighbors() - .route_to_peer(message_state.required_routing, peer) + .route_to_peer(approval_entry.routing_info().required_routing, peer) { continue } - if !peer_knowledge.contains(&message_subject, MessageKind::Assignment) { - peer_knowledge.sent.insert(message_subject.clone(), MessageKind::Assignment); + // Only send stuff a peer doesn't know in the context of a relay chain block. + if !peer_knowledge.contains(&assignment_knowledge, message_kind) { + peer_knowledge.sent.insert(assignment_knowledge.clone(), message_kind); peer_assignments .entry(*peer) .or_insert_with(Vec::new) .push(assignment_message.clone()); } - if let Some(approval_message) = approval_message.as_ref() { - if !peer_knowledge.contains(&message_subject, MessageKind::Approval) { - peer_knowledge.sent.insert(message_subject.clone(), MessageKind::Approval); + // Filter approval votes. 
+ for approval_message in &approval_messages { + let (approval_knowledge, message_kind) = approval_entry + .create_approval_knowledge(*block_hash, approval_message.candidate_index); + + if !peer_knowledge.contains(&approval_knowledge, message_kind) { + peer_knowledge.sent.insert(approval_knowledge, message_kind); peer_approvals .entry(*peer) .or_insert_with(Vec::new) @@ -1590,7 +1656,6 @@ async fn adjust_required_routing_and_propagate { state.handle_new_blocks(ctx, metrics, metas, rng).await; }, - ApprovalDistributionMessage::DistributeAssignment(cert, candidate_index) => { + ApprovalDistributionMessage::DistributeAssignment(cert, candidate_indices) => { gum::debug!( target: LOG_TARGET, - "Distributing our assignment on candidate (block={}, index={})", + "Distributing our assignment on candidate (block={}, indices={:?})", cert.block_hash, - candidate_index, + candidate_indices, ); state @@ -1695,7 +1760,7 @@ impl ApprovalDistribution { &metrics, MessageSource::Local, cert, - candidate_index, + candidate_indices, rng, ) .await; @@ -1765,7 +1830,7 @@ pub const MAX_APPROVAL_BATCH_SIZE: usize = ensure_size_not_zero( /// of assignments and can `select!` other tasks. pub(crate) async fn send_assignments_batched( sender: &mut impl overseer::ApprovalDistributionSenderTrait, - assignments: Vec<(IndirectAssignmentCert, CandidateIndex)>, + assignments: Vec<(IndirectAssignmentCert, Vec)>, peer: PeerId, ) { let mut batches = assignments.into_iter().peekable(); From 46d76d43b0ccf774f2f8c8b3df1cb531c39cd5ca Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Thu, 16 Feb 2023 09:32:19 +0000 Subject: [PATCH 014/105] Fix compile errors Signed-off-by: Andrei Sandu --- node/network/approval-distribution/src/lib.rs | 98 ++++++++++++------- 1 file changed, 62 insertions(+), 36 deletions(-) diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index a49d0dd5da9e..e763a9aaeeac 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -193,7 +193,7 @@ impl ApprovalEntry { // Get the assignment certiticate and claimed candidates. pub fn get_assignment(&self) -> (IndirectAssignmentCert, Vec) { - (self.assignment.clone(), self.candidates.into_iter().collect::>()) + (self.assignment.clone(), self.candidates.iter().cloned().collect::>()) } // Get an approval for a specific candidate if it exists. @@ -274,7 +274,7 @@ impl Knowledge { } fn insert(&mut self, message: MessageSubject, kind: MessageKind) -> bool { - let success = match self.known_messages.entry(message) { + let success = match self.known_messages.entry(message.clone()) { hash_map::Entry::Vacant(vacant) => { vacant.insert(kind); // If there are multiple candidates assigned in the message, create @@ -349,11 +349,12 @@ impl BlockEntry { pub fn known_by(&self) -> Vec { self.known_by.keys().cloned().collect::>() } + pub fn insert_approval_entry(&mut self, entry: ApprovalEntry) -> &mut ApprovalEntry { // First map one entry per candidate to the same key we will use in `approval_entries. // Key is (Validator_index, Vec), which are is the (K,V) pair in `candidate_entry.messages`. 
- for claimed_candidate_index in entry.candidates { - match self.candidates.get_mut(claimed_candidate_index as usize) { + for claimed_candidate_index in &entry.candidates { + match self.candidates.get_mut(*claimed_candidate_index as usize) { Some(candidate_entry) => { candidate_entry .messages @@ -373,34 +374,59 @@ impl BlockEntry { } self.approval_entries - .entry((entry.validator_index, entry.candidates.clone().into_iter().collect::>())) + .entry(( + entry.validator_index, + entry.candidates.clone().into_iter().collect::>(), + )) .or_insert(entry) } - pub fn get_approval_entry( + pub fn contains_approval_entry( &self, candidate_index: CandidateIndex, validator_index: ValidatorIndex, + ) -> bool { + self.candidates + .get(candidate_index as usize) + .map_or(None, |candidate_entry| candidate_entry.messages.get(&validator_index)) + .map_or(false, |candidate_indices| { + self.approval_entries + .contains_key(&(validator_index, candidate_indices.clone())) + }) + } + + pub fn get_approval_entry( + &mut self, + candidate_index: CandidateIndex, + validator_index: ValidatorIndex, ) -> Option<&mut ApprovalEntry> { self.candidates .get(candidate_index as usize) .map_or(None, |candidate_entry| candidate_entry.messages.get(&validator_index)) .map_or(None, |candidate_indices| { - self.approval_entries.get_mut(&(validator_index, *candidate_indices)) + self.approval_entries.get_mut(&(validator_index, candidate_indices.clone())) }) } // Get all approval entries for a given candidate. // TODO: Fix this crap - pub fn get_approval_entries(&self, candidate_index: CandidateIndex) -> Vec<&mut ApprovalEntry> { - self.candidates + pub fn get_approval_entries(&self, candidate_index: CandidateIndex) -> Vec<&ApprovalEntry> { + let approval_entry_keys = self + .candidates .get(candidate_index as usize) - .map_or(HashMap::new(), |candidate_entry| candidate_entry.messages) - .map(|messages| { - messages.unique().filter_map(|(validator_index, candidate_indices)| { - self.approval_entries.get_mut(&(*validator_index, *candidate_indices)) - }).collect::>() - }) + .map_or(HashMap::new(), |candidate_entry| candidate_entry.messages.clone()); + + let approval_entry_keys = approval_entry_keys.iter().unique().collect::>(); + + let mut entries = Vec::new(); + for (validator_index, candidate_indices) in approval_entry_keys { + if let Some(entry) = + self.approval_entries.get(&(*validator_index, candidate_indices.clone())) + { + entries.push(entry); + } + } + entries } } @@ -787,8 +813,8 @@ impl State { async fn handle_block_finalized( &mut self, - ctx: &mut Context, - metrics: &Metrics, + _ctx: &mut Context, + _metrics: &Metrics, finalized_number: BlockNumber, ) { // we want to prune every block up to (including) finalized_number @@ -1080,8 +1106,7 @@ impl State { let candidate_index = vote.candidate_index; let entry = match self.blocks.get_mut(&block_hash) { - Some(entry) if entry.get_approval_entry(candidate_index, validator_index).is_some() => - entry, + Some(entry) if entry.contains_approval_entry(candidate_index, validator_index) => entry, _ => { if let Some(peer_id) = source.peer_id() { if !self.recent_outdated_blocks.is_recent_outdated(&block_hash) { @@ -1315,7 +1340,7 @@ impl State { ) -> HashMap { let mut all_sigs = HashMap::new(); for (hash, index) in indices { - let block_entry = match self.blocks.get(&hash) { + let block_entry = match self.blocks.get_mut(&hash) { None => { gum::debug!( target: LOG_TARGET, @@ -1327,29 +1352,30 @@ impl State { Some(e) => e, }; - // TODO: fix mapping of candidates to validator index and 
claimed indices - let candidate_entry = match block_entry.candidates.get(index as usize) { - None => { - gum::debug!( - target: LOG_TARGET, - ?hash, - ?index, - "`get_approval_signatures`: could not find candidate entry for given hash and index!" - ); - continue - }, - Some(e) => e, - }; + // // TODO: fix mapping of candidates to validator index and claimed indices + // let candidate_entry = match block_entry.candidates.get(index as usize) { + // None => { + // gum::debug!( + // target: LOG_TARGET, + // ?hash, + // ?index, + // "`get_approval_signatures`: could not find candidate entry for given hash and index!" + // ); + // continue + // }, + // Some(e) => e, + // }; let sigs = block_entry - .get_approval_entries(index as usize) + .get_approval_entries(index) .into_iter() .map(|approval_entry| { approval_entry .get_approvals() - .iter() - .map(|approval| (approval.validator, approval.signature.clone())) + .into_iter() + .map(|approval| (approval.validator, approval.signature)) }) + .flatten() .collect::>(); all_sigs.extend(sigs); } From b18295eda1682ccfa53d089a368e331776d68f02 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Thu, 16 Feb 2023 12:41:31 +0000 Subject: [PATCH 015/105] add todo Signed-off-by: Andrei Sandu --- node/network/approval-distribution/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index e763a9aaeeac..51852960899b 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -947,7 +947,7 @@ impl State { match result { AssignmentCheckResult::Accepted => { modify_reputation(ctx.sender(), peer_id, BENEFIT_VALID_MESSAGE_FIRST).await; - entry.knowledge.known_messages.insert(message_subject.clone(), message_kind); + entry.knowledge.insert(message_subject.clone(), message_kind); if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) { peer_knowledge.received.insert(message_subject.clone(), message_kind); } @@ -1773,6 +1773,7 @@ impl ApprovalDistribution { state.handle_new_blocks(ctx, metrics, metas, rng).await; }, ApprovalDistributionMessage::DistributeAssignment(cert, candidate_indices) => { + // TODO: approval voting bug: Fix `Importing locally an already known assignment` for multiple candidate assignments. gum::debug!( target: LOG_TARGET, "Distributing our assignment on candidate (block={}, indices={:?})", From a41254cf623a23645d8274cada920a29549b4fee Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 17 Feb 2023 09:26:00 +0000 Subject: [PATCH 016/105] Keep track of peer protocol version Signed-off-by: Andrei Sandu --- node/network/approval-distribution/src/lib.rs | 57 +++++++++++++------ 1 file changed, 39 insertions(+), 18 deletions(-) diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index 51852960899b..10215e9b7379 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -23,6 +23,7 @@ use self::metrics::Metrics; use futures::{channel::oneshot, FutureExt as _}; use itertools::Itertools; +use net_protocol::peer_set::ProtocolVersion; use polkadot_node_network_protocol::{ self as net_protocol, grid_topology::{RandomRouting, RequiredRouting, SessionGridTopologies, SessionGridTopology}, @@ -215,6 +216,12 @@ impl ApprovalEntry { } } +// We keep track of each peer view and protocol version using this struct. 
+struct PeerEntry { + pub view: View, + pub version: ProtocolVersion, +} + /// The [`State`] struct is responsible for tracking the overall state of the subsystem. /// /// It tracks metadata about our view of the unfinalized chain, @@ -234,7 +241,7 @@ struct State { pending_known: HashMap>, /// Peer data is partially stored here, and partially inline within the [`BlockEntry`]s - peer_views: HashMap, + peer_views: HashMap, /// Keeps a topology for various different sessions. topologies: SessionGridTopologies, @@ -351,8 +358,9 @@ impl BlockEntry { } pub fn insert_approval_entry(&mut self, entry: ApprovalEntry) -> &mut ApprovalEntry { - // First map one entry per candidate to the same key we will use in `approval_entries. - // Key is (Validator_index, Vec), which are is the (K,V) pair in `candidate_entry.messages`. + // First map one entry per candidate to the same key we will use in `approval_entries`. + // Key is (Validator_index, Vec) that links the `ApprovalEntry` to the (K,V) + // entry in `candidate_entry.messages`. for claimed_candidate_index in &entry.candidates { match self.candidates.get_mut(*claimed_candidate_index as usize) { Some(candidate_entry) => { @@ -381,6 +389,8 @@ impl BlockEntry { .or_insert(entry) } + // Returns `true` if we have an approval for `candidate_index` from validator + // `validator_index`. pub fn contains_approval_entry( &self, candidate_index: CandidateIndex, @@ -395,6 +405,8 @@ impl BlockEntry { }) } + // Returns a mutable reference of `ApprovalEntry` for `candidate_index` from validator + // `validator_index`. pub fn get_approval_entry( &mut self, candidate_index: CandidateIndex, @@ -409,24 +421,29 @@ impl BlockEntry { } // Get all approval entries for a given candidate. - // TODO: Fix this crap pub fn get_approval_entries(&self, candidate_index: CandidateIndex) -> Vec<&ApprovalEntry> { + // Get the keys for fetching `ApprovalEntry` from `self.approval_entries`, let approval_entry_keys = self .candidates .get(candidate_index as usize) - .map_or(HashMap::new(), |candidate_entry| candidate_entry.messages.clone()); + .map(|candidate_entry| &candidate_entry.messages); - let approval_entry_keys = approval_entry_keys.iter().unique().collect::>(); + if let Some(approval_entry_keys) = approval_entry_keys { + // Ensure no duplicates. 
+ let approval_entry_keys = approval_entry_keys.iter().unique().collect::>(); - let mut entries = Vec::new(); - for (validator_index, candidate_indices) in approval_entry_keys { - if let Some(entry) = - self.approval_entries.get(&(*validator_index, candidate_indices.clone())) - { - entries.push(entry); + let mut entries = Vec::new(); + for (validator_index, candidate_indices) in approval_entry_keys { + if let Some(entry) = + self.approval_entries.get(&(*validator_index, candidate_indices.clone())) + { + entries.push(entry); + } } + entries + } else { + vec![] } - entries } } @@ -469,10 +486,12 @@ impl State { rng: &mut (impl CryptoRng + Rng), ) { match event { - NetworkBridgeEvent::PeerConnected(peer_id, role, _, _) => { + NetworkBridgeEvent::PeerConnected(peer_id, role, version, _) => { // insert a blank view if none already present gum::trace!(target: LOG_TARGET, ?peer_id, ?role, "Peer connected"); - self.peer_views.entry(peer_id).or_default(); + self.peer_views + .entry(peer_id) + .or_insert(PeerEntry { view: Default::default(), version }); }, NetworkBridgeEvent::PeerDisconnected(peer_id) => { gum::trace!(target: LOG_TARGET, ?peer_id, "Peer disconnected"); @@ -564,7 +583,7 @@ impl State { { let sender = ctx.sender(); - for (peer_id, view) in self.peer_views.iter() { + for (peer_id, PeerEntry { view, version: _ }) in self.peer_views.iter() { let intersection = view.iter().filter(|h| new_hashes.contains(h)); let view_intersection = View::new(intersection.cloned(), view.finalized_number); Self::unify_with_peer( @@ -776,8 +795,10 @@ impl State { { gum::trace!(target: LOG_TARGET, ?view, "Peer view change"); let finalized_number = view.finalized_number; - let old_view = - self.peer_views.get_mut(&peer_id).map(|d| std::mem::replace(d, view.clone())); + let old_view = self + .peer_views + .get_mut(&peer_id) + .map(|d| std::mem::replace(&mut d.view, view.clone())); let old_finalized_number = old_view.map(|v| v.finalized_number).unwrap_or(0); // we want to prune every block known_by peer up to (including) view.finalized_number From 3bb8823460a84a3fd3d050071a95b758056ee06e Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Mon, 20 Feb 2023 13:43:25 +0000 Subject: [PATCH 017/105] network protocol bump and addition Signed-off-by: Andrei Sandu --- node/network/protocol/src/lib.rs | 7 +++++-- node/network/protocol/src/peer_set.rs | 2 ++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index 7625fddd346a..d715a5d42fe1 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -265,7 +265,7 @@ impl Versioned<&'_ V1> { } } -/// All supported versions of the validation protocol message. +/// All supported versions of the validation protocol message v1. pub type VersionedValidationProtocol = Versioned; impl From for VersionedValidationProtocol { @@ -489,10 +489,13 @@ pub mod v1 { /// /// Actually checking the assignment may yield a different result. #[codec(index = 0)] - Assignments(Vec<(IndirectAssignmentCert, Vec)>), + Assignments(Vec<(IndirectAssignmentCert, CandidateIndex)>), /// Approvals for candidates in some recent, unfinalized block. #[codec(index = 1)] Approvals(Vec), + /// Assignments version 2 supporting multiple candidates + #[codec(index = 2)] + AssignmentsV2(Vec<(IndirectAssignmentCert, Vec)>), } /// Dummy network message type, so we will receive connect/disconnect events. 
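// Illustrative sketch, not part of the patch: how the v1 `Assignments` payload and the
// new `AssignmentsV2` variant added above relate. A v1 entry always claims exactly one
// candidate, so lifting it into the v2 shape just wraps the index in a `Vec`; going the
// other way, only single-candidate claims survive, mirroring the sender-side filtering
// introduced later in this series. `Cert` stands in for `IndirectAssignmentCert`, and
// `CandidateIndex` is `u32` as elsewhere in the tree; the helper names are hypothetical.
type CandidateIndex = u32;

fn lift_to_v2<Cert>(v1: Vec<(Cert, CandidateIndex)>) -> Vec<(Cert, Vec<CandidateIndex>)> {
    v1.into_iter().map(|(cert, index)| (cert, vec![index])).collect()
}

fn downgrade_to_v1<Cert>(v2: Vec<(Cert, Vec<CandidateIndex>)>) -> Vec<(Cert, CandidateIndex)> {
    v2.into_iter()
        // Multi-candidate claims cannot be expressed by the v1 `Assignments` variant.
        .filter(|(_, candidates)| candidates.len() == 1)
        .map(|(cert, candidates)| (cert, candidates[0]))
        .collect()
}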
diff --git a/node/network/protocol/src/peer_set.rs b/node/network/protocol/src/peer_set.rs index faeea10e4cea..a21a4bb2b42c 100644 --- a/node/network/protocol/src/peer_set.rs +++ b/node/network/protocol/src/peer_set.rs @@ -211,6 +211,8 @@ impl From for u32 { pub enum ValidationVersion { /// The first version. V1 = 1, + /// The second version adds `AssignmentsV2` message to approval distribution. + V2 = 2, } /// Supported collation protocol versions. Only versions defined here must be used in the codebase. From b34788ed45069644cfd81e949f701a314a1316b9 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Mon, 20 Feb 2023 13:44:17 +0000 Subject: [PATCH 018/105] Approval distribution handing of v1 vs v2 Signed-off-by: Andrei Sandu --- node/network/approval-distribution/src/lib.rs | 239 +++++++++++++----- 1 file changed, 179 insertions(+), 60 deletions(-) diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index 10215e9b7379..e66ad3ebce7b 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -23,7 +23,7 @@ use self::metrics::Metrics; use futures::{channel::oneshot, FutureExt as _}; use itertools::Itertools; -use net_protocol::peer_set::ProtocolVersion; +use net_protocol::peer_set::{ProtocolVersion, ValidationVersion}; use polkadot_node_network_protocol::{ self as net_protocol, grid_topology::{RandomRouting, RequiredRouting, SessionGridTopologies, SessionGridTopology}, @@ -583,7 +583,7 @@ impl State { { let sender = ctx.sender(); - for (peer_id, PeerEntry { view, version: _ }) in self.peer_views.iter() { + for (peer_id, PeerEntry { view, version }) in self.peer_views.iter() { let intersection = view.iter().filter(|h| new_hashes.contains(h)); let view_intersection = View::new(intersection.cloned(), view.finalized_number); Self::unify_with_peer( @@ -593,6 +593,7 @@ impl State { &self.topologies, self.peer_views.len(), *peer_id, + *version, view_intersection, rng, ) @@ -687,10 +688,52 @@ impl State { *required_routing } }, + &self.peer_views, ) .await; } + async fn process_incoming_assignments( + &mut self, + ctx: &mut Context, + metrics: &Metrics, + peer_id: PeerId, + assignments: Vec<(IndirectAssignmentCert, Vec)>, + rng: &mut R, + ) where + R: CryptoRng + Rng, + { + for (assignment, claimed_indices) in assignments { + if let Some(pending) = self.pending_known.get_mut(&assignment.block_hash) { + let block_hash = &assignment.block_hash; + let validator_index = assignment.validator; + + gum::trace!( + target: LOG_TARGET, + %peer_id, + ?block_hash, + ?claimed_indices, + ?validator_index, + "Pending assignment", + ); + + pending.push((peer_id, PendingMessage::Assignment(assignment, claimed_indices))); + + continue + } + + self.import_and_circulate_assignment( + ctx, + metrics, + MessageSource::Peer(peer_id), + assignment, + claimed_indices, + rng, + ) + .await; + } + } + async fn process_incoming_peer_message( &mut self, ctx: &mut Context, @@ -702,45 +745,34 @@ impl State { R: CryptoRng + Rng, { match msg { + protocol_v1::ApprovalDistributionMessage::AssignmentsV2(assignments) => { + gum::trace!( + target: LOG_TARGET, + peer_id = %peer_id, + num = assignments.len(), + "Processing assignments (V2) from a peer", + ); + self.process_incoming_assignments(ctx, metrics, peer_id, assignments, rng).await; + }, protocol_v1::ApprovalDistributionMessage::Assignments(assignments) => { gum::trace!( target: LOG_TARGET, peer_id = %peer_id, num = assignments.len(), - "Processing assignments from a peer", + 
"Processing assignments (V1) from a peer", ); - for (assignment, claimed_indices) in assignments.into_iter() { - if let Some(pending) = self.pending_known.get_mut(&assignment.block_hash) { - let block_hash = &assignment.block_hash; - let validator_index = assignment.validator; - gum::trace!( - target: LOG_TARGET, - %peer_id, - ?block_hash, - ?claimed_indices, - ?validator_index, - "Pending assignment", - ); - - pending.push(( - peer_id, - PendingMessage::Assignment(assignment, claimed_indices), - )); - - continue - } - - self.import_and_circulate_assignment( - ctx, - metrics, - MessageSource::Peer(peer_id), - assignment, - claimed_indices, - rng, - ) - .await; - } + self.process_incoming_assignments( + ctx, + metrics, + peer_id, + assignments + .into_iter() + .map(|(cert, candidate)| (cert, vec![candidate])) + .collect::>(), + rng, + ) + .await; }, protocol_v1::ApprovalDistributionMessage::Approvals(approvals) => { gum::trace!( @@ -795,10 +827,22 @@ impl State { { gum::trace!(target: LOG_TARGET, ?view, "Peer view change"); let finalized_number = view.finalized_number; - let old_view = self - .peer_views - .get_mut(&peer_id) - .map(|d| std::mem::replace(&mut d.view, view.clone())); + + let (old_view, protocol_version) = + if let Some(peer_entry) = self.peer_views.get_mut(&peer_id) { + (Some(std::mem::replace(&mut peer_entry.view, view.clone())), peer_entry.version) + } else { + // This shouldn't happen, but if it does we assume protocol version 1. + gum::warn!( + target: LOG_TARGET, + ?peer_id, + ?view, + "Peer view change for missing `peer_entry`" + ); + + (None, ValidationVersion::V1.into()) + }; + let old_finalized_number = old_view.map(|v| v.finalized_number).unwrap_or(0); // we want to prune every block known_by peer up to (including) view.finalized_number @@ -826,6 +870,7 @@ impl State { &self.topologies, self.peer_views.len(), peer_id, + protocol_version, view, rng, ) @@ -1105,13 +1150,16 @@ impl State { "Sending an assignment to peers", ); - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - peers, - Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( - protocol_v1::ApprovalDistributionMessage::Assignments(assignments), - )), - )) - .await; + let peers = peers + .iter() + .filter_map(|peer_id| { + self.peer_views + .get(peer_id) + .map(|peer_entry| (peer_id.clone(), peer_entry.version)) + }) + .collect::>(); + + send_assignments_batched(ctx.sender(), assignments, &peers).await; } } @@ -1411,6 +1459,7 @@ impl State { topologies: &SessionGridTopologies, total_peers: usize, peer_id: PeerId, + protocol_version: ProtocolVersion, view: View, rng: &mut (impl CryptoRng + Rng), ) { @@ -1504,7 +1553,12 @@ impl State { "Sending assignments to unified peer", ); - send_assignments_batched(sender, assignments_to_send, peer_id).await; + send_assignments_batched( + sender, + assignments_to_send, + &vec![(peer_id, protocol_version)], + ) + .await; } if !approvals_to_send.is_empty() { @@ -1629,6 +1683,7 @@ async fn adjust_required_routing_and_propagate, ) where BlockFilter: Fn(&mut BlockEntry) -> bool, RoutingModifier: Fn(&RequiredRouting, bool, &ValidatorIndex) -> RequiredRouting, @@ -1704,7 +1759,17 @@ async fn adjust_required_routing_and_propagate() / 3, ); +// Low level helper for sending assignments. 
+async fn send_assignments_batched_inner( + sender: &mut impl overseer::ApprovalDistributionSenderTrait, + batch: Vec<(IndirectAssignmentCert, Vec)>, + peers: &Vec<&PeerId>, + peer_version: u32, +) { + let peers = peers.into_iter().cloned().cloned().collect::>(); + if peer_version == 2 { + sender + .send_message(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( + protocol_v1::ApprovalDistributionMessage::AssignmentsV2(batch), + )), + )) + .await; + } else { + let batch = batch.into_iter().map(|(cert, candidates)| (cert, candidates[0])).collect(); + sender + .send_message(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( + protocol_v1::ApprovalDistributionMessage::Assignments(batch), + )), + )) + .await; + } +} + /// Send assignments while honoring the `max_notification_size` of the protocol. /// /// Splitting the messages into multiple notifications allows more granular processing at the @@ -1878,22 +1973,46 @@ pub const MAX_APPROVAL_BATCH_SIZE: usize = ensure_size_not_zero( /// of assignments and can `select!` other tasks. pub(crate) async fn send_assignments_batched( sender: &mut impl overseer::ApprovalDistributionSenderTrait, - assignments: Vec<(IndirectAssignmentCert, Vec)>, - peer: PeerId, + v2_assignments: Vec<(IndirectAssignmentCert, Vec)>, + peers: &Vec<(PeerId, ProtocolVersion)>, ) { - let mut batches = assignments.into_iter().peekable(); + let v1_peers = peers + .iter() + .filter_map( + |(peer_id, version)| if u32::from(*version) == 1 { Some(peer_id) } else { None }, + ) + .collect::>(); + let v2_peers = peers + .iter() + .filter_map( + |(peer_id, version)| if u32::from(*version) == 2 { Some(peer_id) } else { None }, + ) + .collect::>(); - while batches.peek().is_some() { - let batch: Vec<_> = batches.by_ref().take(MAX_ASSIGNMENT_BATCH_SIZE).collect(); + if v1_peers.len() > 0 { + let mut v1_assignments = v2_assignments.clone(); + // Older peers(v1) do not understand `AssignmentsV2` messages, so we have to filter these out. 
+ v1_assignments.retain(|(assignment, candidates)| candidates.len() == 1); - sender - .send_message(NetworkBridgeTxMessage::SendValidationMessage( - vec![peer], - Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( - protocol_v1::ApprovalDistributionMessage::Assignments(batch), - )), - )) - .await; + let mut v1_batches = v1_assignments.into_iter().peekable(); + + while v1_batches.peek().is_some() { + let batch: Vec<_> = v1_batches.by_ref().take(MAX_ASSIGNMENT_BATCH_SIZE).collect(); + + // If there are multiple candidates claimed this is only supported for V2 + send_assignments_batched_inner(sender, batch, &v1_peers, 1).await; + } + } + + if v2_peers.len() > 0 { + let mut v2_batches = v2_assignments.into_iter().peekable(); + + while v2_batches.peek().is_some() { + let batch = v2_batches.by_ref().take(MAX_ASSIGNMENT_BATCH_SIZE).collect::>(); + + // If there are multiple candidates claimed this is only supported for V2 + send_assignments_batched_inner(sender, batch, &v2_peers, 2).await; + } } } From 48109ecc66df689a40f516d67d4fd5f1e8f75dc0 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Thu, 23 Feb 2023 13:11:29 +0000 Subject: [PATCH 019/105] Add VStaging network protocol Signed-off-by: Andrei Sandu --- .../approval-voting/src/approval_db/v1/mod.rs | 4 +- node/core/approval-voting/src/criteria.rs | 27 +-- node/core/approval-voting/src/lib.rs | 18 +- .../approval-voting/src/persisted_entries.rs | 4 +- node/core/approval-voting/src/tests.rs | 8 +- node/network/approval-distribution/src/lib.rs | 159 ++++++++++-------- .../approval-distribution/src/tests.rs | 40 ++--- node/network/bitfield-distribution/src/lib.rs | 1 + node/network/bridge/src/network.rs | 1 + node/network/bridge/src/rx/mod.rs | 111 ++++++++++-- node/network/bridge/src/tx/mod.rs | 36 +++- .../src/collator_side/mod.rs | 2 + .../src/validator_side/mod.rs | 3 + node/network/gossip-support/src/lib.rs | 3 + node/network/protocol/src/lib.rs | 122 +++++++++++--- node/network/protocol/src/peer_set.rs | 32 +++- .../network/statement-distribution/src/lib.rs | 16 ++ node/primitives/src/approval.rs | 101 +++++++++++ node/subsystem-types/src/messages.rs | 6 +- 19 files changed, 529 insertions(+), 165 deletions(-) diff --git a/node/core/approval-voting/src/approval_db/v1/mod.rs b/node/core/approval-voting/src/approval_db/v1/mod.rs index a761b7f20e84..58781e76ce39 100644 --- a/node/core/approval-voting/src/approval_db/v1/mod.rs +++ b/node/core/approval-voting/src/approval_db/v1/mod.rs @@ -17,7 +17,7 @@ //! Version 1 of the DB schema. use parity_scale_codec::{Decode, Encode}; -use polkadot_node_primitives::approval::{AssignmentCert, DelayTranche}; +use polkadot_node_primitives::approval::{AssignmentCertV2, DelayTranche}; use polkadot_node_subsystem::{SubsystemError, SubsystemResult}; use polkadot_node_subsystem_util::database::{DBTransaction, Database}; use polkadot_primitives::{ @@ -161,7 +161,7 @@ pub struct Config { /// Details pertaining to our assignment on a block. #[derive(Encode, Decode, Debug, Clone, PartialEq)] pub struct OurAssignment { - pub cert: AssignmentCert, + pub cert: AssignmentCertV2, pub tranche: DelayTranche, pub validator_index: ValidatorIndex, // Whether the assignment has been triggered already. 
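// Illustrative sketch, not part of the patch: the approval-distribution changes later in
// this patch import `filter_by_peer_version` from the network protocol crate, but its
// definition is not among the hunks shown here. Judging by the call sites
// (`filter_by_peer_version(peers, ValidationVersion::V1.into())`), it partitions peers by
// their negotiated validation protocol version along these lines; treat the body below as
// an assumption rather than the actual implementation.
use polkadot_node_network_protocol::{peer_set::ProtocolVersion, PeerId};

fn filter_by_peer_version(
    peers: &[(PeerId, ProtocolVersion)],
    version: ProtocolVersion,
) -> Vec<PeerId> {
    peers
        .iter()
        .filter_map(|(peer_id, peer_version)| {
            // Compare via `u32`, matching how versions are compared elsewhere in the series.
            if u32::from(*peer_version) == u32::from(version) {
                Some(peer_id.clone())
            } else {
                None
            }
        })
        .collect()
}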
diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index b35758046175..5c6f5f68cca5 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -18,7 +18,7 @@ use parity_scale_codec::{Decode, Encode}; use polkadot_node_primitives::approval::{ - self as approval_types, AssignmentCert, AssignmentCertKind, DelayTranche, RelayVRFStory, + self as approval_types, AssignmentCertKindV2, AssignmentCertV2, DelayTranche, RelayVRFStory, }; use polkadot_primitives::{ AssignmentId, AssignmentPair, CandidateHash, CoreIndex, GroupIndex, IndexedVec, SessionInfo, @@ -38,7 +38,7 @@ use super::LOG_TARGET; /// Details pertaining to our assignment on a block. #[derive(Debug, Clone, Encode, Decode, PartialEq)] pub struct OurAssignment { - cert: AssignmentCert, + cert: AssignmentCertV2, tranche: DelayTranche, validator_index: ValidatorIndex, // Whether the assignment has been triggered already. @@ -46,7 +46,7 @@ pub struct OurAssignment { } impl OurAssignment { - pub(crate) fn cert(&self) -> &AssignmentCert { + pub(crate) fn cert(&self) -> &AssignmentCertV2 { &self.cert } @@ -68,6 +68,7 @@ impl OurAssignment { } impl From for OurAssignment { + // TODO: OurAssignment changed -> migration for parachains db approval voting column. fn from(entry: crate::approval_db::v1::OurAssignment) -> Self { OurAssignment { cert: entry.cert, @@ -233,7 +234,7 @@ pub(crate) trait AssignmentCriteria { validator_index: ValidatorIndex, config: &Config, relay_vrf_story: RelayVRFStory, - assignment: &AssignmentCert, + assignment: &AssignmentCertV2, // Backing groups for each assigned core `CoreIndex`. backing_groups: Vec, ) -> Result; @@ -258,7 +259,7 @@ impl AssignmentCriteria for RealAssignmentCriteria { validator_index: ValidatorIndex, config: &Config, relay_vrf_story: RelayVRFStory, - assignment: &AssignmentCert, + assignment: &AssignmentCertV2, backing_groups: Vec, ) -> Result { check_assignment_cert( @@ -417,8 +418,8 @@ fn compute_relay_vrf_modulo_assignments( }; if let Some(assignment) = maybe_assignment.map(|(vrf_in_out, vrf_proof, _)| { - let cert = AssignmentCert { - kind: AssignmentCertKind::RelayVRFModuloCompact { + let cert = AssignmentCertV2 { + kind: AssignmentCertKindV2::RelayVRFModuloCompact { sample: config.relay_vrf_modulo_samples - 1, core_indices: assigned_cores.clone(), }, @@ -455,8 +456,8 @@ fn compute_relay_vrf_delay_assignments( config.zeroth_delay_tranche_width, ); - let cert = AssignmentCert { - kind: AssignmentCertKind::RelayVRFDelay { core_index: core }, + let cert = AssignmentCertV2 { + kind: AssignmentCertKindV2::RelayVRFDelay { core_index: core }, vrf: ( approval_types::VRFOutput(vrf_in_out.to_output()), approval_types::VRFProof(vrf_proof), @@ -535,7 +536,7 @@ pub(crate) fn check_assignment_cert( validator_index: ValidatorIndex, config: &Config, relay_vrf_story: RelayVRFStory, - assignment: &AssignmentCert, + assignment: &AssignmentCertV2, backing_groups: Vec, ) -> Result { use InvalidAssignmentReason as Reason; @@ -570,7 +571,7 @@ pub(crate) fn check_assignment_cert( let &(ref vrf_output, ref vrf_proof) = &assignment.vrf; match &assignment.kind { - AssignmentCertKind::RelayVRFModuloCompact { sample, core_indices } => { + AssignmentCertKindV2::RelayVRFModuloCompact { sample, core_indices } => { if *sample >= config.relay_vrf_modulo_samples { return Err(InvalidAssignment(Reason::SampleOutOfBounds)) } @@ -607,7 +608,7 @@ pub(crate) fn check_assignment_cert( Err(InvalidAssignment(Reason::VRFModuloCoreIndexMismatch)) 
} }, - AssignmentCertKind::RelayVRFModulo { sample } => { + AssignmentCertKindV2::RelayVRFModulo { sample } => { if *sample >= config.relay_vrf_modulo_samples { return Err(InvalidAssignment(Reason::SampleOutOfBounds)) } @@ -635,7 +636,7 @@ pub(crate) fn check_assignment_cert( Err(InvalidAssignment(Reason::VRFModuloCoreIndexMismatch)) } }, - AssignmentCertKind::RelayVRFDelay { core_index } => { + AssignmentCertKindV2::RelayVRFDelay { core_index } => { if *core_index != claimed_core_indices[0] { return Err(InvalidAssignment(Reason::VRFDelayCoreIndexMismatch)) } diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index 62e83fea3623..9777bdab97f1 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -24,8 +24,8 @@ use polkadot_node_jaeger as jaeger; use polkadot_node_primitives::{ approval::{ - AssignmentCert, AssignmentCertKind, BlockApprovalMeta, DelayTranche, - IndirectAssignmentCert, IndirectSignedApprovalVote, + AssignmentCertKindV2, AssignmentCertV2, BlockApprovalMeta, DelayTranche, + IndirectAssignmentCertV2, IndirectSignedApprovalVote, }, ValidationResult, APPROVAL_EXECUTION_TIMEOUT, }; @@ -742,7 +742,7 @@ enum Action { }, LaunchApproval { candidate_hash: CandidateHash, - indirect_cert: IndirectAssignmentCert, + indirect_cert: IndirectAssignmentCertV2, assignment_tranche: DelayTranche, relay_block_hash: Hash, candidate_index: CandidateIndex, @@ -1051,11 +1051,11 @@ async fn handle_actions( fn cores_to_candidate_indices( block_entry: &BlockEntry, candidate_index: CandidateIndex, - cert: &AssignmentCert, + cert: &AssignmentCertV2, ) -> Vec { let mut candidate_indices = Vec::new(); match &cert.kind { - AssignmentCertKind::RelayVRFModuloCompact { sample: _, core_indices } => { + AssignmentCertKindV2::RelayVRFModuloCompact { sample: _, core_indices } => { for cert_core_index in core_indices { if let Some(candidate_index) = block_entry .candidates() @@ -1120,7 +1120,7 @@ fn distribution_messages_for_activation( (None, None) | (None, Some(_)) => {}, // second is impossible case. 
(Some(assignment), None) => { messages.push(ApprovalDistributionMessage::DistributeAssignment( - IndirectAssignmentCert { + IndirectAssignmentCertV2 { block_hash, validator: assignment.validator_index(), cert: assignment.cert().clone(), @@ -1130,7 +1130,7 @@ fn distribution_messages_for_activation( }, (Some(assignment), Some(approval_sig)) => { messages.push(ApprovalDistributionMessage::DistributeAssignment( - IndirectAssignmentCert { + IndirectAssignmentCertV2 { block_hash, validator: assignment.validator_index(), cert: assignment.cert().clone(), @@ -1711,7 +1711,7 @@ fn schedule_wakeup_action( fn check_and_import_assignment( state: &State, db: &mut OverlayedBackend<'_, impl Backend>, - assignment: IndirectAssignmentCert, + assignment: IndirectAssignmentCertV2, candidate_indices: Vec, ) -> SubsystemResult<(AssignmentCheckResult, Vec)> { let tick_now = state.clock.tick_now(); @@ -2296,7 +2296,7 @@ fn process_wakeup( if let Some((cert, val_index, tranche)) = maybe_cert { let indirect_cert = - IndirectAssignmentCert { block_hash: relay_block, validator: val_index, cert }; + IndirectAssignmentCertV2 { block_hash: relay_block, validator: val_index, cert }; let index_in_candidate = block_entry.candidates().iter().position(|(_, h)| &candidate_hash == h); diff --git a/node/core/approval-voting/src/persisted_entries.rs b/node/core/approval-voting/src/persisted_entries.rs index 1df0ff91c1a1..91e7a381d637 100644 --- a/node/core/approval-voting/src/persisted_entries.rs +++ b/node/core/approval-voting/src/persisted_entries.rs @@ -20,7 +20,7 @@ //! Within that context, things are plain-old-data. Within this module, //! data and logic are intertwined. -use polkadot_node_primitives::approval::{AssignmentCert, DelayTranche, RelayVRFStory}; +use polkadot_node_primitives::approval::{AssignmentCertV2, DelayTranche, RelayVRFStory}; use polkadot_primitives::{ BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, Hash, SessionIndex, ValidatorIndex, ValidatorSignature, @@ -107,7 +107,7 @@ impl ApprovalEntry { pub fn trigger_our_assignment( &mut self, tick_now: Tick, - ) -> Option<(AssignmentCert, ValidatorIndex, DelayTranche)> { + ) -> Option<(AssignmentCertV2, ValidatorIndex, DelayTranche)> { let our = self.our_assignment.as_mut().and_then(|a| { if a.triggered() { return None diff --git a/node/core/approval-voting/src/tests.rs b/node/core/approval-voting/src/tests.rs index b5cc00974fdd..c87d5308f562 100644 --- a/node/core/approval-voting/src/tests.rs +++ b/node/core/approval-voting/src/tests.rs @@ -620,7 +620,7 @@ async fn check_and_import_assignment( overseer, FromOrchestra::Communication { msg: ApprovalVotingMessage::CheckAndImportAssignment( - IndirectAssignmentCert { + IndirectAssignmentCertV2 { block_hash, validator, cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }), @@ -1106,7 +1106,7 @@ fn blank_subsystem_act_on_bad_block() { &mut virtual_overseer, FromOrchestra::Communication { msg: ApprovalVotingMessage::CheckAndImportAssignment( - IndirectAssignmentCert { + IndirectAssignmentCertV2 { block_hash: bad_block_hash.clone(), validator: 0u32.into(), cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { @@ -1774,7 +1774,7 @@ fn linear_import_act_on_leaf() { &mut virtual_overseer, FromOrchestra::Communication { msg: ApprovalVotingMessage::CheckAndImportAssignment( - IndirectAssignmentCert { + IndirectAssignmentCertV2 { block_hash: head, validator: 0u32.into(), cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { @@ -1844,7 +1844,7 @@ fn 
forkful_import_at_same_height_act_on_leaf() { &mut virtual_overseer, FromOrchestra::Communication { msg: ApprovalVotingMessage::CheckAndImportAssignment( - IndirectAssignmentCert { + IndirectAssignmentCertV2 { block_hash: head, validator: 0u32.into(), cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index e66ad3ebce7b..2b078908c6e5 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -25,13 +25,14 @@ use futures::{channel::oneshot, FutureExt as _}; use itertools::Itertools; use net_protocol::peer_set::{ProtocolVersion, ValidationVersion}; use polkadot_node_network_protocol::{ - self as net_protocol, + self as net_protocol, filter_by_peer_version, grid_topology::{RandomRouting, RequiredRouting, SessionGridTopologies, SessionGridTopology}, peer_set::MAX_NOTIFICATION_SIZE, - v1 as protocol_v1, PeerId, UnifiedReputationChange as Rep, Versioned, View, + v1 as protocol_v1, vstaging as protocol_vstaging, PeerId, UnifiedReputationChange as Rep, + Versioned, View, }; use polkadot_node_primitives::approval::{ - BlockApprovalMeta, IndirectAssignmentCert, IndirectSignedApprovalVote, + BlockApprovalMeta, IndirectAssignmentCertV2, IndirectSignedApprovalVote, }; use polkadot_node_subsystem::{ messages::{ @@ -102,7 +103,7 @@ struct ApprovalRouting { // This struct is responsible for tracking the full state of an assignment and grid routing information. struct ApprovalEntry { // The assignment certificate. - assignment: IndirectAssignmentCert, + assignment: IndirectAssignmentCertV2, // The candidates claimed by the certificate. candidates: HashSet, // The approval signatures for each `CandidateIndex` claimed by the assignment certificate. @@ -115,7 +116,7 @@ struct ApprovalEntry { impl ApprovalEntry { pub fn new( - assignment: IndirectAssignmentCert, + assignment: IndirectAssignmentCertV2, candidates: Vec, routing_info: ApprovalRouting, ) -> ApprovalEntry { @@ -152,11 +153,6 @@ impl ApprovalEntry { ) } - // Returns true if an assigned candidate has been approved by the validator. - pub fn is_approved(&self, candidate_index: &CandidateIndex) -> bool { - self.approvals.contains_key(candidate_index) - } - // Updates routing information and returns the previous information if any. pub fn routing_info_mut(&mut self) -> &mut ApprovalRouting { &mut self.routing_info @@ -193,18 +189,10 @@ impl ApprovalEntry { } // Get the assignment certiticate and claimed candidates. - pub fn get_assignment(&self) -> (IndirectAssignmentCert, Vec) { + pub fn get_assignment(&self) -> (IndirectAssignmentCertV2, Vec) { (self.assignment.clone(), self.candidates.iter().cloned().collect::>()) } - // Get an approval for a specific candidate if it exists. - pub fn get_approval( - &self, - candidate_index: CandidateIndex, - ) -> Option { - self.approvals.get(&candidate_index).cloned() - } - // Get all approvals for all candidates claimed by the assignment. pub fn get_approvals(&self) -> Vec { self.approvals.values().cloned().collect::>() @@ -301,7 +289,7 @@ impl Knowledge { // In case of succesful insertion of multiple candidate assignments create additional // entries for each assigned candidate. This fakes knowledge of individual assignments, but - // we need to share the same `MessageSubject` with the followup approval. + // we need to share the same `MessageSubject` with the followup approval candidate index. 
if kind == MessageKind::Assignment && success && message.1.len() > 1 { message.1.iter().fold(success, |success, candidate_index| { success & @@ -472,7 +460,7 @@ impl MessageSource { } enum PendingMessage { - Assignment(IndirectAssignmentCert, Vec), + Assignment(IndirectAssignmentCertV2, Vec), Approval(IndirectSignedApprovalVote), } @@ -532,8 +520,8 @@ impl State { live }); }, - NetworkBridgeEvent::PeerMessage(peer_id, Versioned::V1(msg)) => { - self.process_incoming_peer_message(ctx, metrics, peer_id, msg, rng).await; + NetworkBridgeEvent::PeerMessage(peer_id, message) => { + self.process_incoming_peer_message(ctx, metrics, peer_id, message, rng).await; }, } } @@ -698,7 +686,7 @@ impl State { ctx: &mut Context, metrics: &Metrics, peer_id: PeerId, - assignments: Vec<(IndirectAssignmentCert, Vec)>, + assignments: Vec<(IndirectAssignmentCertV2, Vec)>, rng: &mut R, ) where R: CryptoRng + Rng, @@ -739,13 +727,18 @@ impl State { ctx: &mut Context, metrics: &Metrics, peer_id: PeerId, - msg: protocol_v1::ApprovalDistributionMessage, + msg: Versioned< + protocol_v1::ApprovalDistributionMessage, + protocol_vstaging::ApprovalDistributionMessage, + >, rng: &mut R, ) where R: CryptoRng + Rng, { match msg { - protocol_v1::ApprovalDistributionMessage::AssignmentsV2(assignments) => { + Versioned::VStaging(protocol_vstaging::ApprovalDistributionMessage::Assignments( + assignments, + )) => { gum::trace!( target: LOG_TARGET, peer_id = %peer_id, @@ -754,7 +747,7 @@ impl State { ); self.process_incoming_assignments(ctx, metrics, peer_id, assignments, rng).await; }, - protocol_v1::ApprovalDistributionMessage::Assignments(assignments) => { + Versioned::V1(protocol_v1::ApprovalDistributionMessage::Assignments(assignments)) => { gum::trace!( target: LOG_TARGET, peer_id = %peer_id, @@ -768,13 +761,16 @@ impl State { peer_id, assignments .into_iter() - .map(|(cert, candidate)| (cert, vec![candidate])) + .map(|(cert, candidate)| (cert.into(), vec![candidate])) .collect::>(), rng, ) .await; }, - protocol_v1::ApprovalDistributionMessage::Approvals(approvals) => { + Versioned::VStaging(protocol_vstaging::ApprovalDistributionMessage::Approvals( + approvals, + )) | + Versioned::V1(protocol_v1::ApprovalDistributionMessage::Approvals(approvals)) => { gum::trace!( target: LOG_TARGET, peer_id = %peer_id, @@ -910,7 +906,7 @@ impl State { ctx: &mut Context, metrics: &Metrics, source: MessageSource, - assignment: IndirectAssignmentCert, + assignment: IndirectAssignmentCertV2, claimed_candidate_indices: Vec, rng: &mut R, ) where @@ -1569,7 +1565,8 @@ impl State { "Sending approvals to unified peer", ); - send_approvals_batched(sender, approvals_to_send, peer_id).await; + send_approvals_batched(sender, approvals_to_send, &vec![(peer_id, protocol_version)]) + .await; } } @@ -1773,7 +1770,17 @@ async fn adjust_required_routing_and_propagate usize { /// configuration. pub const MAX_ASSIGNMENT_BATCH_SIZE: usize = ensure_size_not_zero( MAX_NOTIFICATION_SIZE as usize / - std::mem::size_of::<(IndirectAssignmentCert, CandidateIndex)>() / + std::mem::size_of::<(IndirectAssignmentCertV2, CandidateIndex)>() / 3, ); @@ -1939,22 +1946,28 @@ pub const MAX_APPROVAL_BATCH_SIZE: usize = ensure_size_not_zero( // Low level helper for sending assignments. async fn send_assignments_batched_inner( sender: &mut impl overseer::ApprovalDistributionSenderTrait, - batch: Vec<(IndirectAssignmentCert, Vec)>, - peers: &Vec<&PeerId>, + batch: Vec<(IndirectAssignmentCertV2, Vec)>, + peers: &Vec, + // TODO: use `ValidationVersion`. 
peer_version: u32, ) { - let peers = peers.into_iter().cloned().cloned().collect::>(); + let peers = peers.into_iter().cloned().collect::>(); if peer_version == 2 { sender .send_message(NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( - protocol_v1::ApprovalDistributionMessage::AssignmentsV2(batch), + Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( + protocol_vstaging::ApprovalDistributionMessage::Assignments(batch), )), )) .await; } else { - let batch = batch.into_iter().map(|(cert, candidates)| (cert, candidates[0])).collect(); + // Create a batch of v1 assignments from v2 assignments that are compatible with v1. + // `IndirectAssignmentCertV2` -> `IndirectAssignmentCert` + let batch = batch + .into_iter() + .filter_map(|(cert, candidates)| cert.try_into().ok().map(|cert| (cert, candidates[0]))) + .collect(); sender .send_message(NetworkBridgeTxMessage::SendValidationMessage( peers, @@ -1973,26 +1986,16 @@ async fn send_assignments_batched_inner( /// of assignments and can `select!` other tasks. pub(crate) async fn send_assignments_batched( sender: &mut impl overseer::ApprovalDistributionSenderTrait, - v2_assignments: Vec<(IndirectAssignmentCert, Vec)>, + v2_assignments: Vec<(IndirectAssignmentCertV2, Vec)>, peers: &Vec<(PeerId, ProtocolVersion)>, ) { - let v1_peers = peers - .iter() - .filter_map( - |(peer_id, version)| if u32::from(*version) == 1 { Some(peer_id) } else { None }, - ) - .collect::>(); - let v2_peers = peers - .iter() - .filter_map( - |(peer_id, version)| if u32::from(*version) == 2 { Some(peer_id) } else { None }, - ) - .collect::>(); + let v1_peers = filter_by_peer_version(peers, ValidationVersion::V1.into()); + let v2_peers = filter_by_peer_version(peers, ValidationVersion::V2.into()); if v1_peers.len() > 0 { let mut v1_assignments = v2_assignments.clone(); // Older peers(v1) do not understand `AssignmentsV2` messages, so we have to filter these out. - v1_assignments.retain(|(assignment, candidates)| candidates.len() == 1); + v1_assignments.retain(|(_, candidates)| candidates.len() == 1); let mut v1_batches = v1_assignments.into_iter().peekable(); @@ -2016,24 +2019,48 @@ pub(crate) async fn send_assignments_batched( } } -/// Send approvals while honoring the `max_notification_size` of the protocol. +/// Send approvals while honoring the `max_notification_size` of the protocol and peer version. 
pub(crate) async fn send_approvals_batched( sender: &mut impl overseer::ApprovalDistributionSenderTrait, approvals: Vec, - peer: PeerId, + peers: &Vec<(PeerId, ProtocolVersion)>, ) { - let mut batches = approvals.into_iter().peekable(); + let v1_peers = filter_by_peer_version(peers, ValidationVersion::V1.into()); + let v2_peers = filter_by_peer_version(peers, ValidationVersion::V2.into()); - while batches.peek().is_some() { - let batch: Vec<_> = batches.by_ref().take(MAX_APPROVAL_BATCH_SIZE).collect(); + if v1_peers.len() > 0 { + let mut batches = approvals.clone().into_iter().peekable(); + + while batches.peek().is_some() { + let batch: Vec<_> = batches.by_ref().take(MAX_APPROVAL_BATCH_SIZE).collect(); + + sender + .send_message(NetworkBridgeTxMessage::SendValidationMessage( + v1_peers.clone(), + Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( + protocol_v1::ApprovalDistributionMessage::Approvals(batch), + )), + )) + .await; + } + } - sender - .send_message(NetworkBridgeTxMessage::SendValidationMessage( - vec![peer], - Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( - protocol_v1::ApprovalDistributionMessage::Approvals(batch), - )), - )) - .await; + if v2_peers.len() > 0 { + let mut batches = approvals.into_iter().peekable(); + + while batches.peek().is_some() { + let batch: Vec<_> = batches.by_ref().take(MAX_APPROVAL_BATCH_SIZE).collect(); + + sender + .send_message(NetworkBridgeTxMessage::SendValidationMessage( + v2_peers.clone(), + Versioned::VStaging( + protocol_vstaging::ValidationProtocol::ApprovalDistribution( + protocol_vstaging::ApprovalDistributionMessage::Approvals(batch), + ), + ), + )) + .await; + } } } diff --git a/node/network/approval-distribution/src/tests.rs b/node/network/approval-distribution/src/tests.rs index 459b9d4899fb..67fcea33b018 100644 --- a/node/network/approval-distribution/src/tests.rs +++ b/node/network/approval-distribution/src/tests.rs @@ -24,7 +24,7 @@ use polkadot_node_network_protocol::{ view, ObservedRole, }; use polkadot_node_primitives::approval::{ - AssignmentCertKind, VRFOutput, VRFProof, RELAY_VRF_MODULO_CONTEXT, + AssignmentCert, AssignmentCertKind, VRFOutput, VRFProof, RELAY_VRF_MODULO_CONTEXT, }; use polkadot_node_subsystem::messages::{network_bridge_event, AllMessages, ApprovalCheckError}; use polkadot_node_subsystem_test_helpers as test_helpers; @@ -35,7 +35,6 @@ use rand::SeedableRng; use sp_authority_discovery::AuthorityPair as AuthorityDiscoveryPair; use sp_core::crypto::Pair as PairT; use std::time::Duration; - type VirtualOverseer = test_helpers::TestSubsystemContextHandle; fn test_harness>( @@ -252,7 +251,7 @@ async fn send_message_from_peer( .await; } -fn fake_assignment_cert(block_hash: Hash, validator: ValidatorIndex) -> IndirectAssignmentCert { +fn fake_assignment_cert(block_hash: Hash, validator: ValidatorIndex) -> IndirectAssignmentCertV2 { let ctx = schnorrkel::signing_context(RELAY_VRF_MODULO_CONTEXT); let msg = b"WhenParachains?"; let mut prng = rand_core::OsRng; @@ -260,7 +259,7 @@ fn fake_assignment_cert(block_hash: Hash, validator: ValidatorIndex) -> Indirect let (inout, proof, _) = keypair.vrf_sign(ctx.bytes(msg)); let out = inout.to_output(); - IndirectAssignmentCert { + IndirectAssignmentCertV2 { block_hash, validator, cert: AssignmentCert { @@ -323,7 +322,7 @@ fn try_import_the_same_assignment() { // send the assignment related to `hash` let validator_index = ValidatorIndex(0); let cert = fake_assignment_cert(hash, validator_index); - let assignments = vec![(cert.clone(), 
0u32)]; + let assignments = vec![(cert.clone(), vec![0u32])]; let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments.clone()); send_message_from_peer(overseer, &peer_a, msg).await; @@ -335,9 +334,10 @@ fn try_import_the_same_assignment() { overseer_recv(overseer).await, AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( assignment, - 0u32, + claimed_indices, tx, )) => { + assert_eq!(claimed_indices, vec![0u32]); assert_eq!(assignment, cert); tx.send(AssignmentCheckResult::Accepted).unwrap(); } @@ -488,7 +488,7 @@ fn peer_sending_us_the_same_we_just_sent_them_is_ok() { // import an assignment related to `hash` locally let validator_index = ValidatorIndex(0); - let candidate_index = 0u32; + let candidate_index = vec![0u32]; let cert = fake_assignment_cert(hash, validator_index); overseer_send( overseer, @@ -566,7 +566,7 @@ fn import_approval_happy_path() { // import an assignment related to `hash` locally let validator_index = ValidatorIndex(0); - let candidate_index = 0u32; + let candidate_index = vec![0u32]; let cert = fake_assignment_cert(hash, validator_index); overseer_send( overseer, @@ -652,7 +652,7 @@ fn import_approval_bad() { overseer_send(overseer, msg).await; let validator_index = ValidatorIndex(0); - let candidate_index = 0u32; + let candidate_index = vec![0u32]; let cert = fake_assignment_cert(hash, validator_index); // send the an approval from peer_b, we don't have an assignment yet @@ -961,7 +961,7 @@ fn import_remotely_then_locally() { // import the assignment remotely first let validator_index = ValidatorIndex(0); - let candidate_index = 0u32; + let candidate_index = vec![0u32]; let cert = fake_assignment_cert(hash, validator_index); let assignments = vec![(cert.clone(), candidate_index)]; let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments.clone()); @@ -1045,7 +1045,7 @@ fn sends_assignments_even_when_state_is_approved() { overseer_send(overseer, msg).await; let validator_index = ValidatorIndex(0); - let candidate_index = 0u32; + let candidate_index = vec![0u32]; // import an assignment and approval locally. let cert = fake_assignment_cert(hash, validator_index); @@ -1223,7 +1223,7 @@ fn propagates_locally_generated_assignment_to_both_dimensions() { overseer_send(overseer, msg).await; let validator_index = ValidatorIndex(0); - let candidate_index = 0u32; + let candidate_index = vec![0u32]; // import an assignment and approval locally. let cert = fake_assignment_cert(hash, validator_index); @@ -1325,7 +1325,7 @@ fn propagates_assignments_along_unshared_dimension() { // Test messages from X direction go to Y peers { let validator_index = ValidatorIndex(0); - let candidate_index = 0u32; + let candidate_index = vec![0u32]; // import an assignment and approval locally. let cert = fake_assignment_cert(hash, validator_index); @@ -1374,7 +1374,7 @@ fn propagates_assignments_along_unshared_dimension() { // Test messages from X direction go to Y peers { let validator_index = ValidatorIndex(50); - let candidate_index = 0u32; + let candidate_index = vec![0u32]; // import an assignment and approval locally. let cert = fake_assignment_cert(hash, validator_index); @@ -1471,7 +1471,7 @@ fn propagates_to_required_after_connect() { overseer_send(overseer, msg).await; let validator_index = ValidatorIndex(0); - let candidate_index = 0u32; + let candidate_index = vec![0u32]; // import an assignment and approval locally. 
let cert = fake_assignment_cert(hash, validator_index); @@ -1596,7 +1596,7 @@ fn sends_to_more_peers_after_getting_topology() { overseer_send(overseer, msg).await; let validator_index = ValidatorIndex(0); - let candidate_index = 0u32; + let candidate_index = vec![0u32]; // import an assignment and approval locally. let cert = fake_assignment_cert(hash, validator_index); @@ -1748,7 +1748,7 @@ fn originator_aggression_l1() { overseer_send(overseer, msg).await; let validator_index = ValidatorIndex(0); - let candidate_index = 0u32; + let candidate_index = vec![0u32]; // import an assignment and approval locally. let cert = fake_assignment_cert(hash, validator_index); @@ -1906,7 +1906,7 @@ fn non_originator_aggression_l1() { overseer_send(overseer, msg).await; let validator_index = ValidatorIndex(0); - let candidate_index = 0u32; + let candidate_index = vec![0u32]; // import an assignment and approval locally. let cert = fake_assignment_cert(hash, validator_index); @@ -2011,7 +2011,7 @@ fn non_originator_aggression_l2() { overseer_send(overseer, msg).await; let validator_index = ValidatorIndex(0); - let candidate_index = 0u32; + let candidate_index = vec![0u32]; // import an assignment and approval locally. let cert = fake_assignment_cert(hash, validator_index); @@ -2177,7 +2177,7 @@ fn resends_messages_periodically() { overseer_send(overseer, msg).await; let validator_index = ValidatorIndex(0); - let candidate_index = 0u32; + let candidate_index = vec![0u32]; // import an assignment and approval locally. let cert = fake_assignment_cert(hash, validator_index); diff --git a/node/network/bitfield-distribution/src/lib.rs b/node/network/bitfield-distribution/src/lib.rs index 63a9c4ccf091..67c9b9b5a855 100644 --- a/node/network/bitfield-distribution/src/lib.rs +++ b/node/network/bitfield-distribution/src/lib.rs @@ -617,6 +617,7 @@ async fn handle_network_msg( gum::trace!(target: LOG_TARGET, ?new_view, "Our view change"); handle_our_view_change(state, new_view); }, + NetworkBridgeEvent::PeerMessage(remote, Versioned::VStaging(message)) | NetworkBridgeEvent::PeerMessage(remote, Versioned::V1(message)) => process_incoming_peer_message(ctx, state, metrics, remote, message, rng).await, } diff --git a/node/network/bridge/src/network.rs b/node/network/bridge/src/network.rs index 28a84a19b321..0de62b9a9f7a 100644 --- a/node/network/bridge/src/network.rs +++ b/node/network/bridge/src/network.rs @@ -71,6 +71,7 @@ pub(crate) fn send_message( let last_peer = peers.pop(); // optimization: generate the protocol name once. 
let protocol_name = protocol_names.get_name(peer_set, version); + gum::debug!(target: LOG_TARGET, ?peers, ?version, ?protocol_name, "Sending message to peers",); peers.into_iter().for_each(|peer| { net.write_notification(peer, protocol_name.clone(), message.clone()); }); diff --git a/node/network/bridge/src/rx/mod.rs b/node/network/bridge/src/rx/mod.rs index 6059f41c08c2..f004153ea9ba 100644 --- a/node/network/bridge/src/rx/mod.rs +++ b/node/network/bridge/src/rx/mod.rs @@ -32,7 +32,8 @@ use polkadot_node_network_protocol::{ CollationVersion, PeerSet, PeerSetProtocolNames, PerPeerSet, ProtocolVersion, ValidationVersion, }, - v1 as protocol_v1, ObservedRole, OurView, PeerId, UnifiedReputationChange as Rep, View, + v1 as protocol_v1, vstaging as protocol_vstaging, ObservedRole, OurView, PeerId, + UnifiedReputationChange as Rep, View, }; use polkadot_node_subsystem::{ @@ -245,15 +246,32 @@ where ) .await; - send_message( - &mut network_service, - vec![peer], - PeerSet::Validation, - version, - &peerset_protocol_names, - WireMessage::::ViewUpdate(local_view), - &metrics, - ); + match ValidationVersion::try_from(version) + .expect("try_get_protocol has already checked version is known; qed") + { + ValidationVersion::V1 => send_message( + &mut network_service, + vec![peer], + PeerSet::Validation, + version, + &peerset_protocol_names, + WireMessage::::ViewUpdate( + local_view, + ), + &metrics, + ), + ValidationVersion::V2 => send_message( + &mut network_service, + vec![peer], + PeerSet::Validation, + version, + &peerset_protocol_names, + WireMessage::::ViewUpdate( + local_view, + ), + &metrics, + ), + } }, PeerSet::Collation => { dispatch_collation_events_to_all( @@ -348,8 +366,16 @@ where .filter_map(|(protocol, msg_bytes)| { // version doesn't matter because we always receive on the 'correct' // protocol name, not the negotiated fallback. - let (peer_set, _version) = + let (peer_set, version) = peerset_protocol_names.try_get_protocol(protocol)?; + gum::trace!( + target: LOG_TARGET, + ?peer_set, + ?protocol, + ?version, + "Received notification" + ); + if peer_set == PeerSet::Validation { if expected_versions[PeerSet::Validation].is_none() { return Some(Err(UNCONNECTED_PEERSET_COST)) @@ -420,7 +446,17 @@ where if expected_versions[PeerSet::Validation] == Some(ValidationVersion::V1.into()) { - handle_v1_peer_messages::( + handle_peer_messages::( + remote, + PeerSet::Validation, + &mut shared.0.lock().validation_peers, + v_messages, + &metrics, + ) + } else if expected_versions[PeerSet::Validation] == + Some(ValidationVersion::V2.into()) + { + handle_peer_messages::( remote, PeerSet::Validation, &mut shared.0.lock().validation_peers, @@ -434,7 +470,7 @@ where "Major logic bug. Peer somehow has unsupported validation protocol version." ); - never!("Only version 1 is supported; peer set connection checked above; qed"); + never!("Only version 1/2 is supported; peer set connection checked above; qed"); // If a peer somehow triggers this, we'll disconnect them // eventually. 
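The hunks above establish the pattern used across the bridge: recover the peer set's negotiated `ValidationVersion`, then build the matching versioned `WireMessage`. A self-contained sketch of that dispatch follows; `Version` and the string payloads are simplified stand-ins introduced for the sketch, not the bridge's actual types.

// Minimal stand-in for `ValidationVersion`; only the dispatch shape matters here.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Version {
    V1 = 1,
    VStaging = 2,
}

impl TryFrom<u32> for Version {
    type Error = ();
    fn try_from(raw: u32) -> Result<Self, ()> {
        match raw {
            1 => Ok(Version::V1),
            2 => Ok(Version::VStaging),
            _ => Err(()),
        }
    }
}

// Unknown versions are rejected when the peer connects, which is what justifies the
// `expect` here, mirroring the `expect("try_get_protocol has already checked ...")` above.
fn view_update_payload(raw_version: u32, view: &[u64]) -> String {
    match Version::try_from(raw_version).expect("version validated at connect time") {
        Version::V1 => format!("v1::ViewUpdate({:?})", view),
        Version::VStaging => format!("vstaging::ViewUpdate({:?})", view),
    }
}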
@@ -453,7 +489,7 @@ where if expected_versions[PeerSet::Collation] == Some(CollationVersion::V1.into()) { - handle_v1_peer_messages::( + handle_peer_messages::( remote, PeerSet::Collation, &mut shared.0.lock().collation_peers, @@ -716,14 +752,35 @@ fn update_our_view( } ( - shared.validation_peers.keys().cloned().collect::>(), + shared + .validation_peers + .iter() + .map(|(peer_id, peer_data)| (peer_id.clone(), peer_data.version)) + .collect::>(), shared.collation_peers.keys().cloned().collect::>(), ) }; + let filter_by_version = |peers: &[(PeerId, ProtocolVersion)], version| { + peers.iter().filter(|(_, v)| v == &version).map(|(p, _)| *p).collect::>() + }; + + let v1_validation_peers = + filter_by_version(validation_peers.as_slice(), ValidationVersion::V1.into()); + let vstaging_validation_peers = + filter_by_version(&validation_peers, ValidationVersion::V2.into()); + send_validation_message_v1( net, - validation_peers, + v1_validation_peers, + peerset_protocol_names, + WireMessage::ViewUpdate(new_view.clone()), + metrics, + ); + + send_validation_message_vstaging( + net, + vstaging_validation_peers, peerset_protocol_names, WireMessage::ViewUpdate(new_view.clone()), metrics, @@ -755,7 +812,7 @@ fn update_our_view( // Handle messages on a specific v1 peer-set. The peer is expected to be connected on that // peer-set. -fn handle_v1_peer_messages>( +fn handle_peer_messages>( peer: PeerId, peer_set: PeerSet, peers: &mut HashMap, @@ -813,6 +870,7 @@ fn send_validation_message_v1( message: WireMessage, metrics: &Metrics, ) { + gum::debug!(target: LOG_TARGET, ?peers, ?message, "Sending validation v1 message to peers",); send_message( net, peers, @@ -824,6 +882,25 @@ fn send_validation_message_v1( ); } +fn send_validation_message_vstaging( + net: &mut impl Network, + peers: Vec, + peerset_protocol_names: &PeerSetProtocolNames, + message: WireMessage, + metrics: &Metrics, +) { + gum::debug!(target: LOG_TARGET, ?peers, ?message, "Sending validation v2 message to peers",); + send_message( + net, + peers, + PeerSet::Validation, + ValidationVersion::V2.into(), + peerset_protocol_names, + message, + metrics, + ); +} + fn send_collation_message_v1( net: &mut impl Network, peers: Vec, diff --git a/node/network/bridge/src/tx/mod.rs b/node/network/bridge/src/tx/mod.rs index 32a0ecaf7510..02e4fbc1c350 100644 --- a/node/network/bridge/src/tx/mod.rs +++ b/node/network/bridge/src/tx/mod.rs @@ -20,7 +20,7 @@ use super::*; use polkadot_node_network_protocol::{ peer_set::{CollationVersion, PeerSet, PeerSetProtocolNames, ValidationVersion}, request_response::ReqProtocolNames, - v1 as protocol_v1, PeerId, Versioned, + v1 as protocol_v1, vstaging as protocol_vstaging, PeerId, Versioned, }; use polkadot_node_subsystem::{ @@ -183,6 +183,13 @@ where WireMessage::ProtocolMessage(msg), &metrics, ), + Versioned::VStaging(msg) => send_validation_message_vstaging( + &mut network_service, + peers, + peerset_protocol_names, + WireMessage::ProtocolMessage(msg), + &metrics, + ), } }, NetworkBridgeTxMessage::SendValidationMessages(msgs) => { @@ -201,6 +208,13 @@ where WireMessage::ProtocolMessage(msg), &metrics, ), + Versioned::VStaging(msg) => send_validation_message_vstaging( + &mut network_service, + peers, + peerset_protocol_names, + WireMessage::ProtocolMessage(msg), + &metrics, + ), } } }, @@ -219,6 +233,7 @@ where WireMessage::ProtocolMessage(msg), &metrics, ), + _ => unimplemented!("collation protocol has only v1; qed"), } }, NetworkBridgeTxMessage::SendCollationMessages(msgs) => { @@ -237,6 +252,7 @@ where 
WireMessage::ProtocolMessage(msg), &metrics, ), + _ => unimplemented!("collation protocol has only v1; qed"), } } }, @@ -350,6 +366,24 @@ fn send_validation_message_v1( ); } +fn send_validation_message_vstaging( + net: &mut impl Network, + peers: Vec, + protocol_names: &PeerSetProtocolNames, + message: WireMessage, + metrics: &Metrics, +) { + send_message( + net, + peers, + PeerSet::Validation, + ValidationVersion::V2.into(), + protocol_names, + message, + metrics, + ); +} + fn send_collation_message_v1( net: &mut impl Network, peers: Vec, diff --git a/node/network/collator-protocol/src/collator_side/mod.rs b/node/network/collator-protocol/src/collator_side/mod.rs index cb4a3b4a8f52..4dfb9d7123bd 100644 --- a/node/network/collator-protocol/src/collator_side/mod.rs +++ b/node/network/collator-protocol/src/collator_side/mod.rs @@ -892,6 +892,8 @@ async fn handle_network_msg( NewGossipTopology { .. } => { // impossible! }, + PeerMessage(_, Versioned::VStaging(_)) => + unimplemented!("We only support collator protocol version 1."), } Ok(()) diff --git a/node/network/collator-protocol/src/validator_side/mod.rs b/node/network/collator-protocol/src/validator_side/mod.rs index 5d5417fb3001..d0e906a2b831 100644 --- a/node/network/collator-protocol/src/validator_side/mod.rs +++ b/node/network/collator-protocol/src/validator_side/mod.rs @@ -1075,6 +1075,9 @@ async fn handle_network_msg( PeerMessage(remote, Versioned::V1(msg)) => { process_incoming_peer_message(ctx, state, remote, msg).await; }, + PeerMessage(_, Versioned::VStaging(_)) => { + unimplemented!("We only support collator protocol version 1."); + }, } Ok(()) diff --git a/node/network/gossip-support/src/lib.rs b/node/network/gossip-support/src/lib.rs index 03d0e2150892..ab6412023ead 100644 --- a/node/network/gossip-support/src/lib.rs +++ b/node/network/gossip-support/src/lib.rs @@ -404,6 +404,9 @@ where NetworkBridgeEvent::OurViewChange(_) => {}, NetworkBridgeEvent::PeerViewChange(_, _) => {}, NetworkBridgeEvent::NewGossipTopology { .. } => {}, + NetworkBridgeEvent::PeerMessage(_, Versioned::VStaging(v)) => { + match v {}; + }, NetworkBridgeEvent::PeerMessage(_, Versioned::V1(v)) => { match v {}; }, diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index d715a5d42fe1..90807558b255 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -251,22 +251,26 @@ impl View { /// A protocol-versioned type. #[derive(Debug, Clone, PartialEq, Eq)] -pub enum Versioned { +pub enum Versioned { /// V1 type. V1(V1), + /// VStaging type. + VStaging(VStaging), } -impl Versioned<&'_ V1> { +impl Versioned<&'_ V1, &'_ VStaging> { /// Convert to a fully-owned version of the message. - pub fn clone_inner(&self) -> Versioned { + pub fn clone_inner(&self) -> Versioned { match *self { Versioned::V1(inner) => Versioned::V1(inner.clone()), + Versioned::VStaging(inner) => Versioned::VStaging(inner.clone()), } } } -/// All supported versions of the validation protocol message v1. -pub type VersionedValidationProtocol = Versioned; +/// All supported versions of the validation protocol message. 
+pub type VersionedValidationProtocol = + Versioned; impl From for VersionedValidationProtocol { fn from(v1: v1::ValidationProtocol) -> Self { @@ -274,8 +278,14 @@ impl From for VersionedValidationProtocol { } } +impl From for VersionedValidationProtocol { + fn from(vstaging: vstaging::ValidationProtocol) -> Self { + VersionedValidationProtocol::VStaging(vstaging) + } +} + /// All supported versions of the collation protocol message. -pub type VersionedCollationProtocol = Versioned; +pub type VersionedCollationProtocol = Versioned; impl From for VersionedCollationProtocol { fn from(v1: v1::CollationProtocol) -> Self { @@ -289,16 +299,21 @@ macro_rules! impl_versioned_full_protocol_from { fn from(versioned_from: $from) -> $out { match versioned_from { Versioned::V1(x) => Versioned::V1(x.into()), + Versioned::VStaging(x) => Versioned::VStaging(x.into()), } } } }; } - /// Implement `TryFrom` for one versioned enum variant into the inner type. /// `$m_ty::$variant(inner) -> Ok(inner)` macro_rules! impl_versioned_try_from { - ($from:ty, $out:ty, $v1_pat:pat => $v1_out:expr) => { + ( + $from:ty, + $out:ty, + $v1_pat:pat => $v1_out:expr, + $vstaging_pat:pat => $vstaging_out:expr + ) => { impl TryFrom<$from> for $out { type Error = crate::WrongVariant; @@ -306,6 +321,7 @@ macro_rules! impl_versioned_try_from { #[allow(unreachable_patterns)] // when there is only one variant match x { Versioned::V1($v1_pat) => Ok(Versioned::V1($v1_out)), + Versioned::VStaging($vstaging_pat) => Ok(Versioned::VStaging($vstaging_out)), _ => Err(crate::WrongVariant), } } @@ -318,6 +334,8 @@ macro_rules! impl_versioned_try_from { #[allow(unreachable_patterns)] // when there is only one variant match x { Versioned::V1($v1_pat) => Ok(Versioned::V1($v1_out.clone())), + Versioned::VStaging($vstaging_pat) => + Ok(Versioned::VStaging($vstaging_out.clone())), _ => Err(crate::WrongVariant), } } @@ -326,7 +344,8 @@ macro_rules! impl_versioned_try_from { } /// Version-annotated messages used by the bitfield distribution subsystem. -pub type BitfieldDistributionMessage = Versioned; +pub type BitfieldDistributionMessage = + Versioned; impl_versioned_full_protocol_from!( BitfieldDistributionMessage, VersionedValidationProtocol, @@ -335,11 +354,13 @@ impl_versioned_full_protocol_from!( impl_versioned_try_from!( VersionedValidationProtocol, BitfieldDistributionMessage, - v1::ValidationProtocol::BitfieldDistribution(x) => x + v1::ValidationProtocol::BitfieldDistribution(x) => x, + vstaging::ValidationProtocol::BitfieldDistribution(x) => x ); /// Version-annotated messages used by the statement distribution subsystem. -pub type StatementDistributionMessage = Versioned; +pub type StatementDistributionMessage = + Versioned; impl_versioned_full_protocol_from!( StatementDistributionMessage, VersionedValidationProtocol, @@ -348,11 +369,13 @@ impl_versioned_full_protocol_from!( impl_versioned_try_from!( VersionedValidationProtocol, StatementDistributionMessage, - v1::ValidationProtocol::StatementDistribution(x) => x + v1::ValidationProtocol::StatementDistribution(x) => x, + vstaging::ValidationProtocol::StatementDistribution(x) => x ); /// Version-annotated messages used by the approval distribution subsystem. 
-pub type ApprovalDistributionMessage = Versioned; +pub type ApprovalDistributionMessage = + Versioned; impl_versioned_full_protocol_from!( ApprovalDistributionMessage, VersionedValidationProtocol, @@ -361,11 +384,14 @@ impl_versioned_full_protocol_from!( impl_versioned_try_from!( VersionedValidationProtocol, ApprovalDistributionMessage, - v1::ValidationProtocol::ApprovalDistribution(x) => x + v1::ValidationProtocol::ApprovalDistribution(x) => x, + vstaging::ValidationProtocol::ApprovalDistribution(x) => x + ); /// Version-annotated messages used by the gossip-support subsystem (this is void). -pub type GossipSupportNetworkMessage = Versioned; +pub type GossipSupportNetworkMessage = + Versioned; // This is a void enum placeholder, so never gets sent over the wire. impl TryFrom for GossipSupportNetworkMessage { type Error = WrongVariant; @@ -382,7 +408,8 @@ impl<'a> TryFrom<&'a VersionedValidationProtocol> for GossipSupportNetworkMessag } /// Version-annotated messages used by the bitfield distribution subsystem. -pub type CollatorProtocolMessage = Versioned; +pub type CollatorProtocolMessage = + Versioned; impl_versioned_full_protocol_from!( CollatorProtocolMessage, VersionedCollationProtocol, @@ -391,9 +418,59 @@ impl_versioned_full_protocol_from!( impl_versioned_try_from!( VersionedCollationProtocol, CollatorProtocolMessage, - v1::CollationProtocol::CollatorProtocol(x) => x + v1::CollationProtocol::CollatorProtocol(x) => x, + vstaging::CollationProtocol::CollatorProtocol(x) => x ); +/// A staging version of the validation/collator protocol. +/// Changes: +/// - assignment cert type changed, see `IndirectAssignmentCertV2`. +pub mod vstaging { + use parity_scale_codec::{Decode, Encode}; + use polkadot_node_primitives::approval::{ + IndirectAssignmentCertV2, IndirectSignedApprovalVote, + }; + + use polkadot_primitives::CandidateIndex; + + // Re-export stuff that has not changed since v1. + pub use crate::v1::{ + declare_signature_payload, BitfieldDistributionMessage, CollationProtocol, + CollatorProtocolMessage, GossipSupportNetworkMessage, StatementDistributionMessage, + StatementMetadata, + }; + + /// All network messages on the validation peer-set. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq, derive_more::From)] + pub enum ValidationProtocol { + /// Bitfield distribution messages + #[codec(index = 0)] + #[from] + BitfieldDistribution(BitfieldDistributionMessage), + /// Statement distribution messages + #[codec(index = 1)] + #[from] + StatementDistribution(StatementDistributionMessage), + /// Approval distribution messages + #[codec(index = 2)] + #[from] + ApprovalDistribution(ApprovalDistributionMessage), + } + + /// Network messages used by the approval distribution subsystem. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub enum ApprovalDistributionMessage { + /// Assignments for candidates in recent, unfinalized blocks. + /// + /// Actually checking the assignment may yield a different result. + #[codec(index = 0)] + Assignments(Vec<(IndirectAssignmentCertV2, Vec)>), + /// Approvals for candidates in some recent, unfinalized block. + #[codec(index = 1)] + Approvals(Vec), + } +} + /// v1 notification protocol types. pub mod v1 { use parity_scale_codec::{Decode, Encode}; @@ -493,9 +570,6 @@ pub mod v1 { /// Approvals for candidates in some recent, unfinalized block. 
#[codec(index = 1)] Approvals(Vec), - /// Assignments version 2 supporting multiple candidates - #[codec(index = 2)] - AssignmentsV2(Vec<(IndirectAssignmentCert, Vec)>), } /// Dummy network message type, so we will receive connect/disconnect events. @@ -554,3 +628,11 @@ pub mod v1 { payload } } + +/// Returns the subset of `peers` with the specified `version`. +pub fn filter_by_peer_version( + peers: &Vec<(PeerId, peer_set::ProtocolVersion)>, + version: peer_set::ProtocolVersion, +) -> Vec { + peers.iter().filter(|(_, v)| v == &version).map(|(p, _)| *p).collect::>() +} diff --git a/node/network/protocol/src/peer_set.rs b/node/network/protocol/src/peer_set.rs index a21a4bb2b42c..920efc38fd22 100644 --- a/node/network/protocol/src/peer_set.rs +++ b/node/network/protocol/src/peer_set.rs @@ -118,7 +118,7 @@ impl PeerSet { /// of the main protocol name reported by [`PeerSetProtocolNames::get_main_name()`]. pub fn get_main_version(self) -> ProtocolVersion { match self { - PeerSet::Validation => ValidationVersion::V1.into(), + PeerSet::Validation => ValidationVersion::V2.into(), PeerSet::Collation => CollationVersion::V1.into(), } } @@ -141,12 +141,11 @@ impl PeerSet { // Unfortunately, labels must be static strings, so we must manually cover them // for all protocol versions here. match self { - PeerSet::Validation => - if version == ValidationVersion::V1.into() { - Some("validation/1") - } else { - None - }, + PeerSet::Validation => match version { + _ if version == ValidationVersion::V1.into() => Some("validation/1"), + _ if version == ValidationVersion::V2.into() => Some("validation/2"), + _ => None, + }, PeerSet::Collation => if version == CollationVersion::V1.into() { Some("collation/1") @@ -211,7 +210,7 @@ impl From for u32 { pub enum ValidationVersion { /// The first version. V1 = 1, - /// The second version adds `AssignmentsV2` message to approval distribution. + /// The second version adds `AssignmentsV2` message to approval distribution. VStaging V2 = 2, } @@ -227,6 +226,23 @@ impl From for ProtocolVersion { ProtocolVersion(version as u32) } } +/// Marker indicating the version is unknown. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct UnknownVersion; + +impl TryFrom for ValidationVersion { + type Error = UnknownVersion; + + fn try_from(p: ProtocolVersion) -> Result { + for v in Self::iter() { + if v as u32 == p.0 { + return Ok(v) + } + } + + Err(UnknownVersion) + } +} impl From for ProtocolVersion { fn from(version: CollationVersion) -> ProtocolVersion { diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 769c50322061..2d90d29d25d7 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -1700,6 +1700,22 @@ async fn handle_network_update( } } }, + NetworkBridgeEvent::PeerMessage(peer, Versioned::VStaging(message)) => { + handle_incoming_message_and_circulate( + peer, + topology_storage, + peers, + active_heads, + recent_outdated_heads, + ctx, + message, + req_sender, + metrics, + runtime, + rng, + ) + .await; + }, NetworkBridgeEvent::PeerMessage(peer, Versioned::V1(message)) => { handle_incoming_message_and_circulate( peer, diff --git a/node/primitives/src/approval.rs b/node/primitives/src/approval.rs index e43ec3f75d68..e51111e6e4c8 100644 --- a/node/primitives/src/approval.rs +++ b/node/primitives/src/approval.rs @@ -75,6 +75,20 @@ pub enum AssignmentCertKind { /// The core index chosen in this cert. 
core_index: CoreIndex, }, +} + +/// Certificate is changed compared to `AssignmentCertKind`: +/// - introduced RelayVRFModuloCompact +#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] +pub enum AssignmentCertKindV2 { + /// An assignment story based on the VRF that authorized the relay-chain block where the + /// candidate was included combined with a sample number. + /// + /// The context used to produce bytes is [`RELAY_VRF_MODULO_CONTEXT`] + RelayVRFModulo { + /// The sample number used in this cert. + sample: u32, + }, /// Multiple assignment stories based on the VRF that authorized the relay-chain block where the /// candidate was included combined with a sample number. /// @@ -85,6 +99,14 @@ pub enum AssignmentCertKind { /// The assigned cores. core_indices: Vec, }, + /// An assignment story based on the VRF that authorized the relay-chain block where the + /// candidate was included combined with the index of a particular core. + /// + /// The context is [`RELAY_VRF_DELAY_CONTEXT`] + RelayVRFDelay { + /// The core index chosen in this cert. + core_index: CoreIndex, + }, } /// A certification of assignment. @@ -96,6 +118,51 @@ pub struct AssignmentCert { pub vrf: (VRFOutput, VRFProof), } +/// A certification of assignment. +#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] +pub struct AssignmentCertV2 { + /// The criterion which is claimed to be met by this cert. + pub kind: AssignmentCertKindV2, + /// The VRF showing the criterion is met. + pub vrf: (VRFOutput, VRFProof), +} + +impl From for AssignmentCertV2 { + fn from(cert: AssignmentCert) -> Self { + Self { + kind: match cert.kind { + AssignmentCertKind::RelayVRFDelay { core_index } => + AssignmentCertKindV2::RelayVRFDelay { core_index }, + AssignmentCertKind::RelayVRFModulo { sample } => + AssignmentCertKindV2::RelayVRFModulo { sample }, + }, + vrf: cert.vrf, + } + } +} + +/// Errors that can occur when trying to convert to/from assignment v1/v2 +pub enum AssignmentConversionError { + /// Assignment certificate is not supported in v1. + CertificateNotSupported, +} + +impl TryFrom for AssignmentCert { + type Error = AssignmentConversionError; + fn try_from(cert: AssignmentCertV2) -> Result { + Ok(Self { + kind: match cert.kind { + AssignmentCertKindV2::RelayVRFDelay { core_index } => + AssignmentCertKind::RelayVRFDelay { core_index }, + AssignmentCertKindV2::RelayVRFModulo { sample } => + AssignmentCertKind::RelayVRFModulo { sample }, + // Not supported + _ => return Err(AssignmentConversionError::CertificateNotSupported), + }, + vrf: cert.vrf, + }) + } +} /// An assignment criterion which refers to the candidate under which the assignment is /// relevant by block hash. #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] @@ -107,6 +174,40 @@ pub struct IndirectAssignmentCert { /// The cert itself. pub cert: AssignmentCert, } +/// An assignment criterion which refers to the candidate under which the assignment is +/// relevant by block hash. +#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] +pub struct IndirectAssignmentCertV2 { + /// A block hash where the candidate appears. + pub block_hash: Hash, + /// The validator index. + pub validator: ValidatorIndex, + /// The cert itself. 
+ pub cert: AssignmentCertV2, +} + +impl From for IndirectAssignmentCertV2 { + fn from(indirect_cert: IndirectAssignmentCert) -> Self { + Self { + block_hash: indirect_cert.block_hash, + validator: indirect_cert.validator, + cert: indirect_cert.cert.into(), + } + } +} + +impl TryFrom for IndirectAssignmentCert { + type Error = AssignmentConversionError; + fn try_from( + indirect_cert: IndirectAssignmentCertV2, + ) -> Result { + Ok(Self { + block_hash: indirect_cert.block_hash, + validator: indirect_cert.validator, + cert: indirect_cert.cert.try_into()?, + }) + } +} /// A signed approval vote which references the candidate indirectly via the block. /// diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index f159a5f80482..078b9b5ed049 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -33,7 +33,7 @@ use polkadot_node_network_protocol::{ UnifiedReputationChange, }; use polkadot_node_primitives::{ - approval::{BlockApprovalMeta, IndirectAssignmentCert, IndirectSignedApprovalVote}, + approval::{BlockApprovalMeta, IndirectAssignmentCertV2, IndirectSignedApprovalVote}, AvailableData, BabeEpoch, BlockWeight, CandidateVotes, CollationGenerationConfig, CollationSecondedSignal, DisputeMessage, DisputeStatus, ErasureChunk, PoV, SignedDisputeStatement, SignedFullStatement, ValidationResult, @@ -769,7 +769,7 @@ pub enum ApprovalVotingMessage { /// Check if the assignment is valid and can be accepted by our view of the protocol. /// Should not be sent unless the block hash is known. CheckAndImportAssignment( - IndirectAssignmentCert, + IndirectAssignmentCertV2, Vec, oneshot::Sender, ), @@ -805,7 +805,7 @@ pub enum ApprovalDistributionMessage { NewBlocks(Vec), /// Distribute an assignment cert from the local validator. The cert is assumed /// to be valid, relevant, and for the given relay-parent and validator index. - DistributeAssignment(IndirectAssignmentCert, Vec), + DistributeAssignment(IndirectAssignmentCertV2, Vec), /// Distribute an approval vote for the local validator. The approval vote is assumed to be /// valid, relevant, and the corresponding approval already issued. /// If not, the subsystem is free to drop the message. 
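The pair of conversions above is meant to be lossless for the existing kinds and fallible only for the new compact variant, which has no v1 representation. A self-contained sketch of that contract, using simplified stand-ins for `AssignmentCertKind`/`AssignmentCertKindV2` (the real types also carry the VRF output and proof):

#[derive(Clone, Debug, PartialEq)]
enum KindV1 {
    RelayVrfModulo { sample: u32 },
    RelayVrfDelay { core_index: u32 },
}

#[derive(Clone, Debug, PartialEq)]
enum KindV2 {
    RelayVrfModulo { sample: u32 },
    RelayVrfModuloCompact { sample: u32, core_indices: Vec<u32> },
    RelayVrfDelay { core_index: u32 },
}

impl From<KindV1> for KindV2 {
    fn from(k: KindV1) -> Self {
        match k {
            KindV1::RelayVrfModulo { sample } => KindV2::RelayVrfModulo { sample },
            KindV1::RelayVrfDelay { core_index } => KindV2::RelayVrfDelay { core_index },
        }
    }
}

impl TryFrom<KindV2> for KindV1 {
    type Error = ();
    fn try_from(k: KindV2) -> Result<Self, ()> {
        match k {
            KindV2::RelayVrfModulo { sample } => Ok(KindV1::RelayVrfModulo { sample }),
            KindV2::RelayVrfDelay { core_index } => Ok(KindV1::RelayVrfDelay { core_index }),
            // The compact certificate cannot be downgraded; callers drop it for v1 peers.
            KindV2::RelayVrfModuloCompact { .. } => Err(()),
        }
    }
}

fn main() {
    // Every v1 kind round-trips through v2 unchanged.
    let v1 = KindV1::RelayVrfModulo { sample: 3 };
    assert_eq!(KindV1::try_from(KindV2::from(v1.clone())), Ok(v1));

    // The compact variant is the only one that fails to convert back.
    let compact = KindV2::RelayVrfModuloCompact { sample: 0, core_indices: vec![1, 2] };
    assert!(KindV1::try_from(compact).is_err());
}

This mirrors how `send_assignments_batched` behaves earlier in the series: certificates whose `try_into()` fails are simply not forwarded to v1 peers.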
From b672d8683575ea564a0c14470021ae900db1243f Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Thu, 23 Feb 2023 13:11:55 +0000 Subject: [PATCH 020/105] comment aggression metrics Signed-off-by: Andrei Sandu --- .../approval-distribution/src/metrics.rs | 69 ++++++++++--------- 1 file changed, 37 insertions(+), 32 deletions(-) diff --git a/node/network/approval-distribution/src/metrics.rs b/node/network/approval-distribution/src/metrics.rs index 47bae7065a05..04ab02ea0130 100644 --- a/node/network/approval-distribution/src/metrics.rs +++ b/node/network/approval-distribution/src/metrics.rs @@ -25,9 +25,8 @@ struct MetricsInner { assignments_imported_total: prometheus::Counter, approvals_imported_total: prometheus::Counter, unified_with_peer_total: prometheus::Counter, - aggression_l1_messages_total: prometheus::Counter, - aggression_l2_messages_total: prometheus::Counter, - + // aggression_l1_messages_total: prometheus::Counter, + // aggression_l2_messages_total: prometheus::Counter, time_unify_with_peer: prometheus::Histogram, time_import_pending_now_known: prometheus::Histogram, time_awaiting_approval_voting: prometheus::Histogram, @@ -72,17 +71,17 @@ impl Metrics { .map(|metrics| metrics.time_awaiting_approval_voting.start_timer()) } - pub(crate) fn on_aggression_l1(&self) { - if let Some(metrics) = &self.0 { - metrics.aggression_l1_messages_total.inc(); - } - } - - pub(crate) fn on_aggression_l2(&self) { - if let Some(metrics) = &self.0 { - metrics.aggression_l2_messages_total.inc(); - } - } + // pub(crate) fn on_aggression_l1(&self) { + // if let Some(metrics) = &self.0 { + // metrics.aggression_l1_messages_total.inc(); + // } + // } + + // pub(crate) fn on_aggression_l2(&self) { + // if let Some(metrics) = &self.0 { + // metrics.aggression_l2_messages_total.inc(); + // } + // } } impl MetricsTrait for Metrics { @@ -109,25 +108,31 @@ impl MetricsTrait for Metrics { )?, registry, )?, - aggression_l1_messages_total: prometheus::register( - prometheus::Counter::new( - "polkadot_parachain_approval_distribution_aggression_l1_messages_total", - "Number of messages in approval distribution for which aggression L1 has been triggered", - )?, - registry, - )?, - aggression_l2_messages_total: prometheus::register( - prometheus::Counter::new( - "polkadot_parachain_approval_distribution_aggression_l2_messages_total", - "Number of messages in approval distribution for which aggression L2 has been triggered", - )?, - registry, - )?, + // aggression_l1_messages_total: prometheus::register( + // prometheus::Counter::new( + // "polkadot_parachain_approval_distribution_aggression_l1_messages_total", + // "Number of messages in approval distribution for which aggression L1 has been triggered", + // )?, + // registry, + // )?, + // aggression_l2_messages_total: prometheus::register( + // prometheus::Counter::new( + // "polkadot_parachain_approval_distribution_aggression_l2_messages_total", + // "Number of messages in approval distribution for which aggression L2 has been triggered", + // )?, + // registry, + // )?, time_unify_with_peer: prometheus::register( - prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( - "polkadot_parachain_time_unify_with_peer", - "Time spent within fn `unify_with_peer`.", - ).buckets(vec![0.000625, 0.00125,0.0025, 0.005, 0.0075, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0,]))?, + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_parachain_time_unify_with_peer", + "Time spent within fn `unify_with_peer`.", + ) + .buckets(vec![ 
+ 0.000625, 0.00125, 0.0025, 0.005, 0.0075, 0.01, 0.025, 0.05, 0.1, 0.25, + 0.5, 1.0, 2.5, 5.0, 10.0, + ]), + )?, registry, )?, time_import_pending_now_known: prometheus::register( From 2a300de073c830cc37422715d87f9ff207a4ce31 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 24 Feb 2023 11:53:35 +0000 Subject: [PATCH 021/105] finish impl v2/v1 sending Signed-off-by: Andrei Sandu --- node/network/approval-distribution/src/lib.rs | 19 ++- node/network/bitfield-distribution/src/lib.rs | 113 ++++++++++++----- node/network/gossip-support/src/lib.rs | 4 +- .../network/statement-distribution/src/lib.rs | 118 +++++++++++++----- 4 files changed, 183 insertions(+), 71 deletions(-) diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index 2b078908c6e5..e5f7e9875397 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -1365,14 +1365,13 @@ impl State { .known_by .iter() .filter(|(p, k)| peer_filter(p, k)) - .map(|(p, _)| p) - .cloned() + .filter_map(|(p, _)| self.peer_views.get(p).map(|entry| (*p, entry.version))) .collect::>(); // Add the metadata of the assignment to the knowledge of each peer. for peer in peers.iter() { // we already filtered peers above, so this should always be Some - if let Some(entry) = entry.known_by.get_mut(peer) { + if let Some(entry) = entry.known_by.get_mut(&peer.0) { entry.sent.insert(message_subject.clone(), message_kind); } } @@ -1388,10 +1387,20 @@ impl State { "Sending an approval to peers", ); + let v1_peers = filter_by_peer_version(&peers, ValidationVersion::V1.into()); + let v2_peers = filter_by_peer_version(&peers, ValidationVersion::V2.into()); + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - peers, + v1_peers, Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( - protocol_v1::ApprovalDistributionMessage::Approvals(approvals), + protocol_v1::ApprovalDistributionMessage::Approvals(approvals.clone()), + )), + )) + .await; + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( + v2_peers, + Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( + protocol_vstaging::ApprovalDistributionMessage::Approvals(approvals), )), )) .await; diff --git a/node/network/bitfield-distribution/src/lib.rs b/node/network/bitfield-distribution/src/lib.rs index 67c9b9b5a855..642625ddb33d 100644 --- a/node/network/bitfield-distribution/src/lib.rs +++ b/node/network/bitfield-distribution/src/lib.rs @@ -25,10 +25,11 @@ use futures::{channel::oneshot, FutureExt}; use polkadot_node_network_protocol::{ - self as net_protocol, + self as net_protocol, filter_by_peer_version, grid_topology::{ GridNeighbors, RandomRouting, RequiredRouting, SessionBoundGridTopologyStorage, }, + peer_set::{ProtocolVersion, ValidationVersion}, v1 as protocol_v1, OurView, PeerId, UnifiedReputationChange as Rep, Versioned, View, }; use polkadot_node_subsystem::{ @@ -69,25 +70,44 @@ struct BitfieldGossipMessage { } impl BitfieldGossipMessage { - fn into_validation_protocol(self) -> net_protocol::VersionedValidationProtocol { - self.into_network_message().into() + fn into_validation_protocol( + self, + protocol_version: ProtocolVersion, + ) -> net_protocol::VersionedValidationProtocol { + self.into_network_message(protocol_version).into() } - fn into_network_message(self) -> net_protocol::BitfieldDistributionMessage { - Versioned::V1(protocol_v1::BitfieldDistributionMessage::Bitfield( + fn into_network_message( + self, + 
protocol_version: ProtocolVersion, + ) -> net_protocol::BitfieldDistributionMessage { + // VStaging re-exports v1 message type. + let message = protocol_v1::BitfieldDistributionMessage::Bitfield( self.relay_parent, self.signed_availability.into(), - )) + ); + + match ValidationVersion::try_from(protocol_version) { + Ok(ValidationVersion::V1) => Versioned::V1(message), + Ok(ValidationVersion::V2) => Versioned::VStaging(message), + Err(_) => unreachable!("Invalid peer protocol"), + } } } +// We keep track of each peer view and protocol version using this struct. +struct PeerEntry { + pub view: View, + pub version: net_protocol::peer_set::ProtocolVersion, +} + /// Data used to track information of peers and relay parents the /// overseer ordered us to work on. -#[derive(Default, Debug)] +#[derive(Default)] struct ProtocolState { - /// Track all active peers and their views + /// Track all active peer views and protocol versions /// to determine what is relevant to them. - peer_views: HashMap, + peer_entries: HashMap, /// The current and previous gossip topologies topologies: SessionBoundGridTopologyStorage, @@ -334,7 +354,7 @@ async fn handle_bitfield_distribution( ctx, job_data, topology, - &mut state.peer_views, + &mut state.peer_entries, validator, msg, required_routing, @@ -353,7 +373,7 @@ async fn relay_message( ctx: &mut Context, job_data: &mut PerRelayParentData, topology_neighbors: &GridNeighbors, - peer_views: &mut HashMap, + peer_entries: &mut HashMap, validator: ValidatorId, message: BitfieldGossipMessage, required_routing: RequiredRouting, @@ -371,16 +391,16 @@ async fn relay_message( .await; drop(_span); - let total_peers = peer_views.len(); + let total_peers = peer_entries.len(); let mut random_routing: RandomRouting = Default::default(); let _span = span.child("interested-peers"); // pass on the bitfield distribution to all interested peers - let interested_peers = peer_views + let interested_peers = peer_entries .iter() - .filter_map(|(peer, view)| { + .filter_map(|(peer, entry)| { // check interest in the peer in this message's relay parent - if view.contains(&message.relay_parent) { + if entry.view.contains(&message.relay_parent) { let message_needed = job_data.message_from_validator_needed_by_peer(&peer, &validator); if message_needed { @@ -395,7 +415,7 @@ async fn relay_message( }; if need_routing { - Some(*peer) + Some((*peer, entry.version)) } else { None } @@ -406,13 +426,13 @@ async fn relay_message( None } }) - .collect::>(); + .collect::>(); interested_peers.iter().for_each(|peer| { // track the message as sent for this peer job_data .message_sent_to_peer - .entry(*peer) + .entry(peer.0) .or_default() .insert(validator.clone()); }); @@ -427,9 +447,19 @@ async fn relay_message( ); } else { let _span = span.child("gossip"); + + let v1_peers = filter_by_peer_version(&interested_peers, ValidationVersion::V1.into()); + let v2_peers = filter_by_peer_version(&interested_peers, ValidationVersion::V2.into()); + + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( + v1_peers, + message.clone().into_validation_protocol(ValidationVersion::V1.into()), + )) + .await; + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - interested_peers, - message.into_validation_protocol(), + v2_peers, + message.into_validation_protocol(ValidationVersion::V2.into()), )) .await; } @@ -544,7 +574,7 @@ async fn process_incoming_peer_message( ctx, job_data, topology, - &mut state.peer_views, + &mut state.peer_entries, validator, message, required_routing, @@ -568,15 +598,18 
@@ async fn handle_network_msg( let _timer = metrics.time_handle_network_msg(); match bridge_message { - NetworkBridgeEvent::PeerConnected(peer, role, _, _) => { + NetworkBridgeEvent::PeerConnected(peer, role, version, _) => { gum::trace!(target: LOG_TARGET, ?peer, ?role, "Peer connected"); // insert if none already present - state.peer_views.entry(peer).or_default(); + state + .peer_entries + .entry(peer) + .or_insert(PeerEntry { view: Default::default(), version }); }, NetworkBridgeEvent::PeerDisconnected(peer) => { gum::trace!(target: LOG_TARGET, ?peer, "Peer disconnected"); // get rid of superfluous data - state.peer_views.remove(&peer); + state.peer_entries.remove(&peer); }, NetworkBridgeEvent::NewGossipTopology(gossip_topology) => { let session_index = gossip_topology.session; @@ -604,14 +637,25 @@ async fn handle_network_msg( // in case we already knew that peer in the past // it might have had an existing view, we use to initialize // and minimize the delta on `PeerViewChange` to be sent - if let Some(old_view) = state.peer_views.remove(&new_peer) { - handle_peer_view_change(ctx, state, new_peer, old_view, rng).await; + if let Some(entry) = state.peer_entries.remove(&new_peer) { + handle_peer_view_change(ctx, state, new_peer, entry.version, entry.view, rng) + .await; } } }, - NetworkBridgeEvent::PeerViewChange(peerid, new_view) => { - gum::trace!(target: LOG_TARGET, ?peerid, ?new_view, "Peer view change"); - handle_peer_view_change(ctx, state, peerid, new_view, rng).await; + NetworkBridgeEvent::PeerViewChange(peer_id, new_view) => { + gum::trace!(target: LOG_TARGET, ?peer_id, ?new_view, "Peer view change"); + if let Some(entry) = state.peer_entries.get(&peer_id) { + handle_peer_view_change( + ctx, + state, + peer_id, + entry.version, + entry.view.clone(), + rng, + ) + .await; + } }, NetworkBridgeEvent::OurViewChange(new_view) => { gum::trace!(target: LOG_TARGET, ?new_view, "Our view change"); @@ -652,13 +696,15 @@ async fn handle_peer_view_change( ctx: &mut Context, state: &mut ProtocolState, origin: PeerId, + version: ProtocolVersion, view: View, rng: &mut (impl CryptoRng + Rng), ) { let added = state - .peer_views + .peer_entries .entry(origin) - .or_default() + .or_insert(PeerEntry { version, view: Default::default() }) + .view .replace_difference(view) .cloned() .collect::>(); @@ -699,7 +745,7 @@ async fn handle_peer_view_change( .collect(); for (validator, message) in delta_set.into_iter() { - send_tracked_gossip_message(ctx, state, origin, validator, message).await; + send_tracked_gossip_message(ctx, state, origin, version, validator, message).await; } } @@ -709,6 +755,7 @@ async fn send_tracked_gossip_message( ctx: &mut Context, state: &mut ProtocolState, dest: PeerId, + protocol_version: ProtocolVersion, validator: ValidatorId, message: BitfieldGossipMessage, ) { @@ -731,7 +778,7 @@ async fn send_tracked_gossip_message( ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( vec![dest], - message.into_validation_protocol(), + message.into_validation_protocol(protocol_version), )) .await; } diff --git a/node/network/gossip-support/src/lib.rs b/node/network/gossip-support/src/lib.rs index ab6412023ead..14a6cb46da40 100644 --- a/node/network/gossip-support/src/lib.rs +++ b/node/network/gossip-support/src/lib.rs @@ -404,9 +404,7 @@ where NetworkBridgeEvent::OurViewChange(_) => {}, NetworkBridgeEvent::PeerViewChange(_, _) => {}, NetworkBridgeEvent::NewGossipTopology { .. 
} => {}, - NetworkBridgeEvent::PeerMessage(_, Versioned::VStaging(v)) => { - match v {}; - }, + NetworkBridgeEvent::PeerMessage(_, Versioned::VStaging(v)) | NetworkBridgeEvent::PeerMessage(_, Versioned::V1(v)) => { match v {}; }, diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 2d90d29d25d7..45824bd452dd 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -26,12 +26,13 @@ use error::{log_error, FatalResult, JfyiErrorResult}; use parity_scale_codec::Encode; use polkadot_node_network_protocol::{ - self as net_protocol, + self as net_protocol, filter_by_peer_version, grid_topology::{GridNeighbors, RequiredRouting, SessionBoundGridTopologyStorage}, - peer_set::{IsAuthority, PeerSet}, + peer_set::{IsAuthority, PeerSet, ProtocolVersion, ValidationVersion}, request_response::{v1 as request_v1, IncomingRequestReceiver}, v1::{self as protocol_v1, StatementMetadata}, - IfDisconnected, PeerId, UnifiedReputationChange as Rep, Versioned, View, + vstaging as protocol_vstaging, IfDisconnected, PeerId, UnifiedReputationChange as Rep, + Versioned, View, }; use polkadot_node_primitives::{SignedFullStatement, Statement, UncheckedSignedFullStatement}; use polkadot_node_subsystem_util::{self as util, rand, MIN_GOSSIP_PEERS}; @@ -444,6 +445,8 @@ struct PeerData { view_knowledge: HashMap, /// Peer might be known as authority with the given ids. maybe_authority: Option>, + /// Protocol version + version: ProtocolVersion, } impl PeerData { @@ -965,24 +968,47 @@ fn statement_message( relay_parent: Hash, statement: SignedFullStatement, metrics: &Metrics, + protocol_version: ProtocolVersion, ) -> net_protocol::VersionedValidationProtocol { let (is_large, size) = is_statement_large(&statement); if let Some(size) = size { metrics.on_created_message(size); } - let msg = if is_large { - protocol_v1::StatementDistributionMessage::LargeStatement(StatementMetadata { - relay_parent, - candidate_hash: statement.payload().candidate_hash(), - signed_by: statement.validator_index(), - signature: statement.signature().clone(), - }) - } else { - protocol_v1::StatementDistributionMessage::Statement(relay_parent, statement.into()) - }; + match ValidationVersion::try_from(protocol_version) { + Ok(ValidationVersion::V1) => { + let msg = if is_large { + protocol_v1::StatementDistributionMessage::LargeStatement(StatementMetadata { + relay_parent, + candidate_hash: statement.payload().candidate_hash(), + signed_by: statement.validator_index(), + signature: statement.signature().clone(), + }) + } else { + protocol_v1::StatementDistributionMessage::Statement(relay_parent, statement.into()) + }; + + protocol_v1::ValidationProtocol::StatementDistribution(msg).into() + }, + Ok(ValidationVersion::V2) => { + let msg = if is_large { + protocol_vstaging::StatementDistributionMessage::LargeStatement(StatementMetadata { + relay_parent, + candidate_hash: statement.payload().candidate_hash(), + signed_by: statement.validator_index(), + signature: statement.signature().clone(), + }) + } else { + protocol_vstaging::StatementDistributionMessage::Statement( + relay_parent, + statement.into(), + ) + }; - protocol_v1::ValidationProtocol::StatementDistribution(msg).into() + protocol_vstaging::ValidationProtocol::StatementDistribution(msg).into() + }, + Err(_) => unreachable!("Invalid peer protocol"), + } } /// Check whether a statement should be treated as large statement. 
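Both protocol arms above apply the same size rule: a small statement is gossiped inline, while a large one is announced only through its metadata and fetched over the request/response protocol. A rough sketch of that decision; the threshold constant and message shapes below are assumptions made for the sketch, with the real decision coming from `is_statement_large` as used above:

// Illustrative threshold only; the actual cut-off is decided by `is_statement_large`.
const LARGE_STATEMENT_THRESHOLD: usize = 4096;

enum OutgoingStatement {
    // Full, encoded statement sent inline on the gossip protocol.
    Inline(Vec<u8>),
    // Metadata only (here just a hash); peers request the full body on demand.
    LargeMetadata([u8; 32]),
}

fn to_outgoing(encoded: Vec<u8>, candidate_hash: [u8; 32]) -> OutgoingStatement {
    if encoded.len() >= LARGE_STATEMENT_THRESHOLD {
        OutgoingStatement::LargeMetadata(candidate_hash)
    } else {
        OutgoingStatement::Inline(encoded)
    }
}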
@@ -1067,20 +1093,19 @@ async fn circulate_statement<'a, Context>( peers_to_send.len() == peers_to_send.clone().into_iter().collect::>().len(), "We filter out duplicates above. qed.", ); - let peers_to_send: Vec<(PeerId, bool)> = peers_to_send + let peers_to_send: Vec<(PeerId, bool, ProtocolVersion)> = peers_to_send .into_iter() .map(|peer_id| { - let new = peers - .get_mut(&peer_id) - .expect("a subset is taken above, so it exists; qed") - .send(&relay_parent, &fingerprint); - (peer_id, new) + let peer_data = + peers.get_mut(&peer_id).expect("a subset is taken above, so it exists; qed"); + + let new = peer_data.send(&relay_parent, &fingerprint); + (peer_id, new, peer_data.version) }) .collect(); // Send all these peers the initial statement. if !peers_to_send.is_empty() { - let payload = statement_message(relay_parent, stored.statement.clone(), metrics); gum::trace!( target: LOG_TARGET, ?peers_to_send, @@ -1088,16 +1113,38 @@ async fn circulate_statement<'a, Context>( statement = ?stored.statement, "Sending statement", ); - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - peers_to_send.iter().map(|(p, _)| *p).collect(), - payload, - )) - .await; + + let peers_to_send = + peers_to_send.iter().map(|(p, _, version)| (*p, *version)).collect::>(); + let v1_peers = filter_by_peer_version(&peers_to_send, ValidationVersion::V1.into()); + let v2_peers = filter_by_peer_version(&peers_to_send, ValidationVersion::V2.into()); + + if v1_peers.len() > 0 { + let payload = statement_message( + relay_parent, + stored.statement.clone(), + metrics, + ValidationVersion::V1.into(), + ); + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(v1_peers, payload)) + .await; + } + + if v2_peers.len() > 0 { + let payload = statement_message( + relay_parent, + stored.statement.clone(), + metrics, + ValidationVersion::V2.into(), + ); + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(v2_peers, payload)) + .await; + } } peers_to_send .into_iter() - .filter_map(|(peer, needs_dependent)| if needs_dependent { Some(peer) } else { None }) + .filter_map(|(peer, needs_dependent, _)| if needs_dependent { Some(peer) } else { None }) .collect() } @@ -1118,7 +1165,12 @@ async fn send_statements_about( continue } peer_data.send(&relay_parent, &fingerprint); - let payload = statement_message(relay_parent, statement.statement.clone(), metrics); + let payload = statement_message( + relay_parent, + statement.statement.clone(), + metrics, + peer_data.version, + ); gum::trace!( target: LOG_TARGET, @@ -1151,7 +1203,12 @@ async fn send_statements( continue } peer_data.send(&relay_parent, &fingerprint); - let payload = statement_message(relay_parent, statement.statement.clone(), metrics); + let payload = statement_message( + relay_parent, + statement.statement.clone(), + metrics, + peer_data.version, + ); gum::trace!( target: LOG_TARGET, @@ -1645,7 +1702,7 @@ async fn handle_network_update( R: rand::Rng, { match update { - NetworkBridgeEvent::PeerConnected(peer, role, _, maybe_authority) => { + NetworkBridgeEvent::PeerConnected(peer, role, version, maybe_authority) => { gum::trace!(target: LOG_TARGET, ?peer, ?role, "Peer connected"); peers.insert( peer, @@ -1653,6 +1710,7 @@ async fn handle_network_update( view: Default::default(), view_knowledge: Default::default(), maybe_authority: maybe_authority.clone(), + version, }, ); if let Some(authority_ids) = maybe_authority { From 7858ad524077565847dda9ad7e60833e3d757428 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 24 Feb 2023 13:19:29 +0000 
Subject: [PATCH 022/105] fix view change handling typo bug Signed-off-by: Andrei Sandu --- node/network/bitfield-distribution/src/lib.rs | 10 +--------- node/network/bridge/src/tx/mod.rs | 2 ++ 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/node/network/bitfield-distribution/src/lib.rs b/node/network/bitfield-distribution/src/lib.rs index 642625ddb33d..84f9ffa0e3c8 100644 --- a/node/network/bitfield-distribution/src/lib.rs +++ b/node/network/bitfield-distribution/src/lib.rs @@ -646,15 +646,7 @@ async fn handle_network_msg( NetworkBridgeEvent::PeerViewChange(peer_id, new_view) => { gum::trace!(target: LOG_TARGET, ?peer_id, ?new_view, "Peer view change"); if let Some(entry) = state.peer_entries.get(&peer_id) { - handle_peer_view_change( - ctx, - state, - peer_id, - entry.version, - entry.view.clone(), - rng, - ) - .await; + handle_peer_view_change(ctx, state, peer_id, entry.version, new_view, rng).await; } }, NetworkBridgeEvent::OurViewChange(new_view) => { diff --git a/node/network/bridge/src/tx/mod.rs b/node/network/bridge/src/tx/mod.rs index 02e4fbc1c350..98c00598f7fe 100644 --- a/node/network/bridge/src/tx/mod.rs +++ b/node/network/bridge/src/tx/mod.rs @@ -172,6 +172,7 @@ where gum::trace!( target: LOG_TARGET, action = "SendValidationMessages", + ?msg, num_messages = 1usize, ); @@ -197,6 +198,7 @@ where target: LOG_TARGET, action = "SendValidationMessages", num_messages = %msgs.len(), + ?msgs, ); for (peers, msg) in msgs { From 0cf73fdfdc2117d4f3c48e141ee4952ac7494a26 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 24 Feb 2023 13:32:00 +0000 Subject: [PATCH 023/105] Rename leftover V2 to vstaging Signed-off-by: Andrei Sandu --- node/network/approval-distribution/src/lib.rs | 6 +++--- node/network/bitfield-distribution/src/lib.rs | 7 ++++--- node/network/bridge/src/rx/mod.rs | 8 ++++---- node/network/bridge/src/tx/mod.rs | 2 +- node/network/protocol/src/peer_set.rs | 6 +++--- node/network/statement-distribution/src/lib.rs | 6 +++--- 6 files changed, 18 insertions(+), 17 deletions(-) diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index e5f7e9875397..1d30f20366ce 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -1388,7 +1388,7 @@ impl State { ); let v1_peers = filter_by_peer_version(&peers, ValidationVersion::V1.into()); - let v2_peers = filter_by_peer_version(&peers, ValidationVersion::V2.into()); + let v2_peers = filter_by_peer_version(&peers, ValidationVersion::VStaging.into()); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( v1_peers, @@ -1999,7 +1999,7 @@ pub(crate) async fn send_assignments_batched( peers: &Vec<(PeerId, ProtocolVersion)>, ) { let v1_peers = filter_by_peer_version(peers, ValidationVersion::V1.into()); - let v2_peers = filter_by_peer_version(peers, ValidationVersion::V2.into()); + let v2_peers = filter_by_peer_version(peers, ValidationVersion::VStaging.into()); if v1_peers.len() > 0 { let mut v1_assignments = v2_assignments.clone(); @@ -2035,7 +2035,7 @@ pub(crate) async fn send_approvals_batched( peers: &Vec<(PeerId, ProtocolVersion)>, ) { let v1_peers = filter_by_peer_version(peers, ValidationVersion::V1.into()); - let v2_peers = filter_by_peer_version(peers, ValidationVersion::V2.into()); + let v2_peers = filter_by_peer_version(peers, ValidationVersion::VStaging.into()); if v1_peers.len() > 0 { let mut batches = approvals.clone().into_iter().peekable(); diff --git 
a/node/network/bitfield-distribution/src/lib.rs b/node/network/bitfield-distribution/src/lib.rs index 84f9ffa0e3c8..71769a62e86d 100644 --- a/node/network/bitfield-distribution/src/lib.rs +++ b/node/network/bitfield-distribution/src/lib.rs @@ -89,7 +89,7 @@ impl BitfieldGossipMessage { match ValidationVersion::try_from(protocol_version) { Ok(ValidationVersion::V1) => Versioned::V1(message), - Ok(ValidationVersion::V2) => Versioned::VStaging(message), + Ok(ValidationVersion::VStaging) => Versioned::VStaging(message), Err(_) => unreachable!("Invalid peer protocol"), } } @@ -449,7 +449,8 @@ async fn relay_message( let _span = span.child("gossip"); let v1_peers = filter_by_peer_version(&interested_peers, ValidationVersion::V1.into()); - let v2_peers = filter_by_peer_version(&interested_peers, ValidationVersion::V2.into()); + let v2_peers = + filter_by_peer_version(&interested_peers, ValidationVersion::VStaging.into()); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( v1_peers, @@ -459,7 +460,7 @@ async fn relay_message( ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( v2_peers, - message.into_validation_protocol(ValidationVersion::V2.into()), + message.into_validation_protocol(ValidationVersion::VStaging.into()), )) .await; } diff --git a/node/network/bridge/src/rx/mod.rs b/node/network/bridge/src/rx/mod.rs index f004153ea9ba..d0b3459f8626 100644 --- a/node/network/bridge/src/rx/mod.rs +++ b/node/network/bridge/src/rx/mod.rs @@ -260,7 +260,7 @@ where ), &metrics, ), - ValidationVersion::V2 => send_message( + ValidationVersion::VStaging => send_message( &mut network_service, vec![peer], PeerSet::Validation, @@ -454,7 +454,7 @@ where &metrics, ) } else if expected_versions[PeerSet::Validation] == - Some(ValidationVersion::V2.into()) + Some(ValidationVersion::VStaging.into()) { handle_peer_messages::( remote, @@ -768,7 +768,7 @@ fn update_our_view( let v1_validation_peers = filter_by_version(validation_peers.as_slice(), ValidationVersion::V1.into()); let vstaging_validation_peers = - filter_by_version(&validation_peers, ValidationVersion::V2.into()); + filter_by_version(&validation_peers, ValidationVersion::VStaging.into()); send_validation_message_v1( net, @@ -894,7 +894,7 @@ fn send_validation_message_vstaging( net, peers, PeerSet::Validation, - ValidationVersion::V2.into(), + ValidationVersion::VStaging.into(), peerset_protocol_names, message, metrics, diff --git a/node/network/bridge/src/tx/mod.rs b/node/network/bridge/src/tx/mod.rs index 98c00598f7fe..3d136d40220c 100644 --- a/node/network/bridge/src/tx/mod.rs +++ b/node/network/bridge/src/tx/mod.rs @@ -379,7 +379,7 @@ fn send_validation_message_vstaging( net, peers, PeerSet::Validation, - ValidationVersion::V2.into(), + ValidationVersion::VStaging.into(), protocol_names, message, metrics, diff --git a/node/network/protocol/src/peer_set.rs b/node/network/protocol/src/peer_set.rs index 920efc38fd22..42091a931961 100644 --- a/node/network/protocol/src/peer_set.rs +++ b/node/network/protocol/src/peer_set.rs @@ -118,7 +118,7 @@ impl PeerSet { /// of the main protocol name reported by [`PeerSetProtocolNames::get_main_name()`]. 
pub fn get_main_version(self) -> ProtocolVersion { match self { - PeerSet::Validation => ValidationVersion::V2.into(), + PeerSet::Validation => ValidationVersion::VStaging.into(), PeerSet::Collation => CollationVersion::V1.into(), } } @@ -143,7 +143,7 @@ impl PeerSet { match self { PeerSet::Validation => match version { _ if version == ValidationVersion::V1.into() => Some("validation/1"), - _ if version == ValidationVersion::V2.into() => Some("validation/2"), + _ if version == ValidationVersion::VStaging.into() => Some("validation/2"), _ => None, }, PeerSet::Collation => @@ -211,7 +211,7 @@ pub enum ValidationVersion { /// The first version. V1 = 1, /// The second version adds `AssignmentsV2` message to approval distribution. VStaging - V2 = 2, + VStaging = 2, } /// Supported collation protocol versions. Only versions defined here must be used in the codebase. diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 45824bd452dd..aecb4e206f6e 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -990,7 +990,7 @@ fn statement_message( protocol_v1::ValidationProtocol::StatementDistribution(msg).into() }, - Ok(ValidationVersion::V2) => { + Ok(ValidationVersion::VStaging) => { let msg = if is_large { protocol_vstaging::StatementDistributionMessage::LargeStatement(StatementMetadata { relay_parent, @@ -1117,7 +1117,7 @@ async fn circulate_statement<'a, Context>( let peers_to_send = peers_to_send.iter().map(|(p, _, version)| (*p, *version)).collect::>(); let v1_peers = filter_by_peer_version(&peers_to_send, ValidationVersion::V1.into()); - let v2_peers = filter_by_peer_version(&peers_to_send, ValidationVersion::V2.into()); + let v2_peers = filter_by_peer_version(&peers_to_send, ValidationVersion::VStaging.into()); if v1_peers.len() > 0 { let payload = statement_message( @@ -1135,7 +1135,7 @@ async fn circulate_statement<'a, Context>( relay_parent, stored.statement.clone(), metrics, - ValidationVersion::V2.into(), + ValidationVersion::VStaging.into(), ); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(v2_peers, payload)) .await; From 412282a6c5b9de0be7eab35065a6f09ad9353928 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 24 Feb 2023 13:39:55 +0000 Subject: [PATCH 024/105] Disable assignments V2 Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/criteria.rs | 96 ++++++++++++++++++++--- 1 file changed, 84 insertions(+), 12 deletions(-) diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index 5c6f5f68cca5..8805d1e0cf13 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -18,7 +18,8 @@ use parity_scale_codec::{Decode, Encode}; use polkadot_node_primitives::approval::{ - self as approval_types, AssignmentCertKindV2, AssignmentCertV2, DelayTranche, RelayVRFStory, + self as approval_types, AssignmentCert, AssignmentCertKind, AssignmentCertKindV2, + AssignmentCertV2, DelayTranche, RelayVRFStory, }; use polkadot_primitives::{ AssignmentId, AssignmentPair, CandidateHash, CoreIndex, GroupIndex, IndexedVec, SessionInfo, @@ -250,7 +251,7 @@ impl AssignmentCriteria for RealAssignmentCriteria { config: &Config, leaving_cores: Vec<(CandidateHash, CoreIndex, GroupIndex)>, ) -> HashMap { - compute_assignments(keystore, relay_vrf_story, config, leaving_cores) + compute_assignments(keystore, relay_vrf_story, config, leaving_cores, false) } fn check_assignment_cert( 
@@ -288,6 +289,7 @@ pub(crate) fn compute_assignments( relay_vrf_story: RelayVRFStory, config: &Config, leaving_cores: impl IntoIterator + Clone, + enable_v2_assignments: bool, ) -> HashMap { if config.n_cores == 0 || config.assignment_keys.is_empty() || @@ -345,16 +347,25 @@ pub(crate) fn compute_assignments( let mut assignments = HashMap::new(); - // TODO: support all vrf modulo assignment kinds. - // For now we only do compact. - compute_relay_vrf_modulo_assignments( - &assignments_key, - index, - config, - relay_vrf_story.clone(), - leaving_cores.clone(), - &mut assignments, - ); + if enable_v2_assignments { + compute_relay_vrf_modulo_assignments_v2( + &assignments_key, + index, + config, + relay_vrf_story.clone(), + leaving_cores.clone(), + &mut assignments, + ); + } else { + compute_relay_vrf_modulo_assignments( + &assignments_key, + index, + config, + relay_vrf_story.clone(), + leaving_cores.clone(), + &mut assignments, + ); + } //TODO: Add assignment into `assignments` per core map. @@ -372,6 +383,67 @@ pub(crate) fn compute_assignments( } fn compute_relay_vrf_modulo_assignments( + assignments_key: &schnorrkel::Keypair, + validator_index: ValidatorIndex, + config: &Config, + relay_vrf_story: RelayVRFStory, + leaving_cores: impl IntoIterator + Clone, + assignments: &mut HashMap, +) { + for rvm_sample in 0..config.relay_vrf_modulo_samples { + let mut core = CoreIndex::default(); + + let maybe_assignment = { + // Extra scope to ensure borrowing instead of moving core + // into closure. + let core = &mut core; + assignments_key.vrf_sign_extra_after_check( + relay_vrf_modulo_transcript(relay_vrf_story.clone(), rvm_sample), + |vrf_in_out| { + *core = relay_vrf_modulo_core(&vrf_in_out, config.n_cores); + if let Some((candidate_hash, _)) = + leaving_cores.clone().into_iter().find(|(_, c)| c == core) + { + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?core, + ?validator_index, + tranche = 0, + "RelayVRFModulo Assignment." + ); + + Some(assigned_core_transcript(*core)) + } else { + None + } + }, + ) + }; + + if let Some((vrf_in_out, vrf_proof, _)) = maybe_assignment { + // Sanity: `core` is always initialized to non-default here, as the closure above + // has been executed. + let cert = AssignmentCert { + kind: AssignmentCertKind::RelayVRFModulo { sample: rvm_sample }, + vrf: ( + approval_types::VRFOutput(vrf_in_out.to_output()), + approval_types::VRFProof(vrf_proof), + ), + }; + + // All assignments of type RelayVRFModulo have tranche 0. + assignments.entry(core).or_insert(OurAssignment { + cert: cert.into(), + tranche: 0, + validator_index, + triggered: false, + }); + } + } +} + +fn compute_relay_vrf_modulo_assignments_v2( assignments_key: &schnorrkel::Keypair, validator_index: ValidatorIndex, config: &Config, From af3b64bacf33092aedbb0f81b8b71429134c4cda Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 24 Feb 2023 14:03:47 +0000 Subject: [PATCH 025/105] update todo, docs Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/criteria.rs | 11 +++----- node/network/approval-distribution/src/lib.rs | 27 ++++--------------- node/network/gossip-support/src/lib.rs | 7 ++--- 3 files changed, 11 insertions(+), 34 deletions(-) diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index 8805d1e0cf13..e1367ddd623e 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -367,8 +367,6 @@ pub(crate) fn compute_assignments( ); } - //TODO: Add assignment into `assignments` per core map. 
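// The split above keeps the v1 path (one signed VRF per `relay_vrf_modulo_samples`, each
// yielding a single core) next to the compact v2 path (a single VRF whose output covers
// several samples at once). A minimal standalone model of the v2 reduction step performed
// by `relay_vrf_modulo_cores`: read the output bytes as little-endian u32 words and reduce
// each modulo the number of availability cores. The real code obtains the bytes from
// `VRFInOut::make_bytes` under the core-randomness context; this free-standing sketch only
// shows the chunk-and-reduce arithmetic on an arbitrary byte slice.
fn sample_cores(vrf_bytes: &[u8], num_samples: usize, n_cores: u32) -> Vec<u32> {
    vrf_bytes
        .chunks_exact(4)
        .take(num_samples)
        .map(|chunk| u32::from_le_bytes([chunk[0], chunk[1], chunk[2], chunk[3]]) % n_cores)
        .collect()
}
// For example, with 8 output bytes, 2 samples and 10 cores:
// sample_cores(&[1, 0, 0, 0, 13, 0, 0, 0], 2, 10) == vec![1, 3]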
- // Then run `RelayVRFDelay` once for the whole block. compute_relay_vrf_delay_assignments( &assignments_key, @@ -659,11 +657,10 @@ pub(crate) fn check_assignment_cert( let resulting_cores = relay_vrf_modulo_cores(&vrf_in_out, *sample + 1, config.n_cores); - // TODO: Enforce that all claimable cores are claimed. Currently validators can opt out of checking specific cores. - // This is similar to how validator can opt out and not send assignments in the first place. - // However it can happen that malicious nodes modify the assignment and remove some of the claimed cores from it, - // but this shouldnt be a problem as we will eventually receive the original assignment assuming 1/3 malicious. - // + // TODO: Enforce that all claimable cores are claimed, or include refused cores. + // Currently validators can opt out of checking specific cores. + // This is the same issue to how validator can opt out and not send their assignments in the first place. + // Ensure that the `vrf_in_out` actually includes all of the claimed cores. if claimed_core_indices .iter() diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index 1d30f20366ce..07b0721da878 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -743,7 +743,7 @@ impl State { target: LOG_TARGET, peer_id = %peer_id, num = assignments.len(), - "Processing assignments (V2) from a peer", + "Processing assignments from a peer", ); self.process_incoming_assignments(ctx, metrics, peer_id, assignments, rng).await; }, @@ -752,7 +752,7 @@ impl State { target: LOG_TARGET, peer_id = %peer_id, num = assignments.len(), - "Processing assignments (V1) from a peer", + "Processing assignments from a peer", ); self.process_incoming_assignments( @@ -1426,20 +1426,6 @@ impl State { Some(e) => e, }; - // // TODO: fix mapping of candidates to validator index and claimed indices - // let candidate_entry = match block_entry.candidates.get(index as usize) { - // None => { - // gum::debug!( - // target: LOG_TARGET, - // ?hash, - // ?index, - // "`get_approval_signatures`: could not find candidate entry for given hash and index!" - // ); - // continue - // }, - // Some(e) => e, - // }; - let sigs = block_entry .get_approval_entries(index) .into_iter() @@ -1456,7 +1442,6 @@ impl State { all_sigs } - // TODO: Refactor as in `adjust_required_routing_and_propagate`. async fn unify_with_peer( sender: &mut impl overseer::ApprovalDistributionSenderTrait, metrics: &Metrics, @@ -1875,7 +1860,9 @@ impl ApprovalDistribution { state.handle_new_blocks(ctx, metrics, metas, rng).await; }, ApprovalDistributionMessage::DistributeAssignment(cert, candidate_indices) => { - // TODO: approval voting bug: Fix `Importing locally an already known assignment` for multiple candidate assignments. + // TODO: Fix warning: `Importing locally an already known assignment` for multiple candidate assignments. + // This is due to the fact that we call this on wakeup, and we do have a wakeup for each candidate index, but + // there is only one assignment. 
gum::debug!( target: LOG_TARGET, "Distributing our assignment on candidate (block={}, indices={:?})", @@ -2010,8 +1997,6 @@ pub(crate) async fn send_assignments_batched( while v1_batches.peek().is_some() { let batch: Vec<_> = v1_batches.by_ref().take(MAX_ASSIGNMENT_BATCH_SIZE).collect(); - - // If there are multiple candidates claimed this is only supported for V2 send_assignments_batched_inner(sender, batch, &v1_peers, 1).await; } } @@ -2021,8 +2006,6 @@ pub(crate) async fn send_assignments_batched( while v2_batches.peek().is_some() { let batch = v2_batches.by_ref().take(MAX_ASSIGNMENT_BATCH_SIZE).collect::>(); - - // If there are multiple candidates claimed this is only supported for V2 send_assignments_batched_inner(sender, batch, &v2_peers, 2).await; } } diff --git a/node/network/gossip-support/src/lib.rs b/node/network/gossip-support/src/lib.rs index 14a6cb46da40..1d6d00745a1b 100644 --- a/node/network/gossip-support/src/lib.rs +++ b/node/network/gossip-support/src/lib.rs @@ -41,7 +41,7 @@ use sp_keystore::{CryptoStore, SyncCryptoStorePtr}; use polkadot_node_network_protocol::{ authority_discovery::AuthorityDiscovery, peer_set::PeerSet, GossipSupportNetworkMessage, - PeerId, Versioned, + PeerId, }; use polkadot_node_subsystem::{ messages::{ @@ -404,10 +404,7 @@ where NetworkBridgeEvent::OurViewChange(_) => {}, NetworkBridgeEvent::PeerViewChange(_, _) => {}, NetworkBridgeEvent::NewGossipTopology { .. } => {}, - NetworkBridgeEvent::PeerMessage(_, Versioned::VStaging(v)) | - NetworkBridgeEvent::PeerMessage(_, Versioned::V1(v)) => { - match v {}; - }, + NetworkBridgeEvent::PeerMessage(_, _) => {}, } } From 6612b3cf5b3f1a1913cd55e31038f7514249c60c Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 24 Feb 2023 14:28:22 +0000 Subject: [PATCH 026/105] fmt Signed-off-by: Andrei Sandu --- node/network/gossip-support/src/lib.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/node/network/gossip-support/src/lib.rs b/node/network/gossip-support/src/lib.rs index 1d6d00745a1b..5c11141199ef 100644 --- a/node/network/gossip-support/src/lib.rs +++ b/node/network/gossip-support/src/lib.rs @@ -40,8 +40,7 @@ use sp_application_crypto::{AppKey, ByteArray}; use sp_keystore::{CryptoStore, SyncCryptoStorePtr}; use polkadot_node_network_protocol::{ - authority_discovery::AuthorityDiscovery, peer_set::PeerSet, GossipSupportNetworkMessage, - PeerId, + authority_discovery::AuthorityDiscovery, peer_set::PeerSet, GossipSupportNetworkMessage, PeerId, }; use polkadot_node_subsystem::{ messages::{ From f8ccec9e4f7776239c939ead6beda857e0316770 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 24 Feb 2023 22:26:39 +0000 Subject: [PATCH 027/105] Temporarly disable CI cargo test to get a build for burn-in Signed-off-by: Andrei Sandu --- scripts/ci/gitlab/pipeline/test.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index ac77698f43cc..2c7e3fc34344 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -42,7 +42,8 @@ test-linux-stable: # but still want to have debug assertions. 
RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" script: - - time cargo test --workspace --profile testnet --verbose --locked --features=runtime-benchmarks,runtime-metrics,try-runtime + # - time cargo test --workspace --profile testnet --verbose --locked --features=runtime-benchmarks,runtime-metrics,try-runtime + - sleep 1 .check-dependent-project: &check-dependent-project stage: test From ef42e0c8154f1748f5b11b2aa4e204deb97cf49d Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 24 Feb 2023 23:18:41 +0000 Subject: [PATCH 028/105] debug -> trace Signed-off-by: Andrei Sandu --- node/network/bridge/src/network.rs | 2 +- node/network/bridge/src/rx/mod.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/node/network/bridge/src/network.rs b/node/network/bridge/src/network.rs index 0de62b9a9f7a..c606834a6afc 100644 --- a/node/network/bridge/src/network.rs +++ b/node/network/bridge/src/network.rs @@ -71,7 +71,7 @@ pub(crate) fn send_message( let last_peer = peers.pop(); // optimization: generate the protocol name once. let protocol_name = protocol_names.get_name(peer_set, version); - gum::debug!(target: LOG_TARGET, ?peers, ?version, ?protocol_name, "Sending message to peers",); + gum::trace!(target: LOG_TARGET, ?peers, ?version, ?protocol_name, "Sending message to peers",); peers.into_iter().for_each(|peer| { net.write_notification(peer, protocol_name.clone(), message.clone()); }); diff --git a/node/network/bridge/src/rx/mod.rs b/node/network/bridge/src/rx/mod.rs index d0b3459f8626..7408a0b25c71 100644 --- a/node/network/bridge/src/rx/mod.rs +++ b/node/network/bridge/src/rx/mod.rs @@ -870,7 +870,7 @@ fn send_validation_message_v1( message: WireMessage, metrics: &Metrics, ) { - gum::debug!(target: LOG_TARGET, ?peers, ?message, "Sending validation v1 message to peers",); + gum::trace!(target: LOG_TARGET, ?peers, ?message, "Sending validation v1 message to peers",); send_message( net, peers, @@ -889,7 +889,7 @@ fn send_validation_message_vstaging( message: WireMessage, metrics: &Metrics, ) { - gum::debug!(target: LOG_TARGET, ?peers, ?message, "Sending validation v2 message to peers",); + gum::trace!(target: LOG_TARGET, ?peers, ?message, "Sending validation v2 message to peers",); send_message( net, peers, From 0c31d61b27bd93cf2f76a31bb1ef09d601cab6b8 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 24 Feb 2023 23:43:18 +0000 Subject: [PATCH 029/105] enable v2 assignments Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/criteria.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index e1367ddd623e..599382acd98e 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -251,7 +251,7 @@ impl AssignmentCriteria for RealAssignmentCriteria { config: &Config, leaving_cores: Vec<(CandidateHash, CoreIndex, GroupIndex)>, ) -> HashMap { - compute_assignments(keystore, relay_vrf_story, config, leaving_cores, false) + compute_assignments(keystore, relay_vrf_story, config, leaving_cores, true) } fn check_assignment_cert( From e3598b1c09d6973a1d8e3a2bf7d6a8f26f923fa3 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 10 Mar 2023 12:45:37 +0000 Subject: [PATCH 030/105] Remove unimplemented! 
Signed-off-by: Andrei Sandu --- node/network/bridge/src/tx/mod.rs | 16 ++++++++++++++-- .../collator-protocol/src/collator_side/mod.rs | 6 ++++-- .../collator-protocol/src/validator_side/mod.rs | 7 ++++--- 3 files changed, 22 insertions(+), 7 deletions(-) diff --git a/node/network/bridge/src/tx/mod.rs b/node/network/bridge/src/tx/mod.rs index 3d136d40220c..c166e5120daf 100644 --- a/node/network/bridge/src/tx/mod.rs +++ b/node/network/bridge/src/tx/mod.rs @@ -235,7 +235,14 @@ where WireMessage::ProtocolMessage(msg), &metrics, ), - _ => unimplemented!("collation protocol has only v1; qed"), + _ => { + gum::warn!( + target: LOG_TARGET, + action = "SendCollationMessages", + num_messages = 1usize, + "Attempted to send collation message on invalid protocol version. Only v1 supported." + ); + }, } }, NetworkBridgeTxMessage::SendCollationMessages(msgs) => { @@ -254,7 +261,12 @@ where WireMessage::ProtocolMessage(msg), &metrics, ), - _ => unimplemented!("collation protocol has only v1; qed"), + _ => gum::warn!( + target: LOG_TARGET, + action = "SendCollationMessages", + num_messages = 1usize, + "Attempted to send collation message on invalid protocol version" + ), } } }, diff --git a/node/network/collator-protocol/src/collator_side/mod.rs b/node/network/collator-protocol/src/collator_side/mod.rs index 4dfb9d7123bd..5f0edc10cf51 100644 --- a/node/network/collator-protocol/src/collator_side/mod.rs +++ b/node/network/collator-protocol/src/collator_side/mod.rs @@ -892,8 +892,10 @@ async fn handle_network_msg( NewGossipTopology { .. } => { // impossible! }, - PeerMessage(_, Versioned::VStaging(_)) => - unimplemented!("We only support collator protocol version 1."), + PeerMessage(_, Versioned::VStaging(_)) => gum::warn!( + target: LOG_TARGET, + "Received message on invalid collator protocol version. Only v1 supported", + ), } Ok(()) diff --git a/node/network/collator-protocol/src/validator_side/mod.rs b/node/network/collator-protocol/src/validator_side/mod.rs index d0e906a2b831..a16f6662a9b8 100644 --- a/node/network/collator-protocol/src/validator_side/mod.rs +++ b/node/network/collator-protocol/src/validator_side/mod.rs @@ -1075,9 +1075,10 @@ async fn handle_network_msg( PeerMessage(remote, Versioned::V1(msg)) => { process_incoming_peer_message(ctx, state, remote, msg).await; }, - PeerMessage(_, Versioned::VStaging(_)) => { - unimplemented!("We only support collator protocol version 1."); - }, + PeerMessage(_, Versioned::VStaging(_)) => gum::warn!( + target: LOG_TARGET, + "Received message on invalid collator protocol version. Only v1 supported", + ), } Ok(()) From 24db36cf31570fb0fd44feaa6a067540e9c6a10e Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 10 Mar 2023 13:27:41 +0000 Subject: [PATCH 031/105] Metric updates Signed-off-by: Andrei Sandu --- node/network/approval-distribution/src/lib.rs | 2 +- .../approval-distribution/src/metrics.rs | 30 +++++++++++++++---- 2 files changed, 25 insertions(+), 7 deletions(-) diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index 07b0721da878..4d1854893651 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -1071,7 +1071,7 @@ impl State { } // Invariant: to our knowledge, none of the peers except for the `source` know about the assignment. 
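// The change just below replaces the plain imported-assignments counter with a counter
// labelled by certificate kind. A simplified, prometheus-free model of the same idea,
// keyed by the label strings produced by the `AsLabel` mapping added to metrics.rs
// (type and method names here are illustrative only, not the in-tree ones):
use std::collections::HashMap;

#[derive(Default)]
struct AssignmentImportCounts(HashMap<&'static str, u64>);

impl AssignmentImportCounts {
    fn on_assignment_imported(&mut self, kind_label: &'static str) {
        *self.0.entry(kind_label).or_insert(0) += 1;
    }
}
// e.g. counts.on_assignment_imported("VRF Modulo Compact");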
- metrics.on_assignment_imported(); + metrics.on_assignment_imported(&assignment.cert.kind); let topology = self.topologies.get_topology(entry.session); let local = source == MessageSource::Local; diff --git a/node/network/approval-distribution/src/metrics.rs b/node/network/approval-distribution/src/metrics.rs index 04ab02ea0130..ff11110a8199 100644 --- a/node/network/approval-distribution/src/metrics.rs +++ b/node/network/approval-distribution/src/metrics.rs @@ -15,6 +15,7 @@ // along with Polkadot. If not, see . use polkadot_node_metrics::metrics::{prometheus, Metrics as MetricsTrait}; +use polkadot_node_primitives::approval::AssignmentCertKindV2; /// Approval Distribution metrics. #[derive(Default, Clone)] @@ -22,7 +23,7 @@ pub struct Metrics(Option); #[derive(Clone)] struct MetricsInner { - assignments_imported_total: prometheus::Counter, + assignments_imported_total: prometheus::CounterVec, approvals_imported_total: prometheus::Counter, unified_with_peer_total: prometheus::Counter, // aggression_l1_messages_total: prometheus::Counter, @@ -32,10 +33,24 @@ struct MetricsInner { time_awaiting_approval_voting: prometheus::Histogram, } +trait AsLabel { + fn as_label(&self) -> &str; +} + +impl AsLabel for &AssignmentCertKindV2 { + fn as_label(&self) -> &str { + match self { + AssignmentCertKindV2::RelayVRFDelay { .. } => "VRF Delay", + AssignmentCertKindV2::RelayVRFModulo { .. } => "VRF Modulo", + AssignmentCertKindV2::RelayVRFModuloCompact { .. } => "VRF Modulo Compact", + } + } +} + impl Metrics { - pub(crate) fn on_assignment_imported(&self) { + pub(crate) fn on_assignment_imported(&self, kind: &AssignmentCertKindV2) { if let Some(metrics) = &self.0 { - metrics.assignments_imported_total.inc(); + metrics.assignments_imported_total.with_label_values(&[kind.as_label()]).inc(); } } @@ -88,9 +103,12 @@ impl MetricsTrait for Metrics { fn try_register(registry: &prometheus::Registry) -> Result { let metrics = MetricsInner { assignments_imported_total: prometheus::register( - prometheus::Counter::new( - "polkadot_parachain_assignments_imported_total", - "Number of valid assignments imported locally or from other peers.", + prometheus::CounterVec::new( + prometheus::Opts::new( + "polkadot_parachain_assignments_imported_total", + "Number of valid assignments imported locally or from other peers.", + ), + &["kind"], )?, registry, )?, From 741e91d4b4fc3f0d8f70ffce944aacffdc3cdfa5 Mon Sep 17 00:00:00 2001 From: Andrei Sandu <54316454+sandreim@users.noreply.github.com> Date: Fri, 31 Mar 2023 15:40:00 +0300 Subject: [PATCH 032/105] approval-voting: simplify v2 assignments (#6802) * Simplify v2 assignment cert Signed-off-by: Andrei Sandu * disable v2 assignments again Signed-off-by: Andrei Sandu * Review feedback Signed-off-by: Andrei Sandu * fix comment Signed-off-by: Andrei Sandu * doc update Signed-off-by: Andrei Sandu * db migration Signed-off-by: Andrei Sandu * Revert "disable v2 assignments again" This reverts commit 321b9b7a5288f44d7eccfc7b4abdc520e35e8d77. * Switch to using bitfield Signed-off-by: Andrei Sandu * Introduce AssignmentBitfield Signed-off-by: Andrei Sandu * Get rid of Vec Signed-off-by: Andrei Sandu * leftovers Signed-off-by: Andrei Sandu * Review feedback and code SPA Signed-off-by: Andrei Sandu * clippy Signed-off-by: Andrei Sandu * Disable v2 assignments, so node upgrade test can pass. 
Signed-off-by: Andrei Sandu * refactor bitfields Signed-off-by: Andrei Sandu * link issue in code Signed-off-by: Andrei Sandu --------- Signed-off-by: Andrei Sandu --- Cargo.lock | 3 + .../approval-voting/src/approval_db/v1/mod.rs | 11 +- node/core/approval-voting/src/criteria.rs | 238 ++++++++++------- node/core/approval-voting/src/lib.rs | 208 +++++++++------ .../approval-voting/src/persisted_entries.rs | 48 ++-- node/network/approval-distribution/Cargo.toml | 1 + node/network/approval-distribution/src/lib.rs | 147 ++++++----- node/network/bridge/src/rx/mod.rs | 2 +- node/network/protocol/src/lib.rs | 8 +- node/primitives/Cargo.toml | 1 + node/primitives/src/approval.rs | 242 ++++++++++++++++-- node/service/src/parachains_db/upgrade.rs | 55 +++- node/subsystem-types/Cargo.toml | 1 + node/subsystem-types/src/messages.rs | 9 +- .../src/node/approval/approval-voting.md | 2 + 15 files changed, 692 insertions(+), 284 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c679cb7b38ff..284b837f1d49 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6428,6 +6428,7 @@ name = "polkadot-approval-distribution" version = "0.9.37" dependencies = [ "assert_matches", + "bitvec", "env_logger 0.9.0", "futures", "itertools", @@ -7156,6 +7157,7 @@ dependencies = [ name = "polkadot-node-primitives" version = "0.9.37" dependencies = [ + "bitvec", "bounded-vec", "futures", "parity-scale-codec", @@ -7206,6 +7208,7 @@ name = "polkadot-node-subsystem-types" version = "0.9.37" dependencies = [ "async-trait", + "bitvec", "derive_more", "futures", "orchestra", diff --git a/node/core/approval-voting/src/approval_db/v1/mod.rs b/node/core/approval-voting/src/approval_db/v1/mod.rs index 58781e76ce39..41f7760608f3 100644 --- a/node/core/approval-voting/src/approval_db/v1/mod.rs +++ b/node/core/approval-voting/src/approval_db/v1/mod.rs @@ -17,7 +17,7 @@ //! Version 1 of the DB schema. use parity_scale_codec::{Decode, Encode}; -use polkadot_node_primitives::approval::{AssignmentCertV2, DelayTranche}; +use polkadot_node_primitives::approval::{v2::CoreBitfield, AssignmentCertV2, DelayTranche}; use polkadot_node_subsystem::{SubsystemError, SubsystemResult}; use polkadot_node_subsystem_util::database::{DBTransaction, Database}; use polkadot_primitives::{ @@ -161,11 +161,16 @@ pub struct Config { /// Details pertaining to our assignment on a block. #[derive(Encode, Decode, Debug, Clone, PartialEq)] pub struct OurAssignment { + /// Our assignment certificate. pub cert: AssignmentCertV2, + /// The tranche for which the assignment refers to. pub tranche: DelayTranche, + /// Our validator index for the session in which the candidates were included. pub validator_index: ValidatorIndex, - // Whether the assignment has been triggered already. + /// Whether the assignment has been triggered already. pub triggered: bool, + /// A subset of the core indices obtained from the VRF output. + pub assignment_bitfield: CoreBitfield, } /// Metadata regarding a specific tranche of assignments for a specific candidate. @@ -186,7 +191,7 @@ pub struct ApprovalEntry { pub our_assignment: Option, pub our_approval_sig: Option, // `n_validators` bits. 
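// A short standalone example of how such a bitfield behaves, using the `bitvec` types
// that `Bitfield`, `CoreBitfield` and `CandidateBitfield` are built on in this patch:
// one bit per validator (or core, or candidate), set on assignment and read back via
// `count_ones`/`iter_ones`. Illustrative only, not taken from the patch itself.
use bitvec::prelude::*;

fn bitfield_demo() {
    let n_validators = 8;
    let mut assigned = bitvec![u8, Lsb0; 0; n_validators];
    assigned.set(2, true);
    assigned.set(5, true);

    assert_eq!(assigned.count_ones(), 2);
    // Indices of the set bits, i.e. the assigned validator indices.
    assert_eq!(assigned.iter_ones().collect::<Vec<_>>(), vec![2usize, 5]);
}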
- pub assignments: Bitfield, + pub assigned_validators: Bitfield, pub approved: bool, } diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index 599382acd98e..ec5da22044b7 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -18,8 +18,8 @@ use parity_scale_codec::{Decode, Encode}; use polkadot_node_primitives::approval::{ - self as approval_types, AssignmentCert, AssignmentCertKind, AssignmentCertKindV2, - AssignmentCertV2, DelayTranche, RelayVRFStory, + self as approval_types, v2::CoreBitfield, AssignmentCert, AssignmentCertKind, + AssignmentCertKindV2, AssignmentCertV2, DelayTranche, RelayVRFStory, }; use polkadot_primitives::{ AssignmentId, AssignmentPair, CandidateHash, CoreIndex, GroupIndex, IndexedVec, SessionInfo, @@ -44,6 +44,8 @@ pub struct OurAssignment { validator_index: ValidatorIndex, // Whether the assignment has been triggered already. triggered: bool, + // The core indices obtained from the VRF output. + assignment_bitfield: CoreBitfield, } impl OurAssignment { @@ -66,16 +68,20 @@ impl OurAssignment { pub(crate) fn mark_triggered(&mut self) { self.triggered = true; } + + pub(crate) fn assignment_bitfield(&self) -> &CoreBitfield { + &self.assignment_bitfield + } } impl From for OurAssignment { - // TODO: OurAssignment changed -> migration for parachains db approval voting column. fn from(entry: crate::approval_db::v1::OurAssignment) -> Self { OurAssignment { cert: entry.cert, tranche: entry.tranche, validator_index: entry.validator_index, triggered: entry.triggered, + assignment_bitfield: entry.assignment_bitfield, } } } @@ -87,17 +93,41 @@ impl From for crate::approval_db::v1::OurAssignment { tranche: entry.tranche, validator_index: entry.validator_index, triggered: entry.triggered, + assignment_bitfield: entry.assignment_bitfield, } } } -fn relay_vrf_modulo_transcript(relay_vrf_story: RelayVRFStory, sample: u32) -> Transcript { - // combine the relay VRF story with a sample number. - let mut t = Transcript::new(approval_types::RELAY_VRF_MODULO_CONTEXT); - t.append_message(b"RC-VRF", &relay_vrf_story.0); - sample.using_encoded(|s| t.append_message(b"sample", s)); +// Combines the relay VRF story with a sample number if any. +fn relay_vrf_modulo_transcript_inner( + mut transcript: Transcript, + relay_vrf_story: RelayVRFStory, + sample: Option, +) -> Transcript { + transcript.append_message(b"RC-VRF", &relay_vrf_story.0); - t + if let Some(sample) = sample { + sample.using_encoded(|s| transcript.append_message(b"sample", s)); + } + + transcript +} + +fn relay_vrf_modulo_transcript_v1(relay_vrf_story: RelayVRFStory, sample: u32) -> Transcript { + relay_vrf_modulo_transcript_inner( + Transcript::new(approval_types::v1::RELAY_VRF_MODULO_CONTEXT), + relay_vrf_story, + Some(sample), + ) +} + +fn relay_vrf_modulo_transcript_v2(relay_vrf_story: RelayVRFStory) -> Transcript { + // combine the relay VRF story with a sample number. 
+ relay_vrf_modulo_transcript_inner( + Transcript::new(approval_types::v2::RELAY_VRF_MODULO_CONTEXT), + relay_vrf_story, + None, + ) } /// A hard upper bound on num_cores * target_checkers / num_validators @@ -138,7 +168,7 @@ fn relay_vrf_modulo_cores( max_cores: u32, ) -> Vec { vrf_in_out - .make_bytes::(approval_types::CORE_RANDOMNESS_CONTEXT) + .make_bytes::(approval_types::v2::CORE_RANDOMNESS_CONTEXT) .0 .chunks_exact(4) .take(num_samples as usize) @@ -148,7 +178,7 @@ fn relay_vrf_modulo_cores( } fn relay_vrf_modulo_core(vrf_in_out: &VRFInOut, n_cores: u32) -> CoreIndex { - let bytes: [u8; 4] = vrf_in_out.make_bytes(approval_types::CORE_RANDOMNESS_CONTEXT); + let bytes: [u8; 4] = vrf_in_out.make_bytes(approval_types::v1::CORE_RANDOMNESS_CONTEXT); // interpret as little-endian u32. let random_core = u32::from_le_bytes(bytes) % n_cores; @@ -156,7 +186,7 @@ fn relay_vrf_modulo_core(vrf_in_out: &VRFInOut, n_cores: u32) -> CoreIndex { } fn relay_vrf_delay_transcript(relay_vrf_story: RelayVRFStory, core_index: CoreIndex) -> Transcript { - let mut t = Transcript::new(approval_types::RELAY_VRF_DELAY_CONTEXT); + let mut t = Transcript::new(approval_types::v1::RELAY_VRF_DELAY_CONTEXT); t.append_message(b"RC-VRF", &relay_vrf_story.0); core_index.0.using_encoded(|s| t.append_message(b"core", s)); t @@ -167,7 +197,7 @@ fn relay_vrf_delay_tranche( num_delay_tranches: u32, zeroth_delay_tranche_width: u32, ) -> DelayTranche { - let bytes: [u8; 4] = vrf_in_out.make_bytes(approval_types::TRANCHE_RANDOMNESS_CONTEXT); + let bytes: [u8; 4] = vrf_in_out.make_bytes(approval_types::v1::TRANCHE_RANDOMNESS_CONTEXT); // interpret as little-endian u32 and reduce by the number of tranches. let wide_tranche = @@ -178,17 +208,11 @@ fn relay_vrf_delay_tranche( } fn assigned_core_transcript(core_index: CoreIndex) -> Transcript { - let mut t = Transcript::new(approval_types::ASSIGNED_CORE_CONTEXT); + let mut t = Transcript::new(approval_types::v1::ASSIGNED_CORE_CONTEXT); core_index.0.using_encoded(|s| t.append_message(b"core", s)); t } -fn assigned_cores_transcript(core_indices: &Vec) -> Transcript { - let mut t = Transcript::new(approval_types::ASSIGNED_CORE_CONTEXT); - core_indices.using_encoded(|s| t.append_message(b"cores", s)); - t -} - /// Information about the world assignments are being produced in. #[derive(Clone, Debug)] pub(crate) struct Config { @@ -231,14 +255,15 @@ pub(crate) trait AssignmentCriteria { fn check_assignment_cert( &self, - claimed_core_index: Vec, + claimed_core_index: Option, validator_index: ValidatorIndex, config: &Config, relay_vrf_story: RelayVRFStory, assignment: &AssignmentCertV2, - // Backing groups for each assigned core `CoreIndex`. + // Backing groups for each "leaving core". 
backing_groups: Vec, - ) -> Result; + // TODO: maybe define record or something else than tuple + ) -> Result<(CoreBitfield, DelayTranche), InvalidAssignment>; } pub(crate) struct RealAssignmentCriteria; @@ -251,18 +276,18 @@ impl AssignmentCriteria for RealAssignmentCriteria { config: &Config, leaving_cores: Vec<(CandidateHash, CoreIndex, GroupIndex)>, ) -> HashMap { - compute_assignments(keystore, relay_vrf_story, config, leaving_cores, true) + compute_assignments(keystore, relay_vrf_story, config, leaving_cores, false) } fn check_assignment_cert( &self, - claimed_core_index: Vec, + claimed_core_index: Option, validator_index: ValidatorIndex, config: &Config, relay_vrf_story: RelayVRFStory, assignment: &AssignmentCertV2, backing_groups: Vec, - ) -> Result { + ) -> Result<(CoreBitfield, DelayTranche), InvalidAssignment> { check_assignment_cert( claimed_core_index, validator_index, @@ -396,7 +421,7 @@ fn compute_relay_vrf_modulo_assignments( // into closure. let core = &mut core; assignments_key.vrf_sign_extra_after_check( - relay_vrf_modulo_transcript(relay_vrf_story.clone(), rvm_sample), + relay_vrf_modulo_transcript_v1(relay_vrf_story.clone(), rvm_sample), |vrf_in_out| { *core = relay_vrf_modulo_core(&vrf_in_out, config.n_cores); if let Some((candidate_hash, _)) = @@ -436,6 +461,7 @@ fn compute_relay_vrf_modulo_assignments( tranche: 0, validator_index, triggered: false, + assignment_bitfield: core.into(), }); } } @@ -450,14 +476,10 @@ fn compute_relay_vrf_modulo_assignments_v2( assignments: &mut HashMap, ) { let mut assigned_cores = Vec::new(); - // for rvm_sample in 0..config.relay_vrf_modulo_samples { let maybe_assignment = { let assigned_cores = &mut assigned_cores; - assignments_key.vrf_sign_extra_after_check( - relay_vrf_modulo_transcript( - relay_vrf_story.clone(), - config.relay_vrf_modulo_samples - 1, - ), + assignments_key.vrf_sign_after_check( + relay_vrf_modulo_transcript_v2(relay_vrf_story.clone()), |vrf_in_out| { *assigned_cores = relay_vrf_modulo_cores( &vrf_in_out, @@ -476,12 +498,12 @@ fn compute_relay_vrf_modulo_assignments_v2( ?assigned_cores, ?validator_index, tranche = 0, - "RelayVRFModuloCompact Assignment." + "Produced RelayVRFModuloCompact Assignment." ); - Some(assigned_cores_transcript(assigned_cores)) + true } else { - None + false } }, ) @@ -489,18 +511,24 @@ fn compute_relay_vrf_modulo_assignments_v2( if let Some(assignment) = maybe_assignment.map(|(vrf_in_out, vrf_proof, _)| { let cert = AssignmentCertV2 { - kind: AssignmentCertKindV2::RelayVRFModuloCompact { - sample: config.relay_vrf_modulo_samples - 1, - core_indices: assigned_cores.clone(), - }, + kind: AssignmentCertKindV2::RelayVRFModuloCompact, vrf: ( approval_types::VRFOutput(vrf_in_out.to_output()), approval_types::VRFProof(vrf_proof), ), }; - // All assignments of type RelayVRFModulo have tranche 0. - OurAssignment { cert, tranche: 0, validator_index, triggered: false } + // All assignments of type RelayVRFModuloCompact have tranche 0. 
+ OurAssignment { + cert, + tranche: 0, + validator_index, + triggered: false, + assignment_bitfield: assigned_cores + .clone() + .try_into() + .expect("Just checked `!assigned_cores.is_empty()`; qed"), + } }) { for core_index in assigned_cores { assignments.insert(core_index, assignment.clone()); @@ -534,7 +562,13 @@ fn compute_relay_vrf_delay_assignments( ), }; - let our_assignment = OurAssignment { cert, tranche, validator_index, triggered: false }; + let our_assignment = OurAssignment { + cert, + tranche, + validator_index, + triggered: false, + assignment_bitfield: core.into(), + }; let used = match assignments.entry(core) { Entry::Vacant(e) => { @@ -588,12 +622,14 @@ pub(crate) enum InvalidAssignmentReason { VRFDelayCoreIndexMismatch, VRFDelayOutputMismatch, InvalidArguments, + /// Assignment vrf check resulted in 0 assigned cores. + NullAssignment, } /// Checks the crypto of an assignment cert. Failure conditions: /// * Validator index out of bounds /// * VRF signature check fails -/// * VRF output doesn't match assigned core +/// * VRF output doesn't match assigned cores /// * Core is not covered by extra data in signature /// * Core index out of bounds /// * Sample is out of bounds @@ -601,14 +637,17 @@ pub(crate) enum InvalidAssignmentReason { /// /// This function does not check whether the core is actually a valid assignment or not. That should be done /// outside the scope of this function. +/// +/// For v2 assignments of type `AssignmentCertKindV2::RelayVRFModuloCompact` we don't need to pass +/// `claimed_core_index` it won't be used in the check. pub(crate) fn check_assignment_cert( - claimed_core_indices: Vec, + claimed_core_index: Option, validator_index: ValidatorIndex, config: &Config, relay_vrf_story: RelayVRFStory, assignment: &AssignmentCertV2, backing_groups: Vec, -) -> Result { +) -> Result<(CoreBitfield, DelayTranche), InvalidAssignment> { use InvalidAssignmentReason as Reason; let validator_public = config @@ -619,20 +658,14 @@ pub(crate) fn check_assignment_cert( let public = schnorrkel::PublicKey::from_bytes(validator_public.as_slice()) .map_err(|_| InvalidAssignment(Reason::InvalidAssignmentKey))?; - // Check that we have all backing groups for claimed cores. - if claimed_core_indices.is_empty() && claimed_core_indices.len() != backing_groups.len() { - return Err(InvalidAssignment(Reason::InvalidArguments)) - } - - // Check that the validator was not part of the backing group + // For v1 assignments Check that the validator was not part of the backing group // and not already assigned. 
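// A minimal standalone version of the backing-group exclusion rule applied here: a
// validator never checks a candidate it helped back. The in-tree `is_in_backing_group`
// works over `IndexedVec`s of `ValidatorIndex` groups; the sketch below uses plain
// slices of the same shape (simplified, illustrative signature).
fn is_in_backing_group(validator_groups: &[Vec<u32>], validator: u32, group: usize) -> bool {
    validator_groups
        .get(group)
        .map_or(false, |members| members.contains(&validator))
}
// e.g. with groups [[0, 1], [2, 3]], validator 2 sits in group 1, so it may not check
// group 1's candidate but may still check group 0's.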
- for (claimed_core, backing_group) in claimed_core_indices.iter().zip(backing_groups.iter()) { - if claimed_core.0 >= config.n_cores { + if let Some(claimed_core_index) = claimed_core_index.as_ref() { + if claimed_core_index.0 >= config.n_cores { return Err(InvalidAssignment(Reason::CoreIndexOutOfBounds)) } - let is_in_backing = - is_in_backing_group(&config.validator_groups, validator_index, *backing_group); + is_in_backing_group(&config.validator_groups, validator_index, backing_groups[0]); if is_in_backing { return Err(InvalidAssignment(Reason::IsInBackingGroup)) @@ -641,72 +674,86 @@ pub(crate) fn check_assignment_cert( let &(ref vrf_output, ref vrf_proof) = &assignment.vrf; match &assignment.kind { - AssignmentCertKindV2::RelayVRFModuloCompact { sample, core_indices } => { - if *sample >= config.relay_vrf_modulo_samples { - return Err(InvalidAssignment(Reason::SampleOutOfBounds)) - } - + AssignmentCertKindV2::RelayVRFModuloCompact => { let (vrf_in_out, _) = public - .vrf_verify_extra( - relay_vrf_modulo_transcript(relay_vrf_story, *sample), + .vrf_verify( + relay_vrf_modulo_transcript_v2(relay_vrf_story), &vrf_output.0, &vrf_proof.0, - assigned_cores_transcript(core_indices), ) .map_err(|_| InvalidAssignment(Reason::VRFModuloOutputMismatch))?; - let resulting_cores = relay_vrf_modulo_cores(&vrf_in_out, *sample + 1, config.n_cores); - - // TODO: Enforce that all claimable cores are claimed, or include refused cores. - // Currently validators can opt out of checking specific cores. - // This is the same issue to how validator can opt out and not send their assignments in the first place. + // Get unique core assignments from the VRF wrt `config.n_cores`. + // Some of the core indices might be invalid, as there was no candidate included in the + // relay chain block for that core. + // + // The caller must check if the claimed candidate indices are valid + // and refer to the valid subset of cores outputed by the VRF here. + let vrf_unique_cores = relay_vrf_modulo_cores( + &vrf_in_out, + config.relay_vrf_modulo_samples, + config.n_cores, + ); - // Ensure that the `vrf_in_out` actually includes all of the claimed cores. - if claimed_core_indices + // Filter out cores in which the validator is in the backing group. + let resulting_cores = vrf_unique_cores .iter() - .fold(true, |cores_match, core| cores_match & resulting_cores.contains(core)) - { - Ok(0) - } else { - gum::debug!( - target: LOG_TARGET, - ?resulting_cores, - ?claimed_core_indices, - "Assignment claimed cores mismatch", - ); - Err(InvalidAssignment(Reason::VRFModuloCoreIndexMismatch)) - } + .zip(backing_groups.iter()) + .filter_map(|(core, backing_group)| { + if is_in_backing_group( + &config.validator_groups, + validator_index, + *backing_group, + ) { + None + } else { + Some(*core) + } + }) + .collect::>(); + + CoreBitfield::try_from(resulting_cores) + .map(|bitfield| (bitfield, 0)) + .map_err(|_| InvalidAssignment(Reason::NullAssignment)) }, AssignmentCertKindV2::RelayVRFModulo { sample } => { if *sample >= config.relay_vrf_modulo_samples { return Err(InvalidAssignment(Reason::SampleOutOfBounds)) } + // This is a v1 assignment for which we need the core index. 
+ let claimed_core_index = + claimed_core_index.ok_or(InvalidAssignment(Reason::InvalidArguments))?; + let (vrf_in_out, _) = public .vrf_verify_extra( - relay_vrf_modulo_transcript(relay_vrf_story, *sample), + relay_vrf_modulo_transcript_v1(relay_vrf_story, *sample), &vrf_output.0, &vrf_proof.0, - assigned_core_transcript(claimed_core_indices[0]), + assigned_core_transcript(claimed_core_index), ) .map_err(|_| InvalidAssignment(Reason::VRFModuloOutputMismatch))?; let core = relay_vrf_modulo_core(&vrf_in_out, config.n_cores); // ensure that the `vrf_in_out` actually gives us the claimed core. - if core == claimed_core_indices[0] { - Ok(0) + if core == claimed_core_index { + Ok((core.into(), 0)) } else { gum::debug!( target: LOG_TARGET, ?core, - ?claimed_core_indices, - "Assignment claimed cores mismatch", + ?claimed_core_index, + "Assignment claimed core mismatch", ); Err(InvalidAssignment(Reason::VRFModuloCoreIndexMismatch)) } }, AssignmentCertKindV2::RelayVRFDelay { core_index } => { - if *core_index != claimed_core_indices[0] { + // This is a v1 assignment for which we need the core index. + let claimed_core_index = + claimed_core_index.ok_or(InvalidAssignment(Reason::InvalidArguments))?; + + if *core_index != claimed_core_index { return Err(InvalidAssignment(Reason::VRFDelayCoreIndexMismatch)) } @@ -718,10 +765,13 @@ pub(crate) fn check_assignment_cert( ) .map_err(|_| InvalidAssignment(Reason::VRFDelayOutputMismatch))?; - Ok(relay_vrf_delay_tranche( - &vrf_in_out, - config.n_delay_tranches, - config.zeroth_delay_tranche_width, + Ok(( + (*core_index).into(), + relay_vrf_delay_tranche( + &vrf_in_out, + config.n_delay_tranches, + config.zeroth_delay_tranche_width, + ), )) }, } diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index 9777bdab97f1..81f5708fdf86 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -24,8 +24,9 @@ use polkadot_node_jaeger as jaeger; use polkadot_node_primitives::{ approval::{ - AssignmentCertKindV2, AssignmentCertV2, BlockApprovalMeta, DelayTranche, - IndirectAssignmentCertV2, IndirectSignedApprovalVote, + v2::{BitfieldError, CandidateBitfield, CoreBitfield}, + AssignmentCertKindV2, BlockApprovalMeta, DelayTranche, IndirectAssignmentCertV2, + IndirectSignedApprovalVote, }, ValidationResult, APPROVAL_EXECUTION_TIMEOUT, }; @@ -76,6 +77,7 @@ use std::{ }; use approval_checking::RequiredTranches; +use bitvec::{order::Lsb0, vec::BitVec}; use criteria::{AssignmentCriteria, RealAssignmentCriteria}; use persisted_entries::{ApprovalEntry, BlockEntry, CandidateEntry}; use time::{slot_number_to_tick, Clock, ClockExt, SystemClock, Tick}; @@ -741,11 +743,11 @@ enum Action { tick: Tick, }, LaunchApproval { + claimed_core_indices: CoreBitfield, candidate_hash: CandidateHash, indirect_cert: IndirectAssignmentCertV2, assignment_tranche: DelayTranche, relay_block_hash: Hash, - candidate_index: CandidateIndex, session: SessionIndex, candidate: CandidateReceipt, backing_group: GroupIndex, @@ -956,11 +958,11 @@ async fn handle_actions( actions_iter = next_actions.into_iter(); }, Action::LaunchApproval { + claimed_core_indices, candidate_hash, indirect_cert, assignment_tranche, relay_block_hash, - candidate_index, session, candidate, backing_group, @@ -984,14 +986,27 @@ async fn handle_actions( }, }; - // Get all candidate indices in case this is a compact module vrf assignment. 
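// The replacement introduced in the next hunk maps claimed core indices onto candidate
// indices by walking the block's `(core, candidate_hash)` pairs. A standalone sketch of
// that mapping over plain types (the real code turns a `CoreBitfield` into a
// `CandidateBitfield`):
fn cores_to_candidate_positions(
    claimed_cores: &[u32],
    block_candidates: &[(u32, [u8; 32])], // (core index, candidate hash)
) -> Vec<usize> {
    claimed_cores
        .iter()
        .filter_map(|claimed| block_candidates.iter().position(|(core, _)| core == claimed))
        .collect()
}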
- let candidate_indices = - cores_to_candidate_indices(&block_entry, candidate_index, &indirect_cert.cert); - - ctx.send_unbounded_message(ApprovalDistributionMessage::DistributeAssignment( - indirect_cert, - candidate_indices, - )); + // Get an assignment bitfield for the given claimed cores. + match cores_to_candidate_indices(&claimed_core_indices, &block_entry) { + Ok(bitfield) => { + ctx.send_unbounded_message( + ApprovalDistributionMessage::DistributeAssignment( + indirect_cert, + bitfield, + ), + ); + }, + Err(err) => { + // Never happens, it should only happen if no cores are claimed, which is a bug. + gum::warn!( + target: LOG_TARGET, + ?block_hash, + ?err, + "Failed to create assignment bitfield" + ); + continue + }, + }; match approvals_cache.get(&candidate_hash) { Some(ApprovalOutcome::Approved) => { @@ -1049,26 +1064,23 @@ async fn handle_actions( } fn cores_to_candidate_indices( + core_indices: &CoreBitfield, block_entry: &BlockEntry, - candidate_index: CandidateIndex, - cert: &AssignmentCertV2, -) -> Vec { +) -> Result { let mut candidate_indices = Vec::new(); - match &cert.kind { - AssignmentCertKindV2::RelayVRFModuloCompact { sample: _, core_indices } => { - for cert_core_index in core_indices { - if let Some(candidate_index) = block_entry - .candidates() - .iter() - .position(|(core_index, _)| core_index == cert_core_index) - { - candidate_indices.push(candidate_index as _) - } - } - }, - _ => candidate_indices.push(candidate_index as _), + + // Map from core index to candidate index. + for claimed_core_index in core_indices.iter_ones() { + if let Some(candidate_index) = block_entry + .candidates() + .iter() + .position(|(core_index, _)| core_index.0 == claimed_core_index as u32) + { + candidate_indices.push(candidate_index as CandidateIndex); + } } - candidate_indices + + CandidateBitfield::try_from(candidate_indices) } fn distribution_messages_for_activation( @@ -1119,24 +1131,59 @@ fn distribution_messages_for_activation( match approval_entry.local_statements() { (None, None) | (None, Some(_)) => {}, // second is impossible case. (Some(assignment), None) => { - messages.push(ApprovalDistributionMessage::DistributeAssignment( - IndirectAssignmentCertV2 { - block_hash, - validator: assignment.validator_index(), - cert: assignment.cert().clone(), + match cores_to_candidate_indices( + assignment.assignment_bitfield(), + &block_entry, + ) { + Ok(bitfield) => messages.push( + ApprovalDistributionMessage::DistributeAssignment( + IndirectAssignmentCertV2 { + block_hash, + validator: assignment.validator_index(), + cert: assignment.cert().clone(), + }, + bitfield, + ), + ), + Err(err) => { + // Should never happen. If we fail here it means the assignment is null (no cores claimed). 
+ gum::warn!( + target: LOG_TARGET, + ?block_hash, + ?candidate_hash, + ?err, + "Failed to create assignment bitfield", + ); }, - cores_to_candidate_indices(&block_entry, i as _, assignment.cert()), - )); + } }, (Some(assignment), Some(approval_sig)) => { - messages.push(ApprovalDistributionMessage::DistributeAssignment( - IndirectAssignmentCertV2 { - block_hash, - validator: assignment.validator_index(), - cert: assignment.cert().clone(), + match cores_to_candidate_indices( + assignment.assignment_bitfield(), + &block_entry, + ) { + Ok(bitfield) => messages.push( + ApprovalDistributionMessage::DistributeAssignment( + IndirectAssignmentCertV2 { + block_hash, + validator: assignment.validator_index(), + cert: assignment.cert().clone(), + }, + bitfield, + ), + ), + Err(err) => { + gum::warn!( + target: LOG_TARGET, + ?block_hash, + ?candidate_hash, + ?err, + "Failed to create assignment bitfield", + ); + // If we didn't send assignment, we don't send approval. + continue }, - cores_to_candidate_indices(&block_entry, i as _, assignment.cert()), - )); + } messages.push(ApprovalDistributionMessage::DistributeApproval( IndirectSignedApprovalVote { @@ -1385,8 +1432,6 @@ async fn handle_approved_ancestor( const MAX_TRACING_WINDOW: usize = 200; const ABNORMAL_DEPTH_THRESHOLD: usize = 5; - use bitvec::{order::Lsb0, vec::BitVec}; - let mut span = jaeger::Span::new(&target, "approved-ancestor").with_stage(jaeger::Stage::ApprovalChecking); @@ -1712,7 +1757,7 @@ fn check_and_import_assignment( state: &State, db: &mut OverlayedBackend<'_, impl Backend>, assignment: IndirectAssignmentCertV2, - candidate_indices: Vec, + candidate_indices: CandidateBitfield, ) -> SubsystemResult<(AssignmentCheckResult, Vec)> { let tick_now = state.clock.tick_now(); @@ -1743,14 +1788,14 @@ fn check_and_import_assignment( let mut claimed_core_indices = Vec::new(); let mut assigned_candidate_hashes = Vec::new(); - for candidate_index in candidate_indices.iter() { + for candidate_index in candidate_indices.iter_ones() { let (claimed_core_index, assigned_candidate_hash) = - match block_entry.candidate(*candidate_index as usize) { + match block_entry.candidate(candidate_index) { Some((c, h)) => (*c, *h), None => return Ok(( AssignmentCheckResult::Bad(AssignmentCheckError::InvalidCandidateIndex( - *candidate_index, + candidate_index as _, )), Vec::new(), )), // no candidate at core. @@ -1761,7 +1806,7 @@ fn check_and_import_assignment( None => return Ok(( AssignmentCheckResult::Bad(AssignmentCheckError::InvalidCandidate( - *candidate_index, + candidate_index as _, assigned_candidate_hash, )), Vec::new(), @@ -1785,9 +1830,18 @@ fn check_and_import_assignment( assigned_candidate_hashes.push(assigned_candidate_hash); } + let claimed_core_index = match assignment.cert.kind { + // TODO: remove CoreIndex from certificates completely. + // https://github.com/paritytech/polkadot/issues/6988 + AssignmentCertKindV2::RelayVRFDelay { .. } => Some(claimed_core_indices[0]), + AssignmentCertKindV2::RelayVRFModulo { .. } => Some(claimed_core_indices[0]), + // VRelayVRFModuloCompact assignment doesn't need the the claimed cores for checking. + AssignmentCertKindV2::RelayVRFModuloCompact => None, + }; + // Check the assignment certificate. 
let res = state.assignment_criteria.check_assignment_cert( - claimed_core_indices.clone(), + claimed_core_index, assignment.validator, &criteria::Config::from(session_info), block_entry.relay_vrf_story(), @@ -1795,7 +1849,7 @@ fn check_and_import_assignment( backing_groups, ); - let tranche = match res { + let (claimed_core_indices, tranche) = match res { Err(crate::criteria::InvalidAssignment(reason)) => return Ok(( AssignmentCheckResult::Bad(AssignmentCheckError::InvalidCert( @@ -1804,7 +1858,7 @@ fn check_and_import_assignment( )), Vec::new(), )), - Ok(tranche) => { + Ok((claimed_core_indices, tranche)) => { let current_tranche = state.clock.tranche_now(state.slot_duration_millis, block_entry.slot()); @@ -1814,7 +1868,7 @@ fn check_and_import_assignment( return Ok((AssignmentCheckResult::TooFarInFuture, Vec::new())) } - tranche + (claimed_core_indices, tranche) }, }; @@ -1823,14 +1877,14 @@ fn check_and_import_assignment( let mut is_duplicate = false; // Import the assignments for all cores in the cert. for (assigned_candidate_hash, candidate_index) in - assigned_candidate_hashes.iter().zip(candidate_indices) + assigned_candidate_hashes.iter().zip(candidate_indices.iter_ones()) { let mut candidate_entry = match db.load_candidate_entry(&assigned_candidate_hash)? { Some(c) => c, None => return Ok(( AssignmentCheckResult::Bad(AssignmentCheckError::InvalidCandidate( - candidate_index, + candidate_index as _, *assigned_candidate_hash, )), Vec::new(), @@ -2294,34 +2348,28 @@ fn process_wakeup( None }; - if let Some((cert, val_index, tranche)) = maybe_cert { + if let Some((claimed_core_indices, cert, val_index, tranche)) = maybe_cert { let indirect_cert = IndirectAssignmentCertV2 { block_hash: relay_block, validator: val_index, cert }; - let index_in_candidate = - block_entry.candidates().iter().position(|(_, h)| &candidate_hash == h); - - if let Some(i) = index_in_candidate { - gum::trace!( - target: LOG_TARGET, - ?candidate_hash, - para_id = ?candidate_receipt.descriptor.para_id, - block_hash = ?relay_block, - "Launching approval work.", - ); + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + para_id = ?candidate_receipt.descriptor.para_id, + block_hash = ?relay_block, + "Launching approval work.", + ); - // sanity: should always be present. - actions.push(Action::LaunchApproval { - candidate_hash, - indirect_cert, - assignment_tranche: tranche, - relay_block_hash: relay_block, - candidate_index: i as _, - session: block_entry.session(), - candidate: candidate_receipt, - backing_group, - }); - } + actions.push(Action::LaunchApproval { + claimed_core_indices, + candidate_hash, + indirect_cert, + assignment_tranche: tranche, + relay_block_hash: relay_block, + session: block_entry.session(), + candidate: candidate_receipt, + backing_group, + }); } // Although we checked approval earlier in this function, diff --git a/node/core/approval-voting/src/persisted_entries.rs b/node/core/approval-voting/src/persisted_entries.rs index 91e7a381d637..6c5cb0de53a3 100644 --- a/node/core/approval-voting/src/persisted_entries.rs +++ b/node/core/approval-voting/src/persisted_entries.rs @@ -20,16 +20,20 @@ //! Within that context, things are plain-old-data. Within this module, //! data and logic are intertwined. 
-use polkadot_node_primitives::approval::{AssignmentCertV2, DelayTranche, RelayVRFStory}; +use polkadot_node_primitives::approval::{ + v2::CoreBitfield, AssignmentCertV2, DelayTranche, RelayVRFStory, +}; use polkadot_primitives::{ BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, Hash, SessionIndex, ValidatorIndex, ValidatorSignature, }; use sp_consensus_slots::Slot; -use bitvec::{order::Lsb0 as BitOrderLsb0, slice::BitSlice, vec::BitVec}; +use bitvec::{order::Lsb0 as BitOrderLsb0, slice::BitSlice}; use std::collections::BTreeMap; +use crate::approval_db::v1::Bitfield; + use super::{criteria::OurAssignment, time::Tick}; /// Metadata regarding a specific tranche of assignments for a specific candidate. @@ -80,7 +84,7 @@ pub struct ApprovalEntry { our_assignment: Option, our_approval_sig: Option, // `n_validators` bits. - assignments: BitVec, + assigned_validators: Bitfield, approved: bool, } @@ -92,10 +96,17 @@ impl ApprovalEntry { our_assignment: Option, our_approval_sig: Option, // `n_validators` bits. - assignments: BitVec, + assigned_validators: Bitfield, approved: bool, ) -> Self { - Self { tranches, backing_group, our_assignment, our_approval_sig, assignments, approved } + Self { + tranches, + backing_group, + our_assignment, + our_approval_sig, + assigned_validators, + approved, + } } // Access our assignment for this approval entry. @@ -107,7 +118,7 @@ impl ApprovalEntry { pub fn trigger_our_assignment( &mut self, tick_now: Tick, - ) -> Option<(AssignmentCertV2, ValidatorIndex, DelayTranche)> { + ) -> Option<(CoreBitfield, AssignmentCertV2, ValidatorIndex, DelayTranche)> { let our = self.our_assignment.as_mut().and_then(|a| { if a.triggered() { return None @@ -120,7 +131,7 @@ impl ApprovalEntry { our.map(|a| { self.import_assignment(a.tranche(), a.validator_index(), tick_now); - (a.cert().clone(), a.validator_index(), a.tranche()) + (a.assignment_bitfield().clone(), a.cert().clone(), a.validator_index(), a.tranche()) }) } @@ -131,7 +142,10 @@ impl ApprovalEntry { /// Whether a validator is already assigned. pub fn is_assigned(&self, validator_index: ValidatorIndex) -> bool { - self.assignments.get(validator_index.0 as usize).map(|b| *b).unwrap_or(false) + self.assigned_validators + .get(validator_index.0 as usize) + .map(|b| *b) + .unwrap_or(false) } /// Import an assignment. No-op if already assigned on the same tranche. @@ -158,14 +172,14 @@ impl ApprovalEntry { }; self.tranches[idx].assignments.push((validator_index, tick_now)); - self.assignments.set(validator_index.0 as _, true); + self.assigned_validators.set(validator_index.0 as _, true); } // Produce a bitvec indicating the assignments of all validators up to and // including `tranche`. - pub fn assignments_up_to(&self, tranche: DelayTranche) -> BitVec { + pub fn assignments_up_to(&self, tranche: DelayTranche) -> Bitfield { self.tranches.iter().take_while(|e| e.tranche <= tranche).fold( - bitvec::bitvec![u8, BitOrderLsb0; 0; self.assignments.len()], + bitvec::bitvec![u8, BitOrderLsb0; 0; self.assigned_validators.len()], |mut a, e| { for &(v, _) in &e.assignments { a.set(v.0 as _, true); @@ -193,12 +207,12 @@ impl ApprovalEntry { /// Get the number of validators in this approval entry. pub fn n_validators(&self) -> usize { - self.assignments.len() + self.assigned_validators.len() } /// Get the number of assignments by validators, including the local validator. 
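// A condensed model of `assignments_up_to` above: fold every tranche entry whose tranche
// is at or below the target into one bitfield of assigned validators. Plain tuples stand
// in for `TrancheEntry` here and the bitfield has the same `u8`/`Lsb0` shape as the
// `Bitfield` used in this file; names are illustrative only.
use bitvec::prelude::*;

fn assignments_up_to(
    tranches: &[(u32, Vec<u32>)], // (tranche, assigned validator indices)
    target: u32,
    n_validators: usize,
) -> BitVec<u8, Lsb0> {
    tranches
        .iter()
        .take_while(|(tranche, _)| *tranche <= target)
        .fold(bitvec![u8, Lsb0; 0; n_validators], |mut acc, (_, validators)| {
            for v in validators {
                acc.set(*v as usize, true);
            }
            acc
        })
}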
pub fn n_assignments(&self) -> usize { - self.assignments.count_ones() + self.assigned_validators.count_ones() } /// Get the backing group index of the approval entry. @@ -226,7 +240,7 @@ impl From for ApprovalEntry { backing_group: entry.backing_group, our_assignment: entry.our_assignment.map(Into::into), our_approval_sig: entry.our_approval_sig.map(Into::into), - assignments: entry.assignments, + assigned_validators: entry.assigned_validators, approved: entry.approved, } } @@ -239,7 +253,7 @@ impl From for crate::approval_db::v1::ApprovalEntry { backing_group: entry.backing_group, our_assignment: entry.our_assignment.map(Into::into), our_approval_sig: entry.our_approval_sig.map(Into::into), - assignments: entry.assignments, + assigned_validators: entry.assigned_validators, approved: entry.approved, } } @@ -253,7 +267,7 @@ pub struct CandidateEntry { // Assignments are based on blocks, so we need to track assignments separately // based on the block we are looking at. pub block_assignments: BTreeMap, - pub approvals: BitVec, + pub approvals: Bitfield, } impl CandidateEntry { @@ -336,7 +350,7 @@ pub struct BlockEntry { // A bitfield where the i'th bit corresponds to the i'th candidate in `candidates`. // The i'th bit is `true` iff the candidate has been approved in the context of this // block. The block can be considered approved if the bitfield has all bits set to `true`. - pub approved_bitfield: BitVec, + pub approved_bitfield: Bitfield, pub children: Vec, } diff --git a/node/network/approval-distribution/Cargo.toml b/node/network/approval-distribution/Cargo.toml index 3e1069334056..ef8bbfdb778c 100644 --- a/node/network/approval-distribution/Cargo.toml +++ b/node/network/approval-distribution/Cargo.toml @@ -15,6 +15,7 @@ itertools = "0.10.5" futures = "0.3.21" gum = { package = "tracing-gum", path = "../../gum" } +bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } [dev-dependencies] sp-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index 4d1854893651..8be931dee135 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -32,6 +32,7 @@ use polkadot_node_network_protocol::{ Versioned, View, }; use polkadot_node_primitives::approval::{ + v2::{AsBitIndex, CandidateBitfield}, BlockApprovalMeta, IndirectAssignmentCertV2, IndirectSignedApprovalVote, }; use polkadot_node_subsystem::{ @@ -104,8 +105,8 @@ struct ApprovalRouting { struct ApprovalEntry { // The assignment certificate. assignment: IndirectAssignmentCertV2, - // The candidates claimed by the certificate. - candidates: HashSet, + // The candidates claimed by the certificate. A mapping between bit index and candidate index. + candidates: CandidateBitfield, // The approval signatures for each `CandidateIndex` claimed by the assignment certificate. approvals: HashMap, // The validator index of the assignment signer. 
@@ -114,17 +115,25 @@ struct ApprovalEntry { routing_info: ApprovalRouting, } +#[derive(Debug)] +enum ApprovalEntryError { + InvalidValidatorIndex, + CandidateIndexOutOfBounds, + InvalidCandidateIndex, + DuplicateApproval, +} + impl ApprovalEntry { pub fn new( assignment: IndirectAssignmentCertV2, - candidates: Vec, + candidates: CandidateBitfield, routing_info: ApprovalRouting, ) -> ApprovalEntry { Self { validator_index: assignment.validator, assignment, approvals: HashMap::with_capacity(candidates.len()), - candidates: HashSet::from_iter(candidates.into_iter()), + candidates, routing_info, } } @@ -132,23 +141,19 @@ impl ApprovalEntry { // Create a `MessageSubject` to reference the assignment. pub fn create_assignment_knowledge(&self, block_hash: Hash) -> (MessageSubject, MessageKind) { ( - MessageSubject( - block_hash, - self.candidates.iter().cloned().collect::>(), - self.validator_index, - ), + MessageSubject(block_hash, self.candidates.clone(), self.validator_index), MessageKind::Assignment, ) } - // Create a `MessageSubject` to reference the assignment. + // Create a `MessageSubject` to reference the approval. pub fn create_approval_knowledge( &self, block_hash: Hash, candidate_index: CandidateIndex, ) -> (MessageSubject, MessageKind) { ( - MessageSubject(block_hash, vec![candidate_index], self.validator_index), + MessageSubject(block_hash, candidate_index.into(), self.validator_index), MessageKind::Approval, ) } @@ -169,28 +174,37 @@ impl ApprovalEntry { } // Records a new approval. Returns false if the claimed candidate is not found or we already have received the approval. - // TODO: use specific errors instead of `bool`. - pub fn note_approval(&mut self, approval: IndirectSignedApprovalVote) -> bool { + pub fn note_approval( + &mut self, + approval: IndirectSignedApprovalVote, + ) -> Result<(), ApprovalEntryError> { // First do some sanity checks: // - check validator index matches // - check claimed candidate // - check for duplicate approval if self.validator_index != approval.validator { - return false + return Err(ApprovalEntryError::InvalidValidatorIndex) } - if !self.candidates.contains(&approval.candidate_index) || - self.approvals.contains_key(&approval.candidate_index) - { - return false + if self.candidates.len() <= approval.candidate_index as usize { + return Err(ApprovalEntryError::CandidateIndexOutOfBounds) } - self.approvals.insert(approval.candidate_index, approval).is_none() + if !self.candidates.bit_at(approval.candidate_index.as_bit_index()) { + return Err(ApprovalEntryError::InvalidCandidateIndex) + } + + if self.approvals.contains_key(&approval.candidate_index) { + return Err(ApprovalEntryError::DuplicateApproval) + } + + self.approvals.insert(approval.candidate_index, approval); + Ok(()) } // Get the assignment certiticate and claimed candidates. - pub fn get_assignment(&self) -> (IndirectAssignmentCertV2, Vec) { - (self.assignment.clone(), self.candidates.iter().cloned().collect::>()) + pub fn get_assignment(&self) -> (IndirectAssignmentCertV2, CandidateBitfield) { + (self.assignment.clone(), self.candidates.clone()) } // Get all approvals for all candidates claimed by the assignment. @@ -248,7 +262,7 @@ enum MessageKind { // Assignments can span multiple candidates, while approvals refer to only one candidate. 
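A simplified sketch of the `note_approval` checks above, with stand-in index types and `()` in place of the stored `IndirectSignedApprovalVote`, to show the order in which the typed errors are produced:

use bitvec::{order::Lsb0, vec::BitVec};
use std::collections::HashMap;

type CandidateIndex = u32;
type ValidatorIndex = u32;

#[derive(Debug, PartialEq)]
enum NoteApprovalError {
    InvalidValidatorIndex,
    CandidateIndexOutOfBounds,
    InvalidCandidateIndex,
    DuplicateApproval,
}

struct Entry {
    validator: ValidatorIndex,
    // One bit per candidate index claimed by the assignment.
    claimed: BitVec<u8, Lsb0>,
    approvals: HashMap<CandidateIndex, ()>,
}

impl Entry {
    // Same ordering as above: signer check, bounds check, claim check, duplicate check.
    fn note_approval(
        &mut self,
        validator: ValidatorIndex,
        candidate: CandidateIndex,
    ) -> Result<(), NoteApprovalError> {
        if self.validator != validator {
            return Err(NoteApprovalError::InvalidValidatorIndex)
        }
        if self.claimed.len() <= candidate as usize {
            return Err(NoteApprovalError::CandidateIndexOutOfBounds)
        }
        if !self.claimed[candidate as usize] {
            return Err(NoteApprovalError::InvalidCandidateIndex)
        }
        if self.approvals.contains_key(&candidate) {
            return Err(NoteApprovalError::DuplicateApproval)
        }
        self.approvals.insert(candidate, ());
        Ok(())
    }
}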
// #[derive(Debug, Clone, Hash, PartialEq, Eq)] -struct MessageSubject(Hash, pub Vec, ValidatorIndex); +struct MessageSubject(Hash, pub CandidateBitfield, ValidatorIndex); #[derive(Debug, Clone, Default)] struct Knowledge { @@ -290,14 +304,18 @@ impl Knowledge { // In case of succesful insertion of multiple candidate assignments create additional // entries for each assigned candidate. This fakes knowledge of individual assignments, but // we need to share the same `MessageSubject` with the followup approval candidate index. - if kind == MessageKind::Assignment && success && message.1.len() > 1 { - message.1.iter().fold(success, |success, candidate_index| { - success & - self.insert( - MessageSubject(message.0.clone(), vec![*candidate_index], message.2), - kind, - ) - }) + if kind == MessageKind::Assignment && success && message.1.count_ones() > 1 { + message + .1 + .iter_ones() + .map(|candidate_index| candidate_index as CandidateIndex) + .fold(success, |success, candidate_index| { + success & + self.insert( + MessageSubject(message.0, candidate_index.into(), message.2), + kind, + ) + }) } else { success } @@ -336,7 +354,7 @@ struct BlockEntry { pub session: SessionIndex, /// Approval entries for whole block. These also contain all approvals in the cae of multiple candidates /// being claimed by assignments. - approval_entries: HashMap<(ValidatorIndex, Vec), ApprovalEntry>, + approval_entries: HashMap<(ValidatorIndex, CandidateBitfield), ApprovalEntry>, } impl BlockEntry { @@ -349,13 +367,13 @@ impl BlockEntry { // First map one entry per candidate to the same key we will use in `approval_entries`. // Key is (Validator_index, Vec) that links the `ApprovalEntry` to the (K,V) // entry in `candidate_entry.messages`. - for claimed_candidate_index in &entry.candidates { - match self.candidates.get_mut(*claimed_candidate_index as usize) { + for claimed_candidate_index in entry.candidates.iter_ones() { + match self.candidates.get_mut(claimed_candidate_index) { Some(candidate_entry) => { candidate_entry .messages .entry(entry.get_validator_index()) - .or_insert(entry.candidates.iter().cloned().collect::>()); + .or_insert(entry.candidates.clone()); }, None => { // This should never happen, but if it happens, it means the subsystem is broken. @@ -370,10 +388,7 @@ impl BlockEntry { } self.approval_entries - .entry(( - entry.validator_index, - entry.candidates.clone().into_iter().collect::>(), - )) + .entry((entry.validator_index, entry.candidates.clone())) .or_insert(entry) } @@ -441,7 +456,7 @@ impl BlockEntry { #[derive(Debug, Default)] struct CandidateEntry { // The value represents part of the lookup key in `approval_entries` to fetch the assignment and existing votes. 
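The knowledge bookkeeping above also records one single-candidate subject per claimed bit when a multi-candidate assignment is inserted, so that follow-up approvals (which reference exactly one candidate) hit the same knowledge set. A standalone sketch of that fan-out, with a simplified `(block, bits, signer)` key standing in for `MessageSubject`:

use bitvec::{bitvec, order::Lsb0, vec::BitVec};
use std::collections::HashSet;

type BlockHash = [u8; 32];
type ValidatorIndex = u32;
type Subject = (BlockHash, BitVec<u8, Lsb0>, ValidatorIndex);

/// Insert an assignment subject; for multi-candidate assignments also insert one
/// single-bit subject per claimed candidate.
fn insert_assignment(known: &mut HashSet<Subject>, subject: Subject) -> bool {
    let inserted = known.insert(subject.clone());
    if inserted && subject.1.count_ones() > 1 {
        for bit in subject.1.iter_ones() {
            let mut single = bitvec![u8, Lsb0; 0; bit + 1];
            single.set(bit, true);
            known.insert((subject.0, single, subject.2));
        }
    }
    inserted
}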
- messages: HashMap>, + messages: HashMap, } #[derive(Debug, Clone, PartialEq)] @@ -460,7 +475,7 @@ impl MessageSource { } enum PendingMessage { - Assignment(IndirectAssignmentCertV2, Vec), + Assignment(IndirectAssignmentCertV2, CandidateBitfield), Approval(IndirectSignedApprovalVote), } @@ -686,7 +701,7 @@ impl State { ctx: &mut Context, metrics: &Metrics, peer_id: PeerId, - assignments: Vec<(IndirectAssignmentCertV2, Vec)>, + assignments: Vec<(IndirectAssignmentCertV2, CandidateBitfield)>, rng: &mut R, ) where R: CryptoRng + Rng, @@ -761,7 +776,7 @@ impl State { peer_id, assignments .into_iter() - .map(|(cert, candidate)| (cert.into(), vec![candidate])) + .map(|(cert, candidate)| (cert.into(), candidate.into())) .collect::>(), rng, ) @@ -907,7 +922,7 @@ impl State { metrics: &Metrics, source: MessageSource, assignment: IndirectAssignmentCertV2, - claimed_candidate_indices: Vec, + claimed_candidate_indices: CandidateBitfield, rng: &mut R, ) where R: CryptoRng + Rng, @@ -955,6 +970,15 @@ impl State { "Duplicate assignment", ); modify_reputation(ctx.sender(), peer_id, COST_DUPLICATE_MESSAGE).await; + } else { + gum::trace!( + target: LOG_TARGET, + ?peer_id, + hash = ?block_hash, + ?validator_index, + ?message_subject, + "We sent the message to the peer while peer was sending it to us. Known race condition.", + ); } return } @@ -1112,7 +1136,7 @@ impl State { .as_ref() .map(|t| t.local_grid_neighbors().route_to_peer(required_routing, &peer)) { - peers.push(peer.clone()); + peers.push(peer); continue } @@ -1124,7 +1148,7 @@ impl State { if route_random { approval_entry.routing_info_mut().random_routing.inc_sent(); - peers.push(peer.clone()); + peers.push(peer); } } @@ -1149,9 +1173,7 @@ impl State { let peers = peers .iter() .filter_map(|peer_id| { - self.peer_views - .get(peer_id) - .map(|peer_entry| (peer_id.clone(), peer_entry.version)) + self.peer_views.get(peer_id).map(|peer_entry| (*peer_id, peer_entry.version)) }) .collect::>(); @@ -1183,7 +1205,7 @@ impl State { }; // compute metadata on the assignment. - let message_subject = MessageSubject(block_hash, vec![candidate_index], validator_index); + let message_subject = MessageSubject(block_hash, candidate_index.into(), validator_index); let message_kind = MessageKind::Approval; if let Some(peer_id) = source.peer_id() { @@ -1317,7 +1339,7 @@ impl State { // Invariant: to our knowledge, none of the peers except for the `source` know about the approval. metrics.on_approval_imported(); - if !approval_entry.note_approval(vote.clone()) { + if let Err(err) = approval_entry.note_approval(vote.clone()) { // this would indicate a bug in approval-voting: // - validator index mismatch // - candidate index mismatch @@ -1327,7 +1349,8 @@ impl State { hash = ?block_hash, ?candidate_index, ?validator_index, - "Possible bug: Vote import failed: validator/candidate index mismatch or duplicate", + ?err, + "Possible bug: Vote import failed", ); return @@ -1429,13 +1452,12 @@ impl State { let sigs = block_entry .get_approval_entries(index) .into_iter() - .map(|approval_entry| { + .flat_map(|approval_entry| { approval_entry .get_approvals() .into_iter() .map(|approval| (approval.validator, approval.signature)) }) - .flatten() .collect::>(); all_sigs.extend(sigs); } @@ -1942,7 +1964,7 @@ pub const MAX_APPROVAL_BATCH_SIZE: usize = ensure_size_not_zero( // Low level helper for sending assignments. 
async fn send_assignments_batched_inner( sender: &mut impl overseer::ApprovalDistributionSenderTrait, - batch: Vec<(IndirectAssignmentCertV2, Vec)>, + batch: Vec<(IndirectAssignmentCertV2, CandidateBitfield)>, peers: &Vec, // TODO: use `ValidationVersion`. peer_version: u32, @@ -1962,7 +1984,18 @@ async fn send_assignments_batched_inner( // `IndirectAssignmentCertV2` -> `IndirectAssignmentCert` let batch = batch .into_iter() - .filter_map(|(cert, candidates)| cert.try_into().ok().map(|cert| (cert, candidates[0]))) + .filter_map(|(cert, candidates)| { + cert.try_into().ok().map(|cert| { + ( + cert, + // First 1 bit index is the candidate index. + candidates + .first_one() + .map(|index| index as CandidateIndex) + .expect("Assignment was checked for not being empty; qed"), + ) + }) + }) .collect(); sender .send_message(NetworkBridgeTxMessage::SendValidationMessage( @@ -1982,7 +2015,7 @@ async fn send_assignments_batched_inner( /// of assignments and can `select!` other tasks. pub(crate) async fn send_assignments_batched( sender: &mut impl overseer::ApprovalDistributionSenderTrait, - v2_assignments: Vec<(IndirectAssignmentCertV2, Vec)>, + v2_assignments: Vec<(IndirectAssignmentCertV2, CandidateBitfield)>, peers: &Vec<(PeerId, ProtocolVersion)>, ) { let v1_peers = filter_by_peer_version(peers, ValidationVersion::V1.into()); @@ -1991,7 +2024,7 @@ pub(crate) async fn send_assignments_batched( if v1_peers.len() > 0 { let mut v1_assignments = v2_assignments.clone(); // Older peers(v1) do not understand `AssignmentsV2` messages, so we have to filter these out. - v1_assignments.retain(|(_, candidates)| candidates.len() == 1); + v1_assignments.retain(|(_, candidates)| candidates.count_ones() == 1); let mut v1_batches = v1_assignments.into_iter().peekable(); diff --git a/node/network/bridge/src/rx/mod.rs b/node/network/bridge/src/rx/mod.rs index 7408a0b25c71..d480f9c4a781 100644 --- a/node/network/bridge/src/rx/mod.rs +++ b/node/network/bridge/src/rx/mod.rs @@ -755,7 +755,7 @@ fn update_our_view( shared .validation_peers .iter() - .map(|(peer_id, peer_data)| (peer_id.clone(), peer_data.version)) + .map(|(peer_id, peer_data)| (*peer_id, peer_data.version)) .collect::>(), shared.collation_peers.keys().cloned().collect::>(), ) diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index 90807558b255..46f432997c8b 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -428,11 +428,9 @@ impl_versioned_try_from!( pub mod vstaging { use parity_scale_codec::{Decode, Encode}; use polkadot_node_primitives::approval::{ - IndirectAssignmentCertV2, IndirectSignedApprovalVote, + v2::CandidateBitfield, IndirectAssignmentCertV2, IndirectSignedApprovalVote, }; - use polkadot_primitives::CandidateIndex; - // Re-export stuff that has not changed since v1. pub use crate::v1::{ declare_signature_payload, BitfieldDistributionMessage, CollationProtocol, @@ -461,10 +459,12 @@ pub mod vstaging { #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] pub enum ApprovalDistributionMessage { /// Assignments for candidates in recent, unfinalized blocks. + /// We use a bitfield to reference claimed candidates, where the bit index is equal to candidate index. /// /// Actually checking the assignment may yield a different result. + /// TODO: Look at getting rid of bitfield in the future. 
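The batching code above keeps the full claimed-candidate bitfield for v2 peers and, for v1 peers, only forwards assignments that claim exactly one candidate, referenced by its index. A sketch of that split, assuming a hypothetical `Cert` stand-in for `IndirectAssignmentCertV2`:

use bitvec::{order::Lsb0, vec::BitVec};

type CandidateIndex = u32;

#[derive(Clone)]
struct Cert(u64); // stand-in payload, for illustration only

fn split_for_peers(
    batch: Vec<(Cert, BitVec<u8, Lsb0>)>,
) -> (Vec<(Cert, CandidateIndex)>, Vec<(Cert, BitVec<u8, Lsb0>)>) {
    let mut v1 = Vec::new();
    let mut v2 = Vec::new();
    for (cert, claimed) in batch {
        if claimed.count_ones() == 1 {
            // Exactly one bit is set, so `first_one` cannot be `None`.
            let index = claimed.first_one().expect("one bit set; qed") as CandidateIndex;
            v1.push((cert.clone(), index));
        }
        // v2 peers understand both single- and multi-candidate assignments.
        v2.push((cert, claimed));
    }
    (v1, v2)
}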
#[codec(index = 0)] - Assignments(Vec<(IndirectAssignmentCertV2, Vec)>), + Assignments(Vec<(IndirectAssignmentCertV2, CandidateBitfield)>), /// Approvals for candidates in some recent, unfinalized block. #[codec(index = 1)] Approvals(Vec), diff --git a/node/primitives/Cargo.toml b/node/primitives/Cargo.toml index c6812d2cc02c..0919a99d3ffb 100644 --- a/node/primitives/Cargo.toml +++ b/node/primitives/Cargo.toml @@ -20,6 +20,7 @@ polkadot-parachain = { path = "../../parachain", default-features = false } schnorrkel = "0.9.1" thiserror = "1.0.31" serde = { version = "1.0.137", features = ["derive"] } +bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } [target.'cfg(not(target_os = "unknown"))'.dependencies] zstd = { version = "0.11.2", default-features = false } diff --git a/node/primitives/src/approval.rs b/node/primitives/src/approval.rs index e51111e6e4c8..d7e7be861aab 100644 --- a/node/primitives/src/approval.rs +++ b/node/primitives/src/approval.rs @@ -31,24 +31,168 @@ use sp_consensus_babe as babe_primitives; /// Earlier tranches of validators check first, with later tranches serving as backup. pub type DelayTranche = u32; -/// A static context used to compute the Relay VRF story based on the -/// VRF output included in the header-chain. -pub const RELAY_VRF_STORY_CONTEXT: &[u8] = b"A&V RC-VRF"; +/// Static contexts use to generate randomness for v1 assignments. +pub mod v1 { + /// A static context used to compute the Relay VRF story based on the + /// VRF output included in the header-chain. + pub const RELAY_VRF_STORY_CONTEXT: &[u8] = b"A&V RC-VRF"; -/// A static context used for all relay-vrf-modulo VRFs. -pub const RELAY_VRF_MODULO_CONTEXT: &[u8] = b"A&V MOD"; + /// A static context used for all relay-vrf-modulo VRFs. + pub const RELAY_VRF_MODULO_CONTEXT: &[u8] = b"A&V MOD"; -/// A static context used for all relay-vrf-modulo VRFs. -pub const RELAY_VRF_DELAY_CONTEXT: &[u8] = b"A&V DELAY"; + /// A static context used for all relay-vrf-modulo VRFs. + pub const RELAY_VRF_DELAY_CONTEXT: &[u8] = b"A&V DELAY"; -/// A static context used for transcripts indicating assigned availability core. -pub const ASSIGNED_CORE_CONTEXT: &[u8] = b"A&V ASSIGNED"; + /// A static context used for transcripts indicating assigned availability core. + pub const ASSIGNED_CORE_CONTEXT: &[u8] = b"A&V ASSIGNED"; -/// A static context associated with producing randomness for a core. -pub const CORE_RANDOMNESS_CONTEXT: &[u8] = b"A&V CORE"; + /// A static context associated with producing randomness for a core. + pub const CORE_RANDOMNESS_CONTEXT: &[u8] = b"A&V CORE"; -/// A static context associated with producing randomness for a tranche. -pub const TRANCHE_RANDOMNESS_CONTEXT: &[u8] = b"A&V TRANCHE"; + /// A static context associated with producing randomness for a tranche. + pub const TRANCHE_RANDOMNESS_CONTEXT: &[u8] = b"A&V TRANCHE"; +} + +/// A list of primitives introduced by v2. +pub mod v2 { + use parity_scale_codec::{Decode, Encode}; + use std::ops::BitOr; + + use super::{CandidateIndex, CoreIndex}; + use bitvec::{prelude::Lsb0, vec::BitVec}; + + /// A static context associated with producing randomness for a core. + pub const CORE_RANDOMNESS_CONTEXT: &[u8] = b"A&V CORE v2"; + /// A static context associated with producing randomness for v2 multi-core assignments. + pub const ASSIGNED_CORE_CONTEXT: &[u8] = b"A&V ASSIGNED v2"; + /// A static context used for all relay-vrf-modulo VRFs for v2 multi-core assignments. 
+ pub const RELAY_VRF_MODULO_CONTEXT: &[u8] = b"A&V MOD v2"; + /// A read-only bitvec wrapper + #[derive(Clone, Debug, Encode, Decode, Hash, PartialEq, Eq)] + pub struct Bitfield(BitVec, std::marker::PhantomData); + + /// A `read-only`, `non-zero` bitfield. + /// Each 1 bit identifies a candidate by the bitfield bit index. + pub type CandidateBitfield = Bitfield; + /// A bitfield of core assignments. + pub type CoreBitfield = Bitfield; + + /// Errors that can occur when creating and manipulating bitfields. + #[derive(Debug)] + pub enum BitfieldError { + /// All bits are zero. + NullAssignment, + } + + /// A bit index in `Bitfield`. + #[cfg_attr(test, derive(PartialEq, Clone))] + pub struct BitIndex(pub usize); + + /// Helper trait to convert primitives to `BitIndex`. + pub trait AsBitIndex { + /// Returns the index of the corresponding bit in `Bitfield`. + fn as_bit_index(&self) -> BitIndex; + } + + impl Bitfield { + /// Returns the bit value at specified `index`. If `index` is greater than bitfield size, + /// returns `false`. + pub fn bit_at(&self, index: BitIndex) -> bool { + if self.0.len() <= index.0 { + false + } else { + self.0[index.0] + } + } + + /// Returns number of bits. + pub fn len(&self) -> usize { + self.0.len() + } + + /// Returns the number of 1 bits. + pub fn count_ones(&self) -> usize { + self.0.count_ones() + } + + /// Returns the index of the first 1 bit. + pub fn first_one(&self) -> Option { + self.0.first_one() + } + + /// Returns an iterator over inner bits. + pub fn iter_ones(&self) -> bitvec::slice::IterOnes { + self.0.iter_ones() + } + + /// For testing purpose, we want a inner mutable ref. + #[cfg(test)] + pub fn inner_mut(&mut self) -> &mut BitVec { + &mut self.0 + } + } + + impl AsBitIndex for CandidateIndex { + fn as_bit_index(&self) -> BitIndex { + BitIndex(*self as usize) + } + } + + impl AsBitIndex for CoreIndex { + fn as_bit_index(&self) -> BitIndex { + BitIndex(self.0 as usize) + } + } + + impl AsBitIndex for usize { + fn as_bit_index(&self) -> BitIndex { + BitIndex(*self) + } + } + + impl From for Bitfield + where + T: AsBitIndex, + { + fn from(value: T) -> Self { + Self( + { + let mut bv = bitvec::bitvec![u8, Lsb0; 0; value.as_bit_index().0 + 1]; + bv.set(value.as_bit_index().0, true); + bv + }, + Default::default(), + ) + } + } + + impl TryFrom> for Bitfield + where + T: Into>, + { + type Error = BitfieldError; + + fn try_from(mut value: Vec) -> Result { + if value.is_empty() { + return Err(BitfieldError::NullAssignment) + } + + let initial_bitfield = + value.pop().expect("Just checked above it's not empty; qed").into(); + + Ok(Self( + value.into_iter().fold(initial_bitfield.0, |initial_bitfield, element| { + let mut bitfield: Bitfield = element.into(); + bitfield + .0 + .resize(std::cmp::max(initial_bitfield.len(), bitfield.0.len()), false); + bitfield.0.bitor(initial_bitfield) + }), + Default::default(), + )) + } + } +} /// random bytes derived from the VRF submitted within the block by the /// block author as a credential and used as input to approval assignment criteria. @@ -84,25 +228,20 @@ pub enum AssignmentCertKindV2 { /// An assignment story based on the VRF that authorized the relay-chain block where the /// candidate was included combined with a sample number. /// - /// The context used to produce bytes is [`RELAY_VRF_MODULO_CONTEXT`] + /// The context used to produce bytes is [`v2::RELAY_VRF_MODULO_CONTEXT`] RelayVRFModulo { /// The sample number used in this cert. 
sample: u32, }, /// Multiple assignment stories based on the VRF that authorized the relay-chain block where the - /// candidate was included combined with a sample number. + /// candidates were included. /// - /// The context used to produce bytes is [`RELAY_VRF_MODULO_CONTEXT`] - RelayVRFModuloCompact { - /// The number of samples. - sample: u32, - /// The assigned cores. - core_indices: Vec, - }, + /// The context is [`v2::RELAY_VRF_MODULO_CONTEXT`] + RelayVRFModuloCompact, /// An assignment story based on the VRF that authorized the relay-chain block where the /// candidate was included combined with the index of a particular core. /// - /// The context is [`RELAY_VRF_DELAY_CONTEXT`] + /// The context is [`v2::RELAY_VRF_DELAY_CONTEXT`] RelayVRFDelay { /// The core index chosen in this cert. core_index: CoreIndex, @@ -288,7 +427,7 @@ impl UnsafeVRFOutput { .0 .attach_input_hash(&pubkey, transcript) .map_err(ApprovalError::SchnorrkelSignature)?; - Ok(RelayVRFStory(inout.make_bytes(RELAY_VRF_STORY_CONTEXT))) + Ok(RelayVRFStory(inout.make_bytes(v1::RELAY_VRF_STORY_CONTEXT))) } } @@ -318,3 +457,58 @@ pub fn babe_unsafe_vrf_info(header: &Header) -> Option { None } + +#[cfg(test)] +mod test { + use super::{ + v2::{BitIndex, Bitfield}, + *, + }; + + #[test] + fn test_assignment_bitfield_from_vec() { + let candidate_indices = vec![1u32, 7, 3, 10, 45, 8, 200, 2]; + let max_index = *candidate_indices.iter().max().unwrap(); + let bitfield = Bitfield::try_from(candidate_indices.clone()).unwrap(); + let candidate_indices = + candidate_indices.into_iter().map(|i| BitIndex(i as usize)).collect::>(); + + // Test 1 bits. + for index in candidate_indices.clone() { + assert!(bitfield.bit_at(index)); + } + + // Test 0 bits. + for index in 0..max_index { + if candidate_indices.contains(&BitIndex(index as usize)) { + continue + } + assert!(!bitfield.bit_at(BitIndex(index as usize))); + } + } + + #[test] + fn test_assignment_bitfield_invariant_msb() { + let core_indices = vec![CoreIndex(1), CoreIndex(3), CoreIndex(10), CoreIndex(20)]; + let mut bitfield = Bitfield::try_from(core_indices.clone()).unwrap(); + assert!(bitfield.inner_mut().pop().unwrap()); + + for i in 0..1024 { + assert!(Bitfield::try_from(CoreIndex(i)).unwrap().inner_mut().pop().unwrap()); + assert!(Bitfield::try_from(i).unwrap().inner_mut().pop().unwrap()); + } + } + + #[test] + fn test_assignment_bitfield_basic() { + let bitfield = Bitfield::try_from(CoreIndex(0)).unwrap(); + assert!(bitfield.bit_at(BitIndex(0))); + assert!(!bitfield.bit_at(BitIndex(1))); + assert_eq!(bitfield.len(), 1); + + let mut bitfield = Bitfield::try_from(20 as CandidateIndex).unwrap(); + assert!(bitfield.bit_at(BitIndex(20))); + assert_eq!(bitfield.inner_mut().count_ones(), 1); + assert_eq!(bitfield.len(), 21); + } +} diff --git a/node/service/src/parachains_db/upgrade.rs b/node/service/src/parachains_db/upgrade.rs index 01d4fb62f7f6..7aee05f7d7c5 100644 --- a/node/service/src/parachains_db/upgrade.rs +++ b/node/service/src/parachains_db/upgrade.rs @@ -15,6 +15,8 @@ #![cfg(feature = "full-node")] +use kvdb::DBTransaction; + use super::{columns, other_io_error, DatabaseKind, LOG_TARGET}; use std::{ fs, io, @@ -28,7 +30,8 @@ type Version = u32; const VERSION_FILE_NAME: &'static str = "parachain_db_version"; /// Current db version. -const CURRENT_VERSION: Version = 2; +/// Version 3 changes approval db format for `OurAssignment`. 
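The `TryFrom<Vec<T>>` construction and the `test_assignment_bitfield_invariant_msb` test above rely on the bitfield length being exactly `max(index) + 1`, so the most significant bit is always set. A runnable sketch of the same construction rule using `bitvec` directly:

use bitvec::{bitvec, order::Lsb0, vec::BitVec};

/// Build a bitfield from a non-empty list of indices; the length is `max(index) + 1`,
/// so the last bit is always `true`. Returns `None` for an empty ("null") assignment.
fn bitfield_from_indices(indices: &[u32]) -> Option<BitVec<u8, Lsb0>> {
    let max = *indices.iter().max()?;
    let mut bits = bitvec![u8, Lsb0; 0; max as usize + 1];
    for &i in indices {
        bits.set(i as usize, true);
    }
    Some(bits)
}

fn main() {
    let bits = bitfield_from_indices(&[1, 7, 3]).expect("non-empty input");
    assert_eq!(bits.len(), 8); // highest index + 1
    assert!(bits[bits.len() - 1]); // MSB invariant
    assert_eq!(bits.count_ones(), 3);
    assert!(bitfield_from_indices(&[]).is_none());
}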
+const CURRENT_VERSION: Version = 3; #[derive(thiserror::Error, Debug)] pub enum Error { @@ -58,6 +61,8 @@ pub(crate) fn try_upgrade_db(db_path: &Path, db_kind: DatabaseKind) -> Result<() Some(0) => migrate_from_version_0_to_1(db_path, db_kind)?, // 1 -> 2 migration Some(1) => migrate_from_version_1_to_2(db_path, db_kind)?, + // 2 -> 3 migration + Some(2) => migrate_from_version_2_to_3(db_path, db_kind)?, // Already at current version, do nothing. Some(CURRENT_VERSION) => (), // This is an arbitrary future version, we don't handle it. @@ -127,6 +132,19 @@ fn migrate_from_version_1_to_2(path: &Path, db_kind: DatabaseKind) -> Result<(), }) } +fn migrate_from_version_2_to_3(path: &Path, db_kind: DatabaseKind) -> Result<(), Error> { + gum::info!(target: LOG_TARGET, "Migrating parachains db from version 2 to version 3 ..."); + + match db_kind { + DatabaseKind::ParityDB => paritydb_migrate_from_version_2_to_3(path), + DatabaseKind::RocksDB => rocksdb_migrate_from_version_2_to_3(path), + } + .and_then(|result| { + gum::info!(target: LOG_TARGET, "Migration complete! "); + Ok(result) + }) +} + /// Migration from version 0 to version 1: /// * the number of columns has changed from 3 to 5; fn rocksdb_migrate_from_version_0_to_1(path: &Path) -> Result<(), Error> { @@ -278,6 +296,41 @@ fn paritydb_migrate_from_version_1_to_2(path: &Path) -> Result<(), Error> { Ok(()) } +/// Migration from version 2 to version 3. +/// Clears the approval voting db column which changed format and cannot be migrated. +fn paritydb_migrate_from_version_2_to_3(path: &Path) -> Result<(), Error> { + parity_db::clear_column( + path, + super::columns::v2::COL_APPROVAL_DATA.try_into().expect("Invalid column ID"), + ) + .map_err(|e| other_io_error(format!("Error clearing column {:?}", e)))?; + + Ok(()) +} + +/// Migration from version 2 to version 3. +/// Clears the approval voting db column because `OurAssignment` changed format. Not all +/// instances of it can be converted to new version so we need to wipe it clean. +fn rocksdb_migrate_from_version_2_to_3(path: &Path) -> Result<(), Error> { + use kvdb::DBOp; + use kvdb_rocksdb::{Database, DatabaseConfig}; + + let db_path = path + .to_str() + .ok_or_else(|| super::other_io_error("Invalid database path".into()))?; + let db_cfg = DatabaseConfig::with_columns(super::columns::v2::NUM_COLUMNS); + let db = Database::open(&db_cfg, db_path)?; + + // Wipe all entries in one operation. 
+ let ops = vec![DBOp::DeletePrefix { + col: super::columns::v2::COL_APPROVAL_DATA, + prefix: kvdb::DBKey::from_slice(b""), + }]; + + let transaction = DBTransaction { ops }; + db.write(transaction)?; + Ok(()) +} #[cfg(test)] mod tests { use super::{columns::v2::*, *}; diff --git a/node/subsystem-types/Cargo.toml b/node/subsystem-types/Cargo.toml index 22528503ccc4..78dfa86f7c85 100644 --- a/node/subsystem-types/Cargo.toml +++ b/node/subsystem-types/Cargo.toml @@ -22,3 +22,4 @@ smallvec = "1.8.0" substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "master" } thiserror = "1.0.31" async-trait = "0.1.57" +bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 078b9b5ed049..de24369af0db 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -33,7 +33,10 @@ use polkadot_node_network_protocol::{ UnifiedReputationChange, }; use polkadot_node_primitives::{ - approval::{BlockApprovalMeta, IndirectAssignmentCertV2, IndirectSignedApprovalVote}, + approval::{ + v2::CandidateBitfield, BlockApprovalMeta, IndirectAssignmentCertV2, + IndirectSignedApprovalVote, + }, AvailableData, BabeEpoch, BlockWeight, CandidateVotes, CollationGenerationConfig, CollationSecondedSignal, DisputeMessage, DisputeStatus, ErasureChunk, PoV, SignedDisputeStatement, SignedFullStatement, ValidationResult, @@ -770,7 +773,7 @@ pub enum ApprovalVotingMessage { /// Should not be sent unless the block hash is known. CheckAndImportAssignment( IndirectAssignmentCertV2, - Vec, + CandidateBitfield, oneshot::Sender, ), /// Check if the approval vote is valid and can be accepted by our view of the @@ -805,7 +808,7 @@ pub enum ApprovalDistributionMessage { NewBlocks(Vec), /// Distribute an assignment cert from the local validator. The cert is assumed /// to be valid, relevant, and for the given relay-parent and validator index. - DistributeAssignment(IndirectAssignmentCertV2, Vec), + DistributeAssignment(IndirectAssignmentCertV2, CandidateBitfield), /// Distribute an approval vote for the local validator. The approval vote is assumed to be /// valid, relevant, and the corresponding approval already issued. /// If not, the subsystem is free to drop the message. diff --git a/roadmap/implementers-guide/src/node/approval/approval-voting.md b/roadmap/implementers-guide/src/node/approval/approval-voting.md index 2761f21b1c2c..3f1eeedfc27a 100644 --- a/roadmap/implementers-guide/src/node/approval/approval-voting.md +++ b/roadmap/implementers-guide/src/node/approval/approval-voting.md @@ -54,6 +54,8 @@ struct OurAssignment { tranche: DelayTranche, validator_index: ValidatorIndex, triggered: bool, + /// A subset of the core indices obtained from the VRF output. 
+ pub assignment_bitfield: AssignmentBitfield, } struct ApprovalEntry { From 1a38026777e4313ccfee2c3fbcb2008eeda5a54e Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Sat, 8 Apr 2023 12:42:13 +0000 Subject: [PATCH 033/105] fix merge damage Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/lib.rs | 21 ++++++------------- node/network/approval-distribution/src/lib.rs | 6 ++++-- 2 files changed, 10 insertions(+), 17 deletions(-) diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index 78165507a75a..80479f77428e 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -1503,7 +1503,6 @@ async fn handle_approved_ancestor( let mut span = span .child("handle-approved-ancestor") .with_stage(jaeger::Stage::ApprovalChecking); - use bitvec::{order::Lsb0, vec::BitVec}; let mut all_approved_max = None; @@ -1839,9 +1838,13 @@ fn check_and_import_assignment( .map(|span| span.child("check-and-import-assignment")) .unwrap_or_else(|| jaeger::Span::new(assignment.block_hash, "check-and-import-assignment")) .with_relay_parent(assignment.block_hash) - .with_uint_tag("candidate-index", candidate_index as u64) + // .with_uint_tag("candidate-index", candidate_index as u64) .with_stage(jaeger::Stage::ApprovalChecking); + for candidate_index in candidate_indices.iter_ones() { + check_and_import_assignment_span.add_uint_tag("candidate-index", candidate_index as u64); + } + let block_entry = match db.load_block_entry(&assignment.block_hash)? { Some(b) => b, None => @@ -1901,15 +1904,6 @@ fn check_and_import_assignment( format!("{:?}", jaeger::hash_to_trace_identifier(assigned_candidate_hash.0)), ); - let mut candidate_entry = match db.load_candidate_entry(&assigned_candidate_hash)? { - Some(c) => c, - None => - return Ok(AssignmentCheckResult::Bad(AssignmentCheckError::InvalidCandidate( - candidate_index, - assigned_candidate_hash, - ))), - }; - let approval_entry = match candidate_entry.approval_entry_mut(&assignment.block_hash) { Some(a) => a, None => @@ -2001,6 +1995,7 @@ fn check_and_import_assignment( }; is_duplicate |= approval_entry.is_assigned(assignment.validator); approval_entry.import_assignment(tranche, assignment.validator, tick_now); + check_and_import_assignment_span.add_uint_tag("tranche", tranche as u64); // We've imported a new assignment, so we need to schedule a wake-up for when that might no-show. if let Some((approval_entry, status)) = @@ -2020,10 +2015,6 @@ fn check_and_import_assignment( // We also write the candidate entry as it now contains the new candidate. 
db.write_candidate_entry(candidate_entry.into()); } - check_and_import_assignment_span.add_uint_tag("tranche", tranche as u64); - - let is_duplicate = approval_entry.is_assigned(assignment.validator); - approval_entry.import_assignment(tranche, assignment.validator, tick_now); if is_duplicate { AssignmentCheckResult::AcceptedDuplicate diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index ee08eea0e872..f730a4dcae22 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -1913,6 +1913,7 @@ impl ApprovalDistribution { // This is due to the fact that we call this on wakeup, and we do have a wakeup for each candidate index, but let _span = state .spans + .get(&cert.block_hash) .map(|span| span.child("import-and-distribute-assignment")) .unwrap_or_else(|| jaeger::Span::new(&cert.block_hash, "distribute-assignment")) .with_string_tag("block-hash", format!("{:?}", cert.block_hash)) @@ -1920,8 +1921,9 @@ impl ApprovalDistribution { gum::debug!( target: LOG_TARGET, - "Distributing our assignment on candidate (block={}, indices={:?})", - candidate_indices, + ?candidate_indices, + block_hash = ?cert.block_hash, + "Distributing our assignment on candidates", ); state From b0402a382ad4ab136cd53025d16bb537f9bc7d23 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 18 Apr 2023 14:06:48 +0000 Subject: [PATCH 034/105] Include core bitfield in compact assignments Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/criteria.rs | 143 ++++++++++++---------- node/core/approval-voting/src/lib.rs | 24 ++-- node/primitives/src/approval.rs | 5 +- 3 files changed, 92 insertions(+), 80 deletions(-) diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index 11fa1657297f..0af5e87cc057 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -122,7 +122,6 @@ fn relay_vrf_modulo_transcript_v1(relay_vrf_story: RelayVRFStory, sample: u32) - } fn relay_vrf_modulo_transcript_v2(relay_vrf_story: RelayVRFStory) -> Transcript { - // combine the relay VRF story with a sample number. 
relay_vrf_modulo_transcript_inner( Transcript::new(approval_types::v2::RELAY_VRF_MODULO_CONTEXT), relay_vrf_story, @@ -255,7 +254,7 @@ pub(crate) trait AssignmentCriteria { fn check_assignment_cert( &self, - claimed_core_index: Option, + claimed_core_bitfield: CoreBitfield, validator_index: ValidatorIndex, config: &Config, relay_vrf_story: RelayVRFStory, @@ -276,12 +275,12 @@ impl AssignmentCriteria for RealAssignmentCriteria { config: &Config, leaving_cores: Vec<(CandidateHash, CoreIndex, GroupIndex)>, ) -> HashMap { - compute_assignments(keystore, relay_vrf_story, config, leaving_cores, false) + compute_assignments(keystore, relay_vrf_story, config, leaving_cores, true) } fn check_assignment_cert( &self, - claimed_core_index: Option, + claimed_core_bitfield: CoreBitfield, validator_index: ValidatorIndex, config: &Config, relay_vrf_story: RelayVRFStory, @@ -289,7 +288,7 @@ impl AssignmentCriteria for RealAssignmentCriteria { backing_groups: Vec, ) -> Result<(CoreBitfield, DelayTranche), InvalidAssignment> { check_assignment_cert( - claimed_core_index, + claimed_core_bitfield, validator_index, config, relay_vrf_story, @@ -467,6 +466,12 @@ fn compute_relay_vrf_modulo_assignments( } } +fn assigned_cores_transcript(core_bitfield: &CoreBitfield) -> Transcript { + let mut t = Transcript::new(approval_types::v2::ASSIGNED_CORE_CONTEXT); + core_bitfield.using_encoded(|s| t.append_message(b"cores", s)); + t +} + fn compute_relay_vrf_modulo_assignments_v2( assignments_key: &schnorrkel::Keypair, validator_index: ValidatorIndex, @@ -476,9 +481,10 @@ fn compute_relay_vrf_modulo_assignments_v2( assignments: &mut HashMap, ) { let mut assigned_cores = Vec::new(); + // for rvm_sample in 0..config.relay_vrf_modulo_samples { let maybe_assignment = { let assigned_cores = &mut assigned_cores; - assignments_key.vrf_sign_after_check( + assignments_key.vrf_sign_extra_after_check( relay_vrf_modulo_transcript_v2(relay_vrf_story.clone()), |vrf_in_out| { *assigned_cores = relay_vrf_modulo_cores( @@ -498,37 +504,40 @@ fn compute_relay_vrf_modulo_assignments_v2( ?assigned_cores, ?validator_index, tranche = 0, - "Produced RelayVRFModuloCompact Assignment." + "RelayVRFModuloCompact Assignment." ); - true + let assignment_bitfield: CoreBitfield = assigned_cores + .clone() + .try_into() + .expect("Just checked `!assigned_cores.is_empty()`; qed"); + + Some(assigned_cores_transcript(&assignment_bitfield)) } else { - false + None } }, ) }; if let Some(assignment) = maybe_assignment.map(|(vrf_in_out, vrf_proof, _)| { + let assignment_bitfield: CoreBitfield = assigned_cores + .clone() + .try_into() + .expect("Just checked `!assigned_cores.is_empty()`; qed"); + let cert = AssignmentCertV2 { - kind: AssignmentCertKindV2::RelayVRFModuloCompact, + kind: AssignmentCertKindV2::RelayVRFModuloCompact { + core_bitfield: assignment_bitfield.clone(), + }, vrf: ( approval_types::VRFOutput(vrf_in_out.to_output()), approval_types::VRFProof(vrf_proof), ), }; - // All assignments of type RelayVRFModuloCompact have tranche 0. - OurAssignment { - cert, - tranche: 0, - validator_index, - triggered: false, - assignment_bitfield: assigned_cores - .clone() - .try_into() - .expect("Just checked `!assigned_cores.is_empty()`; qed"), - } + // All assignments of type RelayVRFModulo have tranche 0. 
+ OurAssignment { cert, tranche: 0, validator_index, triggered: false, assignment_bitfield } }) { for core_index in assigned_cores { assignments.insert(core_index, assignment.clone()); @@ -641,7 +650,7 @@ pub(crate) enum InvalidAssignmentReason { /// For v2 assignments of type `AssignmentCertKindV2::RelayVRFModuloCompact` we don't need to pass /// `claimed_core_index` it won't be used in the check. pub(crate) fn check_assignment_cert( - claimed_core_index: Option, + claimed_core_indices: CoreBitfield, validator_index: ValidatorIndex, config: &Config, relay_vrf_story: RelayVRFStory, @@ -658,14 +667,23 @@ pub(crate) fn check_assignment_cert( let public = schnorrkel::PublicKey::from_bytes(validator_public.as_slice()) .map_err(|_| InvalidAssignment(Reason::InvalidAssignmentKey))?; - // For v1 assignments Check that the validator was not part of the backing group + // Check that we have all backing groups for claimed cores. + if claimed_core_indices.count_ones() == 0 || + claimed_core_indices.count_ones() != backing_groups.len() + { + return Err(InvalidAssignment(Reason::InvalidArguments)) + } + + // Check that the validator was not part of the backing group // and not already assigned. - if let Some(claimed_core_index) = claimed_core_index.as_ref() { - if claimed_core_index.0 >= config.n_cores { + for (claimed_core, backing_group) in claimed_core_indices.iter_ones().zip(backing_groups.iter()) + { + if claimed_core >= config.n_cores as usize { return Err(InvalidAssignment(Reason::CoreIndexOutOfBounds)) } + let is_in_backing = - is_in_backing_group(&config.validator_groups, validator_index, backing_groups[0]); + is_in_backing_group(&config.validator_groups, validator_index, *backing_group); if is_in_backing { return Err(InvalidAssignment(Reason::IsInBackingGroup)) @@ -674,86 +692,75 @@ pub(crate) fn check_assignment_cert( let &(ref vrf_output, ref vrf_proof) = &assignment.vrf; match &assignment.kind { - AssignmentCertKindV2::RelayVRFModuloCompact => { + AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield } => { let (vrf_in_out, _) = public - .vrf_verify( + .vrf_verify_extra( relay_vrf_modulo_transcript_v2(relay_vrf_story), &vrf_output.0, &vrf_proof.0, + assigned_cores_transcript(core_bitfield), ) .map_err(|_| InvalidAssignment(Reason::VRFModuloOutputMismatch))?; - // Get unique core assignments from the VRF wrt `config.n_cores`. - // Some of the core indices might be invalid, as there was no candidate included in the - // relay chain block for that core. - // - // The caller must check if the claimed candidate indices are valid - // and refer to the valid subset of cores outputed by the VRF here. - let vrf_unique_cores = relay_vrf_modulo_cores( + let resulting_cores = relay_vrf_modulo_cores( &vrf_in_out, config.relay_vrf_modulo_samples, config.n_cores, ); - // Filter out cores in which the validator is in the backing group. - let resulting_cores = vrf_unique_cores - .iter() - .zip(backing_groups.iter()) - .filter_map(|(core, backing_group)| { - if is_in_backing_group( - &config.validator_groups, - validator_index, - *backing_group, - ) { - None - } else { - Some(*core) - } - }) - .collect::>(); + // TODO: Enforce that all claimable cores are claimed, or include refused cores. + // Currently validators can opt out of checking specific cores. + // This is the same issue to how validator can opt out and not send their assignments in the first place. 
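A standalone sketch of the core-membership check performed by the `RelayVRFModuloCompact` arm, with the VRF-derived cores held in a plain `HashSet<u32>` (an assumption; the real code compares against the `Vec<CoreIndex>` returned by `relay_vrf_modulo_cores`):

use bitvec::{order::Lsb0, vec::BitVec};
use std::collections::HashSet;

/// `true` iff the claimed bitfield is non-empty and every claimed core index is both
/// in bounds and contained in the set of cores derived from the VRF output.
fn claimed_cores_covered(
    claimed: &BitVec<u8, Lsb0>,
    derived: &HashSet<u32>,
    n_cores: u32,
) -> bool {
    claimed.count_ones() > 0 &&
        claimed
            .iter_ones()
            .all(|core| (core as u32) < n_cores && derived.contains(&(core as u32)))
}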
- CoreBitfield::try_from(resulting_cores) - .map(|bitfield| (bitfield, 0)) - .map_err(|_| InvalidAssignment(Reason::NullAssignment)) + // Ensure that the `vrf_in_out` actually includes all of the claimed cores. + if claimed_core_indices.iter_ones().fold(true, |cores_match, core| { + cores_match & resulting_cores.contains(&CoreIndex(core as u32)) + }) { + Ok((claimed_core_indices, 0)) + } else { + gum::debug!( + target: LOG_TARGET, + ?resulting_cores, + ?claimed_core_indices, + "Assignment claimed cores mismatch", + ); + Err(InvalidAssignment(Reason::VRFModuloCoreIndexMismatch)) + } }, AssignmentCertKindV2::RelayVRFModulo { sample } => { if *sample >= config.relay_vrf_modulo_samples { return Err(InvalidAssignment(Reason::SampleOutOfBounds)) } - // This is a v1 assignment for which we need the core index. - let claimed_core_index = - claimed_core_index.ok_or(InvalidAssignment(Reason::InvalidArguments))?; - let (vrf_in_out, _) = public .vrf_verify_extra( relay_vrf_modulo_transcript_v1(relay_vrf_story, *sample), &vrf_output.0, &vrf_proof.0, - assigned_core_transcript(claimed_core_index), + assigned_core_transcript(CoreIndex( + claimed_core_indices.first_one().expect("Checked above; qed") as u32, + )), ) .map_err(|_| InvalidAssignment(Reason::VRFModuloOutputMismatch))?; let core = relay_vrf_modulo_core(&vrf_in_out, config.n_cores); // ensure that the `vrf_in_out` actually gives us the claimed core. - if core == claimed_core_index { - Ok((core.into(), 0)) + if core.0 as usize == claimed_core_indices.first_one().expect("Checked above; qed") { + Ok((claimed_core_indices.into(), 0)) } else { gum::debug!( target: LOG_TARGET, ?core, - ?claimed_core_index, - "Assignment claimed core mismatch", + ?claimed_core_indices, + "Assignment claimed cores mismatch", ); Err(InvalidAssignment(Reason::VRFModuloCoreIndexMismatch)) } }, AssignmentCertKindV2::RelayVRFDelay { core_index } => { - // This is a v1 assignment for which we need the core index. - let claimed_core_index = - claimed_core_index.ok_or(InvalidAssignment(Reason::InvalidArguments))?; - - if *core_index != claimed_core_index { + if core_index.0 as usize != + claimed_core_indices.first_one().expect("Checked above; qed") + { return Err(InvalidAssignment(Reason::VRFDelayCoreIndexMismatch)) } diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index 80479f77428e..76e782ef5f79 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -26,8 +26,7 @@ use polkadot_node_jaeger as jaeger; use polkadot_node_primitives::{ approval::{ v2::{BitfieldError, CandidateBitfield, CoreBitfield}, - AssignmentCertKindV2, BlockApprovalMeta, DelayTranche, IndirectAssignmentCertV2, - IndirectSignedApprovalVote, + BlockApprovalMeta, DelayTranche, IndirectAssignmentCertV2, IndirectSignedApprovalVote, }, ValidationResult, }; @@ -95,6 +94,7 @@ mod time; use crate::{ approval_db::v1::{Config as DatabaseConfig, DbBackend}, backend::{Backend, OverlayedBackend}, + criteria::InvalidAssignmentReason, }; #[cfg(test)] @@ -1921,18 +1921,20 @@ fn check_and_import_assignment( assigned_candidate_hashes.push(assigned_candidate_hash); } - let claimed_core_index = match assignment.cert.kind { - // TODO: remove CoreIndex from certificates completely. - // https://github.com/paritytech/polkadot/issues/6988 - AssignmentCertKindV2::RelayVRFDelay { .. } => Some(claimed_core_indices[0]), - AssignmentCertKindV2::RelayVRFModulo { .. 
} => Some(claimed_core_indices[0]), - // VRelayVRFModuloCompact assignment doesn't need the the claimed cores for checking. - AssignmentCertKindV2::RelayVRFModuloCompact => None, - }; + // Error on null assignments. + if claimed_core_indices.is_empty() { + return Ok(( + AssignmentCheckResult::Bad(AssignmentCheckError::InvalidCert( + assignment.validator, + format!("{:?}", InvalidAssignmentReason::NullAssignment), + )), + Vec::new(), + )) + } // Check the assignment certificate. let res = state.assignment_criteria.check_assignment_cert( - claimed_core_index, + claimed_core_indices.try_into().expect("Checked for null assignment above; qed"), assignment.validator, &criteria::Config::from(session_info), block_entry.relay_vrf_story(), diff --git a/node/primitives/src/approval.rs b/node/primitives/src/approval.rs index d7e7be861aab..1b6a6126439d 100644 --- a/node/primitives/src/approval.rs +++ b/node/primitives/src/approval.rs @@ -237,7 +237,10 @@ pub enum AssignmentCertKindV2 { /// candidates were included. /// /// The context is [`v2::RELAY_VRF_MODULO_CONTEXT`] - RelayVRFModuloCompact, + RelayVRFModuloCompact { + /// A bitfield representing the core indices claimed by this assignment. + core_bitfield: super::approval::v2::CoreBitfield, + }, /// An assignment story based on the VRF that authorized the relay-chain block where the /// candidate was included combined with the index of a particular core. /// From 2ad5234bec2647c25586dd61f7484a2087c2c587 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Thu, 20 Apr 2023 09:16:26 +0000 Subject: [PATCH 035/105] Fix existing approval voting tests Signed-off-by: Andrei Sandu --- .../approval-voting/src/approval_checking.rs | 22 +++--- .../src/approval_db/v1/tests.rs | 2 +- node/core/approval-voting/src/criteria.rs | 75 +++++++++---------- node/core/approval-voting/src/import.rs | 4 +- node/core/approval-voting/src/lib.rs | 11 ++- node/core/approval-voting/src/tests.rs | 61 ++++++++++----- 6 files changed, 97 insertions(+), 78 deletions(-) diff --git a/node/core/approval-voting/src/approval_checking.rs b/node/core/approval-voting/src/approval_checking.rs index aba03ed1ce30..30e9d96ea2cb 100644 --- a/node/core/approval-voting/src/approval_checking.rs +++ b/node/core/approval-voting/src/approval_checking.rs @@ -474,7 +474,7 @@ mod tests { let approval_entry = approval_db::v1::ApprovalEntry { tranches: Vec::new(), - assignments: BitVec::default(), + assigned_validators: BitVec::default(), our_assignment: None, our_approval_sig: None, backing_group: GroupIndex(0), @@ -524,7 +524,7 @@ mod tests { assignments: (5..10).map(|i| (ValidatorIndex(i), 0.into())).collect(), }, ], - assignments: bitvec![u8, BitOrderLsb0; 1; 10], + assigned_validators: bitvec![u8, BitOrderLsb0; 1; 10], our_assignment: None, our_approval_sig: None, backing_group: GroupIndex(0), @@ -596,7 +596,7 @@ mod tests { assignments: (6..10).map(|i| (ValidatorIndex(i), 0.into())).collect(), }, ], - assignments: bitvec![u8, BitOrderLsb0; 1; 10], + assigned_validators: bitvec![u8, BitOrderLsb0; 1; 10], our_assignment: None, our_approval_sig: None, backing_group: GroupIndex(0), @@ -649,7 +649,7 @@ mod tests { let mut approval_entry: ApprovalEntry = approval_db::v1::ApprovalEntry { tranches: Vec::new(), - assignments: bitvec![u8, BitOrderLsb0; 0; 5], + assigned_validators: bitvec![u8, BitOrderLsb0; 0; 5], our_assignment: None, our_approval_sig: None, backing_group: GroupIndex(0), @@ -693,7 +693,7 @@ mod tests { let mut approval_entry: ApprovalEntry = approval_db::v1::ApprovalEntry { tranches: 
Vec::new(), - assignments: bitvec![u8, BitOrderLsb0; 0; 10], + assigned_validators: bitvec![u8, BitOrderLsb0; 0; 10], our_assignment: None, our_approval_sig: None, backing_group: GroupIndex(0), @@ -733,7 +733,7 @@ mod tests { let mut approval_entry: ApprovalEntry = approval_db::v1::ApprovalEntry { tranches: Vec::new(), - assignments: bitvec![u8, BitOrderLsb0; 0; 10], + assigned_validators: bitvec![u8, BitOrderLsb0; 0; 10], our_assignment: None, our_approval_sig: None, backing_group: GroupIndex(0), @@ -778,7 +778,7 @@ mod tests { let mut approval_entry: ApprovalEntry = approval_db::v1::ApprovalEntry { tranches: Vec::new(), - assignments: bitvec![u8, BitOrderLsb0; 0; n_validators], + assigned_validators: bitvec![u8, BitOrderLsb0; 0; n_validators], our_assignment: None, our_approval_sig: None, backing_group: GroupIndex(0), @@ -845,7 +845,7 @@ mod tests { let mut approval_entry: ApprovalEntry = approval_db::v1::ApprovalEntry { tranches: Vec::new(), - assignments: bitvec![u8, BitOrderLsb0; 0; n_validators], + assigned_validators: bitvec![u8, BitOrderLsb0; 0; n_validators], our_assignment: None, our_approval_sig: None, backing_group: GroupIndex(0), @@ -936,7 +936,7 @@ mod tests { let mut approval_entry: ApprovalEntry = approval_db::v1::ApprovalEntry { tranches: Vec::new(), - assignments: bitvec![u8, BitOrderLsb0; 0; n_validators], + assigned_validators: bitvec![u8, BitOrderLsb0; 0; n_validators], our_assignment: None, our_approval_sig: None, backing_group: GroupIndex(0), @@ -1049,7 +1049,7 @@ mod tests { assignments: (2..5).map(|i| (ValidatorIndex(i), 1.into())).collect(), }, ], - assignments: bitvec![u8, BitOrderLsb0; 1; 3], + assigned_validators: bitvec![u8, BitOrderLsb0; 1; 3], our_assignment: None, our_approval_sig: None, backing_group: GroupIndex(0), @@ -1099,7 +1099,7 @@ mod tests { backing_group: GroupIndex(0), our_assignment: None, our_approval_sig: None, - assignments: bitvec![u8, BitOrderLsb0; 0; 3], + assigned_validators: bitvec![u8, BitOrderLsb0; 0; 3], approved: false, } .into(); diff --git a/node/core/approval-voting/src/approval_db/v1/tests.rs b/node/core/approval-voting/src/approval_db/v1/tests.rs index 5b6602882e59..c967c8640911 100644 --- a/node/core/approval-voting/src/approval_db/v1/tests.rs +++ b/node/core/approval-voting/src/approval_db/v1/tests.rs @@ -98,7 +98,7 @@ fn read_write() { backing_group: GroupIndex(1), our_assignment: None, our_approval_sig: None, - assignments: Default::default(), + assigned_validators: Default::default(), approved: false, }, )] diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index 0af5e87cc057..f964074b250b 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -262,7 +262,7 @@ pub(crate) trait AssignmentCriteria { // Backing groups for each "leaving core". 
backing_groups: Vec, // TODO: maybe define record or something else than tuple - ) -> Result<(CoreBitfield, DelayTranche), InvalidAssignment>; + ) -> Result; } pub(crate) struct RealAssignmentCriteria; @@ -286,7 +286,7 @@ impl AssignmentCriteria for RealAssignmentCriteria { relay_vrf_story: RelayVRFStory, assignment: &AssignmentCertV2, backing_groups: Vec, - ) -> Result<(CoreBitfield, DelayTranche), InvalidAssignment> { + ) -> Result { check_assignment_cert( claimed_core_bitfield, validator_index, @@ -656,7 +656,7 @@ pub(crate) fn check_assignment_cert( relay_vrf_story: RelayVRFStory, assignment: &AssignmentCertV2, backing_groups: Vec, -) -> Result<(CoreBitfield, DelayTranche), InvalidAssignment> { +) -> Result { use InvalidAssignmentReason as Reason; let validator_public = config @@ -716,7 +716,7 @@ pub(crate) fn check_assignment_cert( if claimed_core_indices.iter_ones().fold(true, |cores_match, core| { cores_match & resulting_cores.contains(&CoreIndex(core as u32)) }) { - Ok((claimed_core_indices, 0)) + Ok(0) } else { gum::debug!( target: LOG_TARGET, @@ -746,7 +746,7 @@ pub(crate) fn check_assignment_cert( let core = relay_vrf_modulo_core(&vrf_in_out, config.n_cores); // ensure that the `vrf_in_out` actually gives us the claimed core. if core.0 as usize == claimed_core_indices.first_one().expect("Checked above; qed") { - Ok((claimed_core_indices.into(), 0)) + Ok(0) } else { gum::debug!( target: LOG_TARGET, @@ -772,13 +772,10 @@ pub(crate) fn check_assignment_cert( ) .map_err(|_| InvalidAssignment(Reason::VRFDelayOutputMismatch))?; - Ok(( - (*core_index).into(), - relay_vrf_delay_tranche( - &vrf_in_out, - config.n_delay_tranches, - config.zeroth_delay_tranche_width, - ), + Ok(relay_vrf_delay_tranche( + &vrf_in_out, + config.n_delay_tranches, + config.zeroth_delay_tranche_width, )) }, } @@ -885,6 +882,7 @@ mod tests { n_delay_tranches: 40, }, vec![(c_a, CoreIndex(0), GroupIndex(1)), (c_b, CoreIndex(1), GroupIndex(0))], + false, ); // Note that alice is in group 0, which was the backing group for core 1. 
@@ -920,6 +918,7 @@ mod tests { n_delay_tranches: 40, }, vec![(c_a, CoreIndex(0), GroupIndex(0)), (c_b, CoreIndex(1), GroupIndex(1))], + false, ); assert_eq!(assignments.len(), 1); @@ -947,6 +946,7 @@ mod tests { n_delay_tranches: 40, }, vec![], + false, ); assert!(assignments.is_empty()); @@ -954,8 +954,8 @@ mod tests { #[derive(Debug)] struct MutatedAssignment { - cores: Vec, - cert: AssignmentCert, + cores: CoreBitfield, + cert: AssignmentCertV2, groups: Vec, own_group: GroupIndex, val_index: ValidatorIndex, @@ -999,24 +999,20 @@ mod tests { ) }) .collect::>(), + false, ); let mut counted = 0; for (core, assignment) in assignments { let cores = match assignment.cert.kind.clone() { - AssignmentCertKind::RelayVRFModuloCompact { sample: _, core_indices } => - core_indices, - AssignmentCertKind::RelayVRFModulo { sample: _ } => { - vec![core] - }, - AssignmentCertKind::RelayVRFDelay { core_index } => { - vec![core_index] - }, + AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield } => core_bitfield, + AssignmentCertKindV2::RelayVRFModulo { sample: _ } => core.into(), + AssignmentCertKindV2::RelayVRFDelay { core_index } => core_index.into(), }; let mut mutated = MutatedAssignment { cores: cores.clone(), - groups: cores.clone().into_iter().map(|core| group_for_core(core.0 as _)).collect(), + groups: cores.iter_ones().map(|core| group_for_core(core)).collect(), cert: assignment.cert, own_group: GroupIndex(0), val_index: ValidatorIndex(0), @@ -1053,7 +1049,7 @@ mod tests { #[test] fn check_rejects_claimed_core_out_of_bounds() { check_mutated_assignments(200, 100, 25, |m| { - m.cores[0].0 += 100; + m.cores = CoreIndex(100).into(); Some(false) }); } @@ -1078,7 +1074,7 @@ mod tests { fn check_rejects_delay_bad_vrf() { check_mutated_assignments(40, 10, 8, |m| { match m.cert.kind.clone() { - AssignmentCertKind::RelayVRFDelay { .. } => { + AssignmentCertKindV2::RelayVRFDelay { .. } => { m.cert.vrf = garbage_vrf(); Some(false) }, @@ -1091,11 +1087,11 @@ mod tests { fn check_rejects_modulo_bad_vrf() { check_mutated_assignments(200, 100, 25, |m| { match m.cert.kind.clone() { - AssignmentCertKind::RelayVRFModulo { .. } => { + AssignmentCertKindV2::RelayVRFModulo { .. } => { m.cert.vrf = garbage_vrf(); Some(false) }, - AssignmentCertKind::RelayVRFModuloCompact { .. } => { + AssignmentCertKindV2::RelayVRFModuloCompact { .. } => { m.cert.vrf = garbage_vrf(); Some(false) }, @@ -1108,14 +1104,11 @@ mod tests { fn check_rejects_modulo_sample_out_of_bounds() { check_mutated_assignments(200, 100, 25, |m| { match m.cert.kind.clone() { - AssignmentCertKind::RelayVRFModulo { sample } => { - m.config.relay_vrf_modulo_samples = sample; - Some(false) - }, - AssignmentCertKind::RelayVRFModuloCompact { sample, core_indices: _ } => { + AssignmentCertKindV2::RelayVRFModulo { sample } => { m.config.relay_vrf_modulo_samples = sample; Some(false) }, + AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield: _ } => Some(false), _ => None, // skip everything else. } }); @@ -1125,10 +1118,11 @@ mod tests { fn check_rejects_delay_claimed_core_wrong() { check_mutated_assignments(200, 100, 25, |m| { match m.cert.kind.clone() { - AssignmentCertKind::RelayVRFDelay { .. } => { - for core in &mut m.cores { - core.0 = (core.0 + 1) % 100; - } + AssignmentCertKindV2::RelayVRFDelay { .. } => { + // for core in &mut m.cores { + // core.0 = (core.0 + 1) % 100; + // } + m.cores = CoreIndex((m.cores.first_one().unwrap() + 1) as u32 % 100).into(); Some(false) }, _ => None, // skip everything else. 
@@ -1140,11 +1134,10 @@ mod tests { fn check_rejects_modulo_core_wrong() { check_mutated_assignments(200, 100, 25, |m| { match m.cert.kind.clone() { - AssignmentCertKind::RelayVRFModulo { .. } | - AssignmentCertKind::RelayVRFModuloCompact { .. } => { - for core in &mut m.cores { - core.0 = (core.0 + 1) % 100; - } + AssignmentCertKindV2::RelayVRFModulo { .. } | + AssignmentCertKindV2::RelayVRFModuloCompact { .. } => { + m.cores = CoreIndex((m.cores.first_one().unwrap() + 1) as u32 % 100).into(); + Some(false) }, _ => None, // skip everything else. diff --git a/node/core/approval-voting/src/import.rs b/node/core/approval-voting/src/import.rs index ec9f741a3ec6..f51762dc1410 100644 --- a/node/core/approval-voting/src/import.rs +++ b/node/core/approval-voting/src/import.rs @@ -691,11 +691,11 @@ pub(crate) mod tests { fn check_assignment_cert( &self, - _claimed_core_index: Vec, + _claimed_core_bitfield: polkadot_node_primitives::approval::v2::CoreBitfield, _validator_index: polkadot_primitives::ValidatorIndex, _config: &criteria::Config, _relay_vrf_story: polkadot_node_primitives::approval::RelayVRFStory, - _assignment: &polkadot_node_primitives::approval::AssignmentCert, + _assignment: &polkadot_node_primitives::approval::AssignmentCertV2, _backing_groups: Vec, ) -> Result { Ok(0) diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index 76e782ef5f79..d17f674d2f9a 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -1934,7 +1934,10 @@ fn check_and_import_assignment( // Check the assignment certificate. let res = state.assignment_criteria.check_assignment_cert( - claimed_core_indices.try_into().expect("Checked for null assignment above; qed"), + claimed_core_indices + .clone() + .try_into() + .expect("Checked for null assignment above; qed"), assignment.validator, &criteria::Config::from(session_info), block_entry.relay_vrf_story(), @@ -1942,7 +1945,7 @@ fn check_and_import_assignment( backing_groups, ); - let (claimed_core_indices, tranche) = match res { + let tranche = match res { Err(crate::criteria::InvalidAssignment(reason)) => return Ok(( AssignmentCheckResult::Bad(AssignmentCheckError::InvalidCert( @@ -1951,7 +1954,7 @@ fn check_and_import_assignment( )), Vec::new(), )), - Ok((claimed_core_indices, tranche)) => { + Ok(tranche) => { let current_tranche = state.clock.tranche_now(state.slot_duration_millis, block_entry.slot()); @@ -1961,7 +1964,7 @@ fn check_and_import_assignment( return Ok((AssignmentCheckResult::TooFarInFuture, Vec::new())) } - (claimed_core_indices, tranche) + tranche }, }; diff --git a/node/core/approval-voting/src/tests.rs b/node/core/approval-voting/src/tests.rs index 916e84bee102..5f657b8688dc 100644 --- a/node/core/approval-voting/src/tests.rs +++ b/node/core/approval-voting/src/tests.rs @@ -19,8 +19,8 @@ use crate::tests::test_constants::TEST_CONFIG; use super::*; use polkadot_node_primitives::{ approval::{ - AssignmentCert, AssignmentCertKind, DelayTranche, VRFOutput, VRFProof, - RELAY_VRF_MODULO_CONTEXT, + v1::RELAY_VRF_MODULO_CONTEXT, AssignmentCert, AssignmentCertKind, AssignmentCertKindV2, + AssignmentCertV2, DelayTranche, VRFOutput, VRFProof, }, AvailableData, BlockData, PoV, }; @@ -248,11 +248,11 @@ where fn check_assignment_cert( &self, - _claimed_core_index: Vec, + _claimed_core_bitfield: polkadot_node_primitives::approval::v2::CoreBitfield, validator_index: ValidatorIndex, _config: &criteria::Config, _relay_vrf_story: polkadot_node_primitives::approval::RelayVRFStory, - 
_assignment: &polkadot_node_primitives::approval::AssignmentCert, + _assignment: &polkadot_node_primitives::approval::AssignmentCertV2, _backing_groups: Vec, ) -> Result { self.1(validator_index) @@ -395,6 +395,17 @@ fn garbage_assignment_cert(kind: AssignmentCertKind) -> AssignmentCert { AssignmentCert { kind, vrf: (VRFOutput(out), VRFProof(proof)) } } +fn garbage_assignment_cert_v2(kind: AssignmentCertKindV2) -> AssignmentCertV2 { + let ctx = schnorrkel::signing_context(RELAY_VRF_MODULO_CONTEXT); + let msg = b"test-garbage"; + let mut prng = rand_core::OsRng; + let keypair = schnorrkel::Keypair::generate_with(&mut prng); + let (inout, proof, _) = keypair.vrf_sign(ctx.bytes(msg)); + let out = inout.to_output(); + + AssignmentCertV2 { kind, vrf: (VRFOutput(out), VRFProof(proof)) } +} + fn sign_approval( key: Sr25519Keyring, candidate_hash: CandidateHash, @@ -624,9 +635,10 @@ async fn check_and_import_assignment( IndirectAssignmentCertV2 { block_hash, validator, - cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }), + cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }) + .into(), }, - vec![candidate_index], + candidate_index.into(), tx, ), }, @@ -1112,9 +1124,10 @@ fn blank_subsystem_act_on_bad_block() { validator: 0u32.into(), cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0, - }), + }) + .into(), }, - vec![0u32], + 0u32.into(), tx, ), }, @@ -1780,9 +1793,10 @@ fn linear_import_act_on_leaf() { validator: 0u32.into(), cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0, - }), + }) + .into(), }, - vec![0u32], + 0u32.into(), tx, ), }, @@ -1850,9 +1864,10 @@ fn forkful_import_at_same_height_act_on_leaf() { validator: 0u32.into(), cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0, - }), + }) + .into(), }, - vec![0u32], + 0u32.into(), tx, ), }, @@ -2297,7 +2312,9 @@ fn subsystem_validate_approvals_cache() { let _ = assignments.insert( CoreIndex(0), approval_db::v1::OurAssignment { - cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }), + cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }) + .into(), + assignment_bitfield: CoreIndex(0u32).into(), tranche: 0, validator_index: ValidatorIndex(0), triggered: false, @@ -2308,10 +2325,14 @@ fn subsystem_validate_approvals_cache() { let _ = assignments.insert( CoreIndex(0), approval_db::v1::OurAssignment { - cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModuloCompact { - sample: 0, - core_indices: vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)], + cert: garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFModuloCompact { + core_bitfield: vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)] + .try_into() + .unwrap(), }), + assignment_bitfield: vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)] + .try_into() + .unwrap(), tranche: 0, validator_index: ValidatorIndex(0), triggered: false, @@ -2419,7 +2440,7 @@ async fn handle_double_assignment_import( _, c_indices, )) => { - assert_eq!(vec![candidate_index], c_indices); + assert_eq!(Into::::into(candidate_index), c_indices); } ); @@ -2432,7 +2453,7 @@ async fn handle_double_assignment_import( _, c_index )) => { - assert_eq!(candidate_index, c_index); + assert_eq!(Into::::into(candidate_index), c_index); } ); @@ -2528,7 +2549,9 @@ where let _ = assignments.insert( CoreIndex(0), approval_db::v1::OurAssignment { - cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }), + cert: 
garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }) + .into(), + assignment_bitfield: CoreIndex(0).into(), tranche: our_assigned_tranche, validator_index: ValidatorIndex(0), triggered: false, From 6a8f17ace3370e2fda40851404bab8c8fb005a8e Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Thu, 20 Apr 2023 15:09:30 +0000 Subject: [PATCH 036/105] Add bitfield certificate extra check Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/criteria.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index f964074b250b..91ee1b7603b0 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -693,6 +693,11 @@ pub(crate) fn check_assignment_cert( let &(ref vrf_output, ref vrf_proof) = &assignment.vrf; match &assignment.kind { AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield } => { + // Check that claimed core bitfield match the one from certificate. + if claimed_core_indices != core_bitfield { + return Err(InvalidAssignment(Reason::VRFModuloCoreIndexMismatch)) + } + let (vrf_in_out, _) = public .vrf_verify_extra( relay_vrf_modulo_transcript_v2(relay_vrf_story), From a8f952cb34d63abd9588cd6ef8ba35b24d85de06 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Thu, 20 Apr 2023 15:09:49 +0000 Subject: [PATCH 037/105] Approval dist test compilation fixes Signed-off-by: Andrei Sandu --- .../approval-distribution/src/tests.rs | 134 ++++++++++++------ 1 file changed, 88 insertions(+), 46 deletions(-) diff --git a/node/network/approval-distribution/src/tests.rs b/node/network/approval-distribution/src/tests.rs index 67fcea33b018..ab7a4275f7f9 100644 --- a/node/network/approval-distribution/src/tests.rs +++ b/node/network/approval-distribution/src/tests.rs @@ -24,7 +24,8 @@ use polkadot_node_network_protocol::{ view, ObservedRole, }; use polkadot_node_primitives::approval::{ - AssignmentCert, AssignmentCertKind, VRFOutput, VRFProof, RELAY_VRF_MODULO_CONTEXT, + v2::RELAY_VRF_MODULO_CONTEXT, AssignmentCert, AssignmentCertKind, AssignmentCertKindV2, + AssignmentCertV2, IndirectAssignmentCert, VRFOutput, VRFProof, }; use polkadot_node_subsystem::messages::{network_bridge_event, AllMessages, ApprovalCheckError}; use polkadot_node_subsystem_test_helpers as test_helpers; @@ -251,7 +252,7 @@ async fn send_message_from_peer( .await; } -fn fake_assignment_cert(block_hash: Hash, validator: ValidatorIndex) -> IndirectAssignmentCertV2 { +fn fake_assignment_cert(block_hash: Hash, validator: ValidatorIndex) -> IndirectAssignmentCert { let ctx = schnorrkel::signing_context(RELAY_VRF_MODULO_CONTEXT); let msg = b"WhenParachains?"; let mut prng = rand_core::OsRng; @@ -259,7 +260,7 @@ fn fake_assignment_cert(block_hash: Hash, validator: ValidatorIndex) -> Indirect let (inout, proof, _) = keypair.vrf_sign(ctx.bytes(msg)); let out = inout.to_output(); - IndirectAssignmentCertV2 { + IndirectAssignmentCert { block_hash, validator, cert: AssignmentCert { @@ -322,7 +323,7 @@ fn try_import_the_same_assignment() { // send the assignment related to `hash` let validator_index = ValidatorIndex(0); let cert = fake_assignment_cert(hash, validator_index); - let assignments = vec![(cert.clone(), vec![0u32])]; + let assignments = vec![(cert.clone(), 0u32)]; let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments.clone()); send_message_from_peer(overseer, &peer_a, msg).await; @@ -337,8 +338,8 @@ fn try_import_the_same_assignment() { claimed_indices, 
tx, )) => { - assert_eq!(claimed_indices, vec![0u32]); - assert_eq!(assignment, cert); + assert_eq!(claimed_indices, 0u32.into()); + assert_eq!(assignment, cert.into()); tx.send(AssignmentCheckResult::Accepted).unwrap(); } ); @@ -427,8 +428,8 @@ fn spam_attack_results_in_negative_reputation_change() { claimed_candidate_index, tx, )) => { - assert_eq!(assignment, assignments[i].0); - assert_eq!(claimed_candidate_index, assignments[i].1); + assert_eq!(assignment, assignments[i].0.into()); + assert_eq!(claimed_candidate_index, assignments[i].1.into()); tx.send(AssignmentCheckResult::Accepted).unwrap(); } ); @@ -488,11 +489,14 @@ fn peer_sending_us_the_same_we_just_sent_them_is_ok() { // import an assignment related to `hash` locally let validator_index = ValidatorIndex(0); - let candidate_index = vec![0u32]; + let candidate_index = 0u32; let cert = fake_assignment_cert(hash, validator_index); overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert.clone(), candidate_index), + ApprovalDistributionMessage::DistributeAssignment( + cert.clone().into(), + candidate_index.into(), + ), ) .await; @@ -566,11 +570,14 @@ fn import_approval_happy_path() { // import an assignment related to `hash` locally let validator_index = ValidatorIndex(0); - let candidate_index = vec![0u32]; + let candidate_index = 0u32; let cert = fake_assignment_cert(hash, validator_index); overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert, candidate_index), + ApprovalDistributionMessage::DistributeAssignment( + cert.clone().into(), + candidate_index.into(), + ), ) .await; @@ -652,7 +659,7 @@ fn import_approval_bad() { overseer_send(overseer, msg).await; let validator_index = ValidatorIndex(0); - let candidate_index = vec![0u32]; + let candidate_index = 0u32; let cert = fake_assignment_cert(hash, validator_index); // send the an approval from peer_b, we don't have an assignment yet @@ -679,8 +686,8 @@ fn import_approval_bad() { i, tx, )) => { - assert_eq!(assignment, cert); - assert_eq!(i, candidate_index); + assert_eq!(assignment, cert.into()); + assert_eq!(i, candidate_index.into()); tx.send(AssignmentCheckResult::Accepted).unwrap(); } ); @@ -825,9 +832,17 @@ fn update_peer_view() { let cert_a = fake_assignment_cert(hash_a, ValidatorIndex(0)); let cert_b = fake_assignment_cert(hash_b, ValidatorIndex(0)); - overseer_send(overseer, ApprovalDistributionMessage::DistributeAssignment(cert_a, 0)).await; + overseer_send( + overseer, + ApprovalDistributionMessage::DistributeAssignment(cert_a.into(), 0.into()), + ) + .await; - overseer_send(overseer, ApprovalDistributionMessage::DistributeAssignment(cert_b, 0)).await; + overseer_send( + overseer, + ApprovalDistributionMessage::DistributeAssignment(cert_b.into(), 0.into()), + ) + .await; // connect a peer setup_peer_with_view(overseer, peer, view![hash_a]).await; @@ -848,7 +863,7 @@ fn update_peer_view() { virtual_overseer }); - assert_eq!(state.peer_views.get(peer).map(|v| v.finalized_number), Some(0)); + assert_eq!(state.peer_views.get(peer).map(|v| v.view.finalized_number), Some(0)); assert_eq!( state .blocks @@ -879,7 +894,7 @@ fn update_peer_view() { overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert_c.clone(), 0), + ApprovalDistributionMessage::DistributeAssignment(cert_c.clone().into(), 0.into()), ) .await; @@ -900,7 +915,7 @@ fn update_peer_view() { virtual_overseer }); - assert_eq!(state.peer_views.get(peer).map(|v| v.finalized_number), Some(2)); + 
assert_eq!(state.peer_views.get(peer).map(|v| v.view.finalized_number), Some(2)); assert_eq!( state .blocks @@ -930,7 +945,7 @@ fn update_peer_view() { virtual_overseer }); - assert_eq!(state.peer_views.get(peer).map(|v| v.finalized_number), Some(finalized_number)); + assert_eq!(state.peer_views.get(peer).map(|v| v.view.finalized_number), Some(finalized_number)); assert!(state.blocks.get(&hash_c).unwrap().known_by.get(peer).is_none()); } @@ -961,7 +976,7 @@ fn import_remotely_then_locally() { // import the assignment remotely first let validator_index = ValidatorIndex(0); - let candidate_index = vec![0u32]; + let candidate_index = 0u32; let cert = fake_assignment_cert(hash, validator_index); let assignments = vec![(cert.clone(), candidate_index)]; let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments.clone()); @@ -975,8 +990,8 @@ fn import_remotely_then_locally() { i, tx, )) => { - assert_eq!(assignment, cert); - assert_eq!(i, candidate_index); + assert_eq!(assignment, cert.into()); + assert_eq!(i, candidate_index.into()); tx.send(AssignmentCheckResult::Accepted).unwrap(); } ); @@ -986,7 +1001,7 @@ fn import_remotely_then_locally() { // import the same assignment locally overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert, candidate_index), + ApprovalDistributionMessage::DistributeAssignment(cert.into(), candidate_index.into()), ) .await; @@ -1045,7 +1060,7 @@ fn sends_assignments_even_when_state_is_approved() { overseer_send(overseer, msg).await; let validator_index = ValidatorIndex(0); - let candidate_index = vec![0u32]; + let candidate_index = 0u32; // import an assignment and approval locally. let cert = fake_assignment_cert(hash, validator_index); @@ -1058,7 +1073,10 @@ fn sends_assignments_even_when_state_is_approved() { overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert.clone(), candidate_index), + ApprovalDistributionMessage::DistributeAssignment( + cert.clone().into(), + candidate_index.into(), + ), ) .await; @@ -1168,8 +1186,8 @@ fn race_condition_in_local_vs_remote_view_update() { claimed_candidate_index, tx, )) => { - assert_eq!(assignment, assignments[i].0); - assert_eq!(claimed_candidate_index, assignments[i].1); + assert_eq!(assignment, assignments[i].0.into()); + assert_eq!(claimed_candidate_index, assignments[i].1.into()); tx.send(AssignmentCheckResult::Accepted).unwrap(); } ); @@ -1223,7 +1241,7 @@ fn propagates_locally_generated_assignment_to_both_dimensions() { overseer_send(overseer, msg).await; let validator_index = ValidatorIndex(0); - let candidate_index = vec![0u32]; + let candidate_index = 0u32; // import an assignment and approval locally. let cert = fake_assignment_cert(hash, validator_index); @@ -1236,7 +1254,10 @@ fn propagates_locally_generated_assignment_to_both_dimensions() { overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert.clone(), candidate_index), + ApprovalDistributionMessage::DistributeAssignment( + cert.clone().into(), + candidate_index.into(), + ), ) .await; @@ -1325,7 +1346,7 @@ fn propagates_assignments_along_unshared_dimension() { // Test messages from X direction go to Y peers { let validator_index = ValidatorIndex(0); - let candidate_index = vec![0u32]; + let candidate_index = 0u32; // import an assignment and approval locally. 
let cert = fake_assignment_cert(hash, validator_index); @@ -1374,7 +1395,7 @@ fn propagates_assignments_along_unshared_dimension() { // Test messages from X direction go to Y peers { let validator_index = ValidatorIndex(50); - let candidate_index = vec![0u32]; + let candidate_index = 0u32; // import an assignment and approval locally. let cert = fake_assignment_cert(hash, validator_index); @@ -1471,7 +1492,7 @@ fn propagates_to_required_after_connect() { overseer_send(overseer, msg).await; let validator_index = ValidatorIndex(0); - let candidate_index = vec![0u32]; + let candidate_index = 0u32; // import an assignment and approval locally. let cert = fake_assignment_cert(hash, validator_index); @@ -1484,7 +1505,10 @@ fn propagates_to_required_after_connect() { overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert.clone(), candidate_index), + ApprovalDistributionMessage::DistributeAssignment( + cert.clone().into(), + candidate_index.into(), + ), ) .await; @@ -1596,7 +1620,7 @@ fn sends_to_more_peers_after_getting_topology() { overseer_send(overseer, msg).await; let validator_index = ValidatorIndex(0); - let candidate_index = vec![0u32]; + let candidate_index = 0u32; // import an assignment and approval locally. let cert = fake_assignment_cert(hash, validator_index); @@ -1609,7 +1633,10 @@ fn sends_to_more_peers_after_getting_topology() { overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert.clone(), candidate_index), + ApprovalDistributionMessage::DistributeAssignment( + cert.clone().into(), + candidate_index.into(), + ), ) .await; @@ -1748,7 +1775,7 @@ fn originator_aggression_l1() { overseer_send(overseer, msg).await; let validator_index = ValidatorIndex(0); - let candidate_index = vec![0u32]; + let candidate_index = 0u32; // import an assignment and approval locally. let cert = fake_assignment_cert(hash, validator_index); @@ -1768,7 +1795,10 @@ fn originator_aggression_l1() { overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert.clone(), candidate_index), + ApprovalDistributionMessage::DistributeAssignment( + cert.clone().into(), + candidate_index.into(), + ), ) .await; @@ -1906,7 +1936,7 @@ fn non_originator_aggression_l1() { overseer_send(overseer, msg).await; let validator_index = ValidatorIndex(0); - let candidate_index = vec![0u32]; + let candidate_index = 0u32; // import an assignment and approval locally. let cert = fake_assignment_cert(hash, validator_index); @@ -1918,7 +1948,7 @@ fn non_originator_aggression_l1() { ) .await; - let assignments = vec![(cert.clone(), candidate_index)]; + let assignments = vec![(cert.clone().into(), candidate_index)]; let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments.clone()); // Issuer of the message is important, not the peer we receive from. @@ -2011,7 +2041,7 @@ fn non_originator_aggression_l2() { overseer_send(overseer, msg).await; let validator_index = ValidatorIndex(0); - let candidate_index = vec![0u32]; + let candidate_index = 0u32; // import an assignment and approval locally. let cert = fake_assignment_cert(hash, validator_index); @@ -2177,7 +2207,7 @@ fn resends_messages_periodically() { overseer_send(overseer, msg).await; let validator_index = ValidatorIndex(0); - let candidate_index = vec![0u32]; + let candidate_index = 0u32; // import an assignment and approval locally. 
let cert = fake_assignment_cert(hash, validator_index); @@ -2293,7 +2323,9 @@ fn batch_test_round(message_count: usize) { let validators = 0..message_count; let assignments: Vec<_> = validators .clone() - .map(|index| (fake_assignment_cert(Hash::zero(), ValidatorIndex(index as u32)), 0)) + .map(|index| { + (fake_assignment_cert(Hash::zero(), ValidatorIndex(index as u32)).into(), 0.into()) + }) .collect(); let approvals: Vec<_> = validators @@ -2306,8 +2338,18 @@ fn batch_test_round(message_count: usize) { .collect(); let peer = PeerId::random(); - send_assignments_batched(&mut sender, assignments.clone(), peer).await; - send_approvals_batched(&mut sender, approvals.clone(), peer).await; + send_assignments_batched( + &mut sender, + assignments.clone(), + &vec![(peer, ValidationVersion::V1.into())], + ) + .await; + send_approvals_batched( + &mut sender, + approvals.clone(), + &vec![(peer, ValidationVersion::V1.into())], + ) + .await; // Check expected assignments batches. for assignment_index in (0..assignments.len()).step_by(super::MAX_ASSIGNMENT_BATCH_SIZE) { From 31c05482dbb4d6fe82319353e3a567b97d109b44 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Thu, 20 Apr 2023 15:29:39 +0000 Subject: [PATCH 038/105] Uncomment aggression Signed-off-by: Andrei Sandu --- node/network/approval-distribution/src/lib.rs | 234 +++++++++++------- .../approval-distribution/src/metrics.rs | 54 ++-- node/primitives/src/approval.rs | 1 + 3 files changed, 169 insertions(+), 120 deletions(-) diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index f730a4dcae22..0e9a94e470cb 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -225,6 +225,47 @@ struct PeerEntry { pub version: ProtocolVersion, } +/// Aggression configuration representation +#[derive(Clone)] +struct AggressionConfig { + /// Aggression level 1: all validators send all their own messages to all peers. + l1_threshold: Option, + /// Aggression level 2: level 1 + all validators send all messages to all peers in the X and Y dimensions. + l2_threshold: Option, + /// How often to re-send messages to all targeted recipients. + /// This applies to all unfinalized blocks. + resend_unfinalized_period: Option, +} + +impl AggressionConfig { + /// Returns `true` if block is not too old depending on the aggression level + fn is_age_relevant(&self, block_age: BlockNumber) -> bool { + if let Some(t) = self.l1_threshold { + block_age >= t + } else if let Some(t) = self.resend_unfinalized_period { + block_age > 0 && block_age % t == 0 + } else { + false + } + } +} + +impl Default for AggressionConfig { + fn default() -> Self { + AggressionConfig { + l1_threshold: Some(13), + l2_threshold: Some(28), + resend_unfinalized_period: Some(8), + } + } +} + +#[derive(PartialEq)] +enum Resend { + Yes, + No, +} + /// The [`State`] struct is responsible for tracking the overall state of the subsystem. /// /// It tracks metadata about our view of the unfinalized chain, @@ -254,6 +295,9 @@ struct State { /// HashMap from active leaves to spans spans: HashMap, + + /// Aggression configuration. 
+ aggression_config: AggressionConfig, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -672,7 +716,7 @@ impl State { } } - // self.enable_aggression(ctx, Resend::Yes, metrics).await; + self.enable_aggression(ctx, Resend::Yes, metrics).await; } async fn handle_new_session_topology( @@ -903,8 +947,8 @@ impl State { async fn handle_block_finalized( &mut self, - _ctx: &mut Context, - _metrics: &Metrics, + ctx: &mut Context, + metrics: &Metrics, finalized_number: BlockNumber, ) { // we want to prune every block up to (including) finalized_number @@ -927,7 +971,7 @@ impl State { // If a block was finalized, this means we may need to move our aggression // forward to the now oldest block(s). - // self.enable_aggression(ctx, Resend::No, metrics).await; + self.enable_aggression(ctx, Resend::No, metrics).await; } async fn import_and_circulate_assignment( @@ -1608,95 +1652,99 @@ impl State { } } - // async fn enable_aggression( - // &mut self, - // ctx: &mut Context, - // resend: Resend, - // metrics: &Metrics, - // ) { - // let min_age = self.blocks_by_number.iter().next().map(|(num, _)| num); - // let max_age = self.blocks_by_number.iter().rev().next().map(|(num, _)| num); - // let config = self.aggression_config.clone(); - - // let (min_age, max_age) = match (min_age, max_age) { - // (Some(min), Some(max)) => (min, max), - // _ => return, // empty. - // }; - - // let diff = max_age - min_age; - // if !self.aggression_config.is_age_relevant(diff) { - // return - // } - - // adjust_required_routing_and_propagate( - // ctx, - // &mut self.blocks, - // &self.topologies, - // |block_entry| { - // let block_age = max_age - block_entry.number; - - // if resend == Resend::Yes && - // config - // .resend_unfinalized_period - // .as_ref() - // .map_or(false, |p| block_age > 0 && block_age % p == 0) - // { - // // Retry sending to all peers. - // for (_, knowledge) in block_entry.known_by.iter_mut() { - // knowledge.sent = Knowledge::default(); - // } - - // true - // } else { - // false - // } - // }, - // |_, _, _| {}, - // ) - // .await; - - // adjust_required_routing_and_propagate( - // ctx, - // &mut self.blocks, - // &self.topologies, - // |block_entry| { - // // Ramp up aggression only for the very oldest block(s). - // // Approval voting can get stuck on a single block preventing - // // its descendants from being finalized. Waste minimal bandwidth - // // this way. Also, disputes might prevent finality - again, nothing - // // to waste bandwidth on newer blocks for. - // &block_entry.number == min_age - // }, - // |required_routing, local, _| { - // // It's a bit surprising not to have a topology at this age. - // if *required_routing == RequiredRouting::PendingTopology { - // gum::debug!( - // target: LOG_TARGET, - // age = ?diff, - // "Encountered old block pending gossip topology", - // ); - // return - // } - - // if config.l1_threshold.as_ref().map_or(false, |t| &diff >= t) { - // // Message originator sends to everyone. - // if local && *required_routing != RequiredRouting::All { - // metrics.on_aggression_l1(); - // *required_routing = RequiredRouting::All; - // } - // } - - // if config.l2_threshold.as_ref().map_or(false, |t| &diff >= t) { - // // Message originator sends to everyone. Everyone else sends to XY. 
- // if !local && *required_routing != RequiredRouting::GridXY { - // metrics.on_aggression_l2(); - // *required_routing = RequiredRouting::GridXY; - // } - // } - // }, - // ) - // .await; - // } + async fn enable_aggression( + &mut self, + ctx: &mut Context, + resend: Resend, + metrics: &Metrics, + ) { + let min_age = self.blocks_by_number.iter().next().map(|(num, _)| num); + let max_age = self.blocks_by_number.iter().rev().next().map(|(num, _)| num); + let config = self.aggression_config.clone(); + + let (min_age, max_age) = match (min_age, max_age) { + (Some(min), Some(max)) => (min, max), + _ => return, // empty. + }; + + let diff = max_age - min_age; + if !self.aggression_config.is_age_relevant(diff) { + return + } + + adjust_required_routing_and_propagate( + ctx, + &mut self.blocks, + &self.topologies, + |block_entry| { + let block_age = max_age - block_entry.number; + + if resend == Resend::Yes && + config + .resend_unfinalized_period + .as_ref() + .map_or(false, |p| block_age > 0 && block_age % p == 0) + { + // Retry sending to all peers. + for (_, knowledge) in block_entry.known_by.iter_mut() { + knowledge.sent = Knowledge::default(); + } + + true + } else { + false + } + }, + |required_routing, _, _| *required_routing, + &self.peer_views, + ) + .await; + + adjust_required_routing_and_propagate( + ctx, + &mut self.blocks, + &self.topologies, + |block_entry| { + // Ramp up aggression only for the very oldest block(s). + // Approval voting can get stuck on a single block preventing + // its descendants from being finalized. Waste minimal bandwidth + // this way. Also, disputes might prevent finality - again, nothing + // to waste bandwidth on newer blocks for. + &block_entry.number == min_age + }, + |required_routing, local, _| { + // It's a bit surprising not to have a topology at this age. + if *required_routing == RequiredRouting::PendingTopology { + gum::debug!( + target: LOG_TARGET, + age = ?diff, + "Encountered old block pending gossip topology", + ); + return *required_routing + } + + if config.l2_threshold.as_ref().map_or(false, |t| &diff >= t) { + // Message originator sends to everyone. Everyone else sends to XY. + if !local && *required_routing != RequiredRouting::GridXY { + metrics.on_aggression_l2(); + return RequiredRouting::GridXY + } + } + + if config.l1_threshold.as_ref().map_or(false, |t| &diff >= t) { + // Message originator sends to everyone. 
+ if local && *required_routing != RequiredRouting::All { + metrics.on_aggression_l1(); + return RequiredRouting::All + } + } + + *required_routing + }, + &self.peer_views, + ) + .await; + } } // This adjusts the required routing of messages in blocks that pass the block filter diff --git a/node/network/approval-distribution/src/metrics.rs b/node/network/approval-distribution/src/metrics.rs index ff11110a8199..746be543ccb4 100644 --- a/node/network/approval-distribution/src/metrics.rs +++ b/node/network/approval-distribution/src/metrics.rs @@ -26,8 +26,8 @@ struct MetricsInner { assignments_imported_total: prometheus::CounterVec, approvals_imported_total: prometheus::Counter, unified_with_peer_total: prometheus::Counter, - // aggression_l1_messages_total: prometheus::Counter, - // aggression_l2_messages_total: prometheus::Counter, + aggression_l1_messages_total: prometheus::Counter, + aggression_l2_messages_total: prometheus::Counter, time_unify_with_peer: prometheus::Histogram, time_import_pending_now_known: prometheus::Histogram, time_awaiting_approval_voting: prometheus::Histogram, @@ -86,17 +86,17 @@ impl Metrics { .map(|metrics| metrics.time_awaiting_approval_voting.start_timer()) } - // pub(crate) fn on_aggression_l1(&self) { - // if let Some(metrics) = &self.0 { - // metrics.aggression_l1_messages_total.inc(); - // } - // } - - // pub(crate) fn on_aggression_l2(&self) { - // if let Some(metrics) = &self.0 { - // metrics.aggression_l2_messages_total.inc(); - // } - // } + pub(crate) fn on_aggression_l1(&self) { + if let Some(metrics) = &self.0 { + metrics.aggression_l1_messages_total.inc(); + } + } + + pub(crate) fn on_aggression_l2(&self) { + if let Some(metrics) = &self.0 { + metrics.aggression_l2_messages_total.inc(); + } + } } impl MetricsTrait for Metrics { @@ -126,20 +126,20 @@ impl MetricsTrait for Metrics { )?, registry, )?, - // aggression_l1_messages_total: prometheus::register( - // prometheus::Counter::new( - // "polkadot_parachain_approval_distribution_aggression_l1_messages_total", - // "Number of messages in approval distribution for which aggression L1 has been triggered", - // )?, - // registry, - // )?, - // aggression_l2_messages_total: prometheus::register( - // prometheus::Counter::new( - // "polkadot_parachain_approval_distribution_aggression_l2_messages_total", - // "Number of messages in approval distribution for which aggression L2 has been triggered", - // )?, - // registry, - // )?, + aggression_l1_messages_total: prometheus::register( + prometheus::Counter::new( + "polkadot_parachain_approval_distribution_aggression_l1_messages_total", + "Number of messages in approval distribution for which aggression L1 has been triggered", + )?, + registry, + )?, + aggression_l2_messages_total: prometheus::register( + prometheus::Counter::new( + "polkadot_parachain_approval_distribution_aggression_l2_messages_total", + "Number of messages in approval distribution for which aggression L2 has been triggered", + )?, + registry, + )?, time_unify_with_peer: prometheus::register( prometheus::Histogram::with_opts( prometheus::HistogramOpts::new( diff --git a/node/primitives/src/approval.rs b/node/primitives/src/approval.rs index 1b6a6126439d..b0196055627c 100644 --- a/node/primitives/src/approval.rs +++ b/node/primitives/src/approval.rs @@ -284,6 +284,7 @@ impl From for AssignmentCertV2 { } /// Errors that can occur when trying to convert to/from assignment v1/v2 +#[derive(Debug)] pub enum AssignmentConversionError { /// Assignment certificate is not supported in v1. 
CertificateNotSupported, From ef0bca5d3bc1e38e8999b2552427846179bea009 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Thu, 20 Apr 2023 15:29:57 +0000 Subject: [PATCH 039/105] approval-dist: some tests fail, but it compiles Signed-off-by: Andrei Sandu --- node/network/approval-distribution/src/tests.rs | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/node/network/approval-distribution/src/tests.rs b/node/network/approval-distribution/src/tests.rs index ab7a4275f7f9..92ad2a6356a1 100644 --- a/node/network/approval-distribution/src/tests.rs +++ b/node/network/approval-distribution/src/tests.rs @@ -428,7 +428,7 @@ fn spam_attack_results_in_negative_reputation_change() { claimed_candidate_index, tx, )) => { - assert_eq!(assignment, assignments[i].0.into()); + assert_eq!(assignment, assignments[i].0.clone().into()); assert_eq!(claimed_candidate_index, assignments[i].1.into()); tx.send(AssignmentCheckResult::Accepted).unwrap(); } @@ -990,7 +990,7 @@ fn import_remotely_then_locally() { i, tx, )) => { - assert_eq!(assignment, cert.into()); + assert_eq!(assignment, cert.clone().into()); assert_eq!(i, candidate_index.into()); tx.send(AssignmentCheckResult::Accepted).unwrap(); } @@ -1001,7 +1001,10 @@ fn import_remotely_then_locally() { // import the same assignment locally overseer_send( overseer, - ApprovalDistributionMessage::DistributeAssignment(cert.into(), candidate_index.into()), + ApprovalDistributionMessage::DistributeAssignment( + cert.clone().into(), + candidate_index.into(), + ), ) .await; @@ -1186,7 +1189,7 @@ fn race_condition_in_local_vs_remote_view_update() { claimed_candidate_index, tx, )) => { - assert_eq!(assignment, assignments[i].0.into()); + assert_eq!(assignment, assignments[i].0.clone().into()); assert_eq!(claimed_candidate_index, assignments[i].1.into()); tx.send(AssignmentCheckResult::Accepted).unwrap(); } @@ -2371,7 +2374,7 @@ fn batch_test_round(message_count: usize) { assert_eq!(peers.len(), 1); for (message_index, assignment) in sent_assignments.iter().enumerate() { - assert_eq!(assignment.0, assignments[assignment_index + message_index].0); + assert_eq!(assignment.0, assignments[assignment_index + message_index].0.clone().try_into().unwrap()); assert_eq!(assignment.1, 0); } } From 1a7d33be137bdd125e46ec57c163134ca6c7d209 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 21 Apr 2023 14:06:45 +0000 Subject: [PATCH 040/105] Re-enable tests Signed-off-by: Andrei Sandu --- scripts/ci/gitlab/pipeline/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index 3db5280b7342..a079ebcefd2d 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -41,7 +41,7 @@ test-linux-stable: # but still want to have debug assertions. 
RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" script: - # - time cargo test --workspace --profile testnet --verbose --locked --features=runtime-benchmarks,runtime-metrics,try-runtime + - time cargo test --workspace --profile testnet --verbose --locked --features=runtime-benchmarks,runtime-metrics,try-runtime - sleep 1 .check-dependent-project: &check-dependent-project From d2fd321a0dd63ca949bab40aaecb6f706f712e03 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 21 Apr 2023 14:07:33 +0000 Subject: [PATCH 041/105] Fix tests Signed-off-by: Andrei Sandu --- node/network/approval-distribution/src/lib.rs | 58 +++++++++++-------- .../approval-distribution/src/tests.rs | 4 +- 2 files changed, 36 insertions(+), 26 deletions(-) diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index 0e9a94e470cb..4565a0699925 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -1471,20 +1471,27 @@ impl State { let v1_peers = filter_by_peer_version(&peers, ValidationVersion::V1.into()); let v2_peers = filter_by_peer_version(&peers, ValidationVersion::VStaging.into()); - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - v1_peers, - Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( - protocol_v1::ApprovalDistributionMessage::Approvals(approvals.clone()), - )), - )) - .await; - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - v2_peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( - protocol_vstaging::ApprovalDistributionMessage::Approvals(approvals), - )), - )) - .await; + if v1_peers.len() > 0 { + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( + v1_peers, + Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( + protocol_v1::ApprovalDistributionMessage::Approvals(approvals.clone()), + )), + )) + .await; + } + + if v2_peers.len() > 0 { + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( + v2_peers, + Versioned::VStaging( + protocol_vstaging::ValidationProtocol::ApprovalDistribution( + protocol_vstaging::ApprovalDistributionMessage::Approvals(approvals), + ), + ), + )) + .await; + } } } @@ -1627,6 +1634,7 @@ impl State { gum::trace!( target: LOG_TARGET, ?peer_id, + ?protocol_version, num = assignments_to_send.len(), "Sending assignments to unified peer", ); @@ -1643,6 +1651,7 @@ impl State { gum::trace!( target: LOG_TARGET, ?peer_id, + ?protocol_version, num = approvals_to_send.len(), "Sending approvals to unified peer", ); @@ -1723,23 +1732,24 @@ impl State { return *required_routing } - if config.l2_threshold.as_ref().map_or(false, |t| &diff >= t) { - // Message originator sends to everyone. Everyone else sends to XY. - if !local && *required_routing != RequiredRouting::GridXY { - metrics.on_aggression_l2(); - return RequiredRouting::GridXY - } - } + let mut new_required_routing = *required_routing; if config.l1_threshold.as_ref().map_or(false, |t| &diff >= t) { // Message originator sends to everyone. - if local && *required_routing != RequiredRouting::All { + if local && new_required_routing != RequiredRouting::All { metrics.on_aggression_l1(); - return RequiredRouting::All + new_required_routing = RequiredRouting::All; } } - *required_routing + if config.l2_threshold.as_ref().map_or(false, |t| &diff >= t) { + // Message originator sends to everyone. Everyone else sends to XY. 
+ if !local && new_required_routing != RequiredRouting::GridXY { + metrics.on_aggression_l2(); + new_required_routing = RequiredRouting::GridXY; + } + } + new_required_routing }, &self.peer_views, ) diff --git a/node/network/approval-distribution/src/tests.rs b/node/network/approval-distribution/src/tests.rs index 92ad2a6356a1..6ed66c564a18 100644 --- a/node/network/approval-distribution/src/tests.rs +++ b/node/network/approval-distribution/src/tests.rs @@ -24,8 +24,8 @@ use polkadot_node_network_protocol::{ view, ObservedRole, }; use polkadot_node_primitives::approval::{ - v2::RELAY_VRF_MODULO_CONTEXT, AssignmentCert, AssignmentCertKind, AssignmentCertKindV2, - AssignmentCertV2, IndirectAssignmentCert, VRFOutput, VRFProof, + v2::RELAY_VRF_MODULO_CONTEXT, AssignmentCert, AssignmentCertKind, IndirectAssignmentCert, + VRFOutput, VRFProof, }; use polkadot_node_subsystem::messages::{network_bridge_event, AllMessages, ApprovalCheckError}; use polkadot_node_subsystem_test_helpers as test_helpers; From 2288086c124c32181d13e2ad65240a4682b69877 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 21 Apr 2023 14:29:20 +0000 Subject: [PATCH 042/105] Fix build Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/criteria.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index 91ee1b7603b0..433bd09f5ac2 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -694,7 +694,7 @@ pub(crate) fn check_assignment_cert( match &assignment.kind { AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield } => { // Check that claimed core bitfield match the one from certificate. - if claimed_core_indices != core_bitfield { + if &claimed_core_indices != core_bitfield { return Err(InvalidAssignment(Reason::VRFModuloCoreIndexMismatch)) } From 5755b17ce6f2742098a1913ece1109be3f76c120 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Sun, 23 Apr 2023 12:55:07 +0000 Subject: [PATCH 043/105] clippy fixes Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/criteria.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index 433bd09f5ac2..225f98dee902 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -275,7 +275,7 @@ impl AssignmentCriteria for RealAssignmentCriteria { config: &Config, leaving_cores: Vec<(CandidateHash, CoreIndex, GroupIndex)>, ) -> HashMap { - compute_assignments(keystore, relay_vrf_story, config, leaving_cores, true) + compute_assignments(keystore, relay_vrf_story, config, leaving_cores, false) } fn check_assignment_cert( @@ -356,7 +356,7 @@ pub(crate) fn compute_assignments( // Ignore any cores where the assigned group is our own. let leaving_cores = leaving_cores .into_iter() - .filter(|&(_, _, ref g)| !is_in_backing_group(&config.validator_groups, index, *g)) + .filter(|(_, _, g)| !is_in_backing_group(&config.validator_groups, index, *g)) .map(|(c_hash, core, _)| (c_hash, core)) .collect::>(); @@ -690,7 +690,7 @@ pub(crate) fn check_assignment_cert( } } - let &(ref vrf_output, ref vrf_proof) = &assignment.vrf; + let (vrf_output, vrf_proof) = &assignment.vrf; match &assignment.kind { AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield } => { // Check that claimed core bitfield match the one from certificate. 
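A minimal sketch of the modulo sampling idea that the compact assignment checks in the patches above rely on: the verified VRF output is expanded into 4-byte words, each word is read as a little-endian u32 and reduced modulo `n_cores`, and the resulting set of cores is what the claimed core bitfield is compared against. The snippet below only mirrors that arithmetic on a stand-in byte slice (`vrf_bytes` is assumed to play the role of the real VRF output bytes); it is an illustration, not the production code path.

// Illustrative only: derive claimed core indices from raw VRF output bytes
// by reading 4-byte little-endian words and reducing them modulo `n_cores`.
fn sample_cores(vrf_bytes: &[u8], num_samples: usize, n_cores: u32) -> Vec<u32> {
    vrf_bytes
        .chunks_exact(4)
        .take(num_samples)
        .map(|chunk| {
            let mut word = [0u8; 4];
            word.copy_from_slice(chunk);
            u32::from_le_bytes(word) % n_cores
        })
        .collect()
}

fn main() {
    // Two 4-byte samples against 100 cores: 42 % 100 = 42 and 400 % 100 = 0.
    let vrf_bytes = [0x2a, 0x00, 0x00, 0x00, 0x90, 0x01, 0x00, 0x00];
    assert_eq!(sample_cores(&vrf_bytes, 2, 100), vec![42, 0]);
}

The point of sampling several cores from one VRF output, as these patches do for `RelayVRFModuloCompact`, is that a validator can claim multiple cores with a single certificate instead of one `RelayVRFModulo` certificate per sample.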
From fc43f5bafcd43d98725165d3d7c14998bf24c457 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Mon, 24 Apr 2023 12:22:49 +0000 Subject: [PATCH 044/105] fix more tests :( Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/lib.rs | 2 +- node/core/approval-voting/src/ops.rs | 4 +- node/core/dispute-coordinator/src/import.rs | 2 +- .../bitfield-distribution/src/tests.rs | 84 +++++++++++++------ 4 files changed, 61 insertions(+), 31 deletions(-) diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index d17f674d2f9a..6809ce704e7b 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -495,7 +495,7 @@ impl Wakeups { .collect(); let mut pruned_wakeups = BTreeMap::new(); - self.reverse_wakeups.retain(|&(ref h, ref c_h), tick| { + self.reverse_wakeups.retain(|(h, c_h), tick| { let live = !pruned_blocks.contains(h); if !live { pruned_wakeups.entry(*tick).or_insert_with(HashSet::new).insert((*h, *c_h)); diff --git a/node/core/approval-voting/src/ops.rs b/node/core/approval-voting/src/ops.rs index 37f564c34f71..c0d6ce0e6054 100644 --- a/node/core/approval-voting/src/ops.rs +++ b/node/core/approval-voting/src/ops.rs @@ -62,7 +62,7 @@ fn visit_and_remove_block_entry( }; overlayed_db.delete_block_entry(&block_hash); - for &(_, ref candidate_hash) in block_entry.candidates() { + for (_, candidate_hash) in block_entry.candidates() { let candidate = match visited_candidates.entry(*candidate_hash) { Entry::Occupied(e) => e.into_mut(), Entry::Vacant(e) => { @@ -227,7 +227,7 @@ pub fn add_block_entry( // read and write all updated entries. { - for &(_, ref candidate_hash) in entry.candidates() { + for (_, candidate_hash) in entry.candidates() { let NewCandidateInfo { candidate, backing_group, our_assignment } = match candidate_info(candidate_hash) { None => return Ok(Vec::new()), diff --git a/node/core/dispute-coordinator/src/import.rs b/node/core/dispute-coordinator/src/import.rs index 4f6edc5fcef0..3caca8e02cac 100644 --- a/node/core/dispute-coordinator/src/import.rs +++ b/node/core/dispute-coordinator/src/import.rs @@ -97,7 +97,7 @@ pub enum OwnVoteState { } impl OwnVoteState { - fn new<'a>(votes: &CandidateVotes, env: &CandidateEnvironment<'a>) -> Self { + fn new(votes: &CandidateVotes, env: &CandidateEnvironment) -> Self { let controlled_indices = env.controlled_indices(); if controlled_indices.is_empty() { return Self::CannotVote diff --git a/node/network/bitfield-distribution/src/tests.rs b/node/network/bitfield-distribution/src/tests.rs index 0ee9f98346ea..5a7ef85789bd 100644 --- a/node/network/bitfield-distribution/src/tests.rs +++ b/node/network/bitfield-distribution/src/tests.rs @@ -83,7 +83,7 @@ fn prewarmed_state( span: PerLeafSpan::new(Arc::new(jaeger::Span::Disabled), "test"), }, }, - peer_views: peers.iter().cloned().map(|peer| (peer, view!(relay_parent))).collect(), + peer_entries: peers.iter().cloned().map(|peer| (peer, view!(relay_parent))).collect(), topologies, view: our_view!(relay_parent), } @@ -215,7 +215,10 @@ fn receive_invalid_signature() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_b.clone(), invalid_msg.into_network_message()), + NetworkBridgeEvent::PeerMessage( + peer_b.clone(), + invalid_msg.into_network_message(ValidationVersion::V1.into()) + ), &mut rng, )); @@ -226,7 +229,7 @@ fn receive_invalid_signature() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_b.clone(), invalid_msg_2.into_network_message()), + 
NetworkBridgeEvent::PeerMessage(peer_b.clone(), invalid_msg_2.into_network_message(ValidationVersion::V1.into())), &mut rng, )); // reputation change due to invalid signature @@ -260,7 +263,7 @@ fn receive_invalid_validator_index() { let (mut state, signing_context, keystore, validator) = state_with_view(our_view![hash_a, hash_b], hash_a.clone()); - state.peer_views.insert(peer_b.clone(), view![hash_a]); + state.peer_entries.insert(peer_b.clone(), view![hash_a]); let payload = AvailabilityBitfield(bitvec![u8, bitvec::order::Lsb0; 1u8; 32]); let signed = Signed::::sign( @@ -286,7 +289,7 @@ fn receive_invalid_validator_index() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_b.clone(), msg.into_network_message()), + NetworkBridgeEvent::PeerMessage(peer_b.clone(), msg.into_network_message(ValidationVersion::V1.into())), &mut rng, )); @@ -349,7 +352,10 @@ fn receive_duplicate_messages() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_b.clone(), msg.clone().into_network_message(),), + NetworkBridgeEvent::PeerMessage( + peer_b.clone(), + msg.clone().into_network_message(ValidationVersion::V1.into()), + ), &mut rng, )); @@ -382,7 +388,10 @@ fn receive_duplicate_messages() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_a.clone(), msg.clone().into_network_message(),), + NetworkBridgeEvent::PeerMessage( + peer_a.clone(), + msg.clone().into_network_message(ValidationVersion::V1.into()), + ), &mut rng, )); @@ -401,7 +410,10 @@ fn receive_duplicate_messages() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_b.clone(), msg.clone().into_network_message(),), + NetworkBridgeEvent::PeerMessage( + peer_b.clone(), + msg.clone().into_network_message(ValidationVersion::V1.into()), + ), &mut rng, )); @@ -447,8 +459,8 @@ fn do_not_relay_message_twice() { .flatten() .expect("should be signed"); - state.peer_views.insert(peer_b.clone(), view![hash]); - state.peer_views.insert(peer_a.clone(), view![hash]); + state.peer_entries.insert(peer_b.clone(), view![hash]); + state.peer_entries.insert(peer_a.clone(), view![hash]); let msg = BitfieldGossipMessage { relay_parent: hash.clone(), @@ -467,7 +479,7 @@ fn do_not_relay_message_twice() { &mut ctx, state.per_relay_parent.get_mut(&hash).unwrap(), &gossip_peers, - &mut state.peer_views, + &mut state.peer_entries, validator.clone(), msg.clone(), RequiredRouting::GridXY, @@ -494,7 +506,7 @@ fn do_not_relay_message_twice() { assert_eq!(2, peers.len()); assert!(peers.contains(&peer_a)); assert!(peers.contains(&peer_b)); - assert_eq!(send_msg, msg.clone().into_validation_protocol()); + assert_eq!(send_msg, msg.clone().into_validation_protocol(ValidationVersion::V1.into())); } ); @@ -503,7 +515,7 @@ fn do_not_relay_message_twice() { &mut ctx, state.per_relay_parent.get_mut(&hash).unwrap(), &gossip_peers, - &mut state.peer_views, + &mut state.peer_entries, validator.clone(), msg.clone(), RequiredRouting::GridXY, @@ -575,7 +587,7 @@ fn changing_view() { NetworkBridgeEvent::PeerConnected( peer_b.clone(), ObservedRole::Full, - ValidationVersion::V1.into(), + ValidationVersion::VValidationVersion::V1.into(), None ), &mut rng, @@ -590,14 +602,17 @@ fn changing_view() { &mut rng, )); - assert!(state.peer_views.contains_key(&peer_b)); + assert!(state.peer_entries.contains_key(&peer_b)); // recv a first message from the network launch!(handle_network_msg( &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_b.clone(), 
msg.clone().into_network_message(),), + NetworkBridgeEvent::PeerMessage( + peer_b.clone(), + msg.clone().into_network_message(ValidationVersion::V1.into()), + ), &mut rng, )); @@ -632,8 +647,11 @@ fn changing_view() { &mut rng, )); - assert!(state.peer_views.contains_key(&peer_b)); - assert_eq!(state.peer_views.get(&peer_b).expect("Must contain value for peer B"), &view![]); + assert!(state.peer_entries.contains_key(&peer_b)); + assert_eq!( + state.peer_entries.get(&peer_b).expect("Must contain value for peer B"), + &view![] + ); // on rx of the same message, since we are not interested, // should give penalty @@ -641,7 +659,10 @@ fn changing_view() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_b.clone(), msg.clone().into_network_message(),), + NetworkBridgeEvent::PeerMessage( + peer_b.clone(), + msg.clone().into_network_message(ValidationVersion::V1.into()), + ), &mut rng, )); @@ -673,7 +694,10 @@ fn changing_view() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_a.clone(), msg.clone().into_network_message(),), + NetworkBridgeEvent::PeerMessage( + peer_a.clone(), + msg.clone().into_network_message(ValidationVersion::V1.into()), + ), &mut rng, )); @@ -719,8 +743,8 @@ fn do_not_send_message_back_to_origin() { .flatten() .expect("should be signed"); - state.peer_views.insert(peer_b.clone(), view![hash]); - state.peer_views.insert(peer_a.clone(), view![hash]); + state.peer_entries.insert(peer_b.clone(), view![hash]); + state.peer_entries.insert(peer_a.clone(), view![hash]); let msg = BitfieldGossipMessage { relay_parent: hash.clone(), @@ -737,7 +761,10 @@ fn do_not_send_message_back_to_origin() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_b.clone(), msg.clone().into_network_message(),), + NetworkBridgeEvent::PeerMessage( + peer_b.clone(), + msg.clone().into_network_message(ValidationVersion::V1.into()), + ), &mut rng, )); @@ -759,7 +786,7 @@ fn do_not_send_message_back_to_origin() { ) => { assert_eq!(1, peers.len()); assert!(peers.contains(&peer_a)); - assert_eq!(send_msg, msg.clone().into_validation_protocol()); + assert_eq!(send_msg, msg.clone().into_validation_protocol(ValidationVersion::V1.into())); } ); @@ -835,7 +862,7 @@ fn topology_test() { .expect("should be signed"); peers_x.iter().chain(peers_y.iter()).for_each(|peer| { - state.peer_views.insert(peer.clone(), view![hash]); + state.peer_entries.insert(peer.clone(), view![hash]); }); let msg = BitfieldGossipMessage { @@ -853,7 +880,10 @@ fn topology_test() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peers_x[0].clone(), msg.clone().into_network_message(),), + NetworkBridgeEvent::PeerMessage( + peers_x[0].clone(), + msg.clone().into_network_message(ValidationVersion::V1.into()), + ), &mut rng, )); @@ -880,7 +910,7 @@ fn topology_test() { assert!(topology.peers_x.iter().filter(|peer| peers.contains(&peer)).count() == 4); // Must never include originator assert!(!peers.contains(&peers_x[0])); - assert_eq!(send_msg, msg.clone().into_validation_protocol()); + assert_eq!(send_msg, msg.clone().into_validation_protocol(ValidationVersion::V1.into())); } ); From 33359cbef9ebbcf0b16e1cb12855a2e451f348a4 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 25 Apr 2023 13:20:51 +0000 Subject: [PATCH 045/105] more network protocol test fixes Signed-off-by: Andrei Sandu --- node/network/bitfield-distribution/src/lib.rs | 27 +++++---- .../bitfield-distribution/src/tests.rs | 55 +++++++++++++++---- 
.../statement-distribution/src/tests.rs | 12 +++- 3 files changed, 69 insertions(+), 25 deletions(-) diff --git a/node/network/bitfield-distribution/src/lib.rs b/node/network/bitfield-distribution/src/lib.rs index 71769a62e86d..892e4c1d9079 100644 --- a/node/network/bitfield-distribution/src/lib.rs +++ b/node/network/bitfield-distribution/src/lib.rs @@ -96,6 +96,7 @@ impl BitfieldGossipMessage { } // We keep track of each peer view and protocol version using this struct. +#[derive(Debug, PartialEq)] struct PeerEntry { pub view: View, pub version: net_protocol::peer_set::ProtocolVersion, @@ -452,17 +453,21 @@ async fn relay_message( let v2_peers = filter_by_peer_version(&interested_peers, ValidationVersion::VStaging.into()); - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - v1_peers, - message.clone().into_validation_protocol(ValidationVersion::V1.into()), - )) - .await; - - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - v2_peers, - message.into_validation_protocol(ValidationVersion::VStaging.into()), - )) - .await; + if v1_peers.len() > 0 { + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( + v1_peers, + message.clone().into_validation_protocol(ValidationVersion::V1.into()), + )) + .await; + } + + if v2_peers.len() > 0 { + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( + v2_peers, + message.into_validation_protocol(ValidationVersion::VStaging.into()), + )) + .await; + } } } diff --git a/node/network/bitfield-distribution/src/tests.rs b/node/network/bitfield-distribution/src/tests.rs index 5a7ef85789bd..a10cfd759b33 100644 --- a/node/network/bitfield-distribution/src/tests.rs +++ b/node/network/bitfield-distribution/src/tests.rs @@ -83,7 +83,16 @@ fn prewarmed_state( span: PerLeafSpan::new(Arc::new(jaeger::Span::Disabled), "test"), }, }, - peer_entries: peers.iter().cloned().map(|peer| (peer, view!(relay_parent))).collect(), + peer_entries: peers + .iter() + .cloned() + .map(|peer| { + ( + peer, + PeerEntry { view: view!(relay_parent), version: ValidationVersion::V1.into() }, + ) + }) + .collect(), topologies, view: our_view!(relay_parent), } @@ -229,7 +238,10 @@ fn receive_invalid_signature() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_b.clone(), invalid_msg_2.into_network_message(ValidationVersion::V1.into())), + NetworkBridgeEvent::PeerMessage( + peer_b.clone(), + invalid_msg_2.into_network_message(ValidationVersion::V1.into()) + ), &mut rng, )); // reputation change due to invalid signature @@ -263,7 +275,10 @@ fn receive_invalid_validator_index() { let (mut state, signing_context, keystore, validator) = state_with_view(our_view![hash_a, hash_b], hash_a.clone()); - state.peer_entries.insert(peer_b.clone(), view![hash_a]); + state.peer_entries.insert( + peer_b.clone(), + PeerEntry { view: view![hash_a], version: ValidationVersion::V1.into() }, + ); let payload = AvailabilityBitfield(bitvec![u8, bitvec::order::Lsb0; 1u8; 32]); let signed = Signed::::sign( @@ -289,7 +304,10 @@ fn receive_invalid_validator_index() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_b.clone(), msg.into_network_message(ValidationVersion::V1.into())), + NetworkBridgeEvent::PeerMessage( + peer_b.clone(), + msg.into_network_message(ValidationVersion::V1.into()) + ), &mut rng, )); @@ -459,8 +477,14 @@ fn do_not_relay_message_twice() { .flatten() .expect("should be signed"); - state.peer_entries.insert(peer_b.clone(), view![hash]); - state.peer_entries.insert(peer_a.clone(), 
view![hash]); + state.peer_entries.insert( + peer_b.clone(), + PeerEntry { view: view![hash], version: ValidationVersion::V1.into() }, + ); + state.peer_entries.insert( + peer_a.clone(), + PeerEntry { view: view![hash], version: ValidationVersion::V1.into() }, + ); let msg = BitfieldGossipMessage { relay_parent: hash.clone(), @@ -587,7 +611,7 @@ fn changing_view() { NetworkBridgeEvent::PeerConnected( peer_b.clone(), ObservedRole::Full, - ValidationVersion::VValidationVersion::V1.into(), + ValidationVersion::V1.into(), None ), &mut rng, @@ -650,7 +674,7 @@ fn changing_view() { assert!(state.peer_entries.contains_key(&peer_b)); assert_eq!( state.peer_entries.get(&peer_b).expect("Must contain value for peer B"), - &view![] + &PeerEntry { view: view![], version: ValidationVersion::V1.into() } ); // on rx of the same message, since we are not interested, @@ -743,8 +767,14 @@ fn do_not_send_message_back_to_origin() { .flatten() .expect("should be signed"); - state.peer_entries.insert(peer_b.clone(), view![hash]); - state.peer_entries.insert(peer_a.clone(), view![hash]); + state.peer_entries.insert( + peer_b.clone(), + PeerEntry { view: view![hash], version: ValidationVersion::V1.into() }, + ); + state.peer_entries.insert( + peer_a.clone(), + PeerEntry { view: view![hash], version: ValidationVersion::V1.into() }, + ); let msg = BitfieldGossipMessage { relay_parent: hash.clone(), @@ -862,7 +892,10 @@ fn topology_test() { .expect("should be signed"); peers_x.iter().chain(peers_y.iter()).for_each(|peer| { - state.peer_entries.insert(peer.clone(), view![hash]); + state.peer_entries.insert( + peer.clone(), + PeerEntry { view: view![hash], version: ValidationVersion::V1.into() }, + ); }); let msg = BitfieldGossipMessage { diff --git a/node/network/statement-distribution/src/tests.rs b/node/network/statement-distribution/src/tests.rs index f93d0932b306..73fa42c7b551 100644 --- a/node/network/statement-distribution/src/tests.rs +++ b/node/network/statement-distribution/src/tests.rs @@ -502,6 +502,7 @@ fn peer_view_update_sends_messages() { k }, maybe_authority: None, + version: ValidationVersion::V1.into(), }; let pool = sp_core::testing::TaskExecutor::new(); @@ -551,8 +552,12 @@ fn peer_view_update_sends_messages() { for statement in active_head.statements_about(candidate_hash) { let message = handle.recv().await; let expected_to = vec![peer.clone()]; - let expected_payload = - statement_message(hash_c, statement.statement.clone(), &Metrics::default()); + let expected_payload = statement_message( + hash_c, + statement.statement.clone(), + &Metrics::default(), + ValidationVersion::V1.into(), + ); assert_matches!( message, @@ -595,6 +600,7 @@ fn circulated_statement_goes_to_all_peers_with_view() { view: view.clone(), view_knowledge: view.iter().map(|v| (v.clone(), Default::default())).collect(), maybe_authority: None, + version: ValidationVersion::V1.into(), }; let mut peer_data: HashMap<_, _> = vec![ @@ -695,7 +701,7 @@ fn circulated_statement_goes_to_all_peers_with_view() { assert_eq!( payload, - statement_message(hash_b, statement.statement.clone(), &Metrics::default()), + statement_message(hash_b, statement.statement.clone(), &Metrics::default(), ValidationVersion::V1.into()), ); } ) From c8af81acc8629e7da62151144fc536762705d69b Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 25 Apr 2023 13:21:17 +0000 Subject: [PATCH 046/105] Fix network bridge tests using code from async branch Signed-off-by: Andrei Sandu --- node/network/bridge/src/rx/tests.rs | 304 +++++++++++++++++++++++++--- 
node/network/bridge/src/tx/tests.rs | 92 ++++++++- 2 files changed, 363 insertions(+), 33 deletions(-) diff --git a/node/network/bridge/src/rx/tests.rs b/node/network/bridge/src/rx/tests.rs index b16287f82f8a..1438a363e889 100644 --- a/node/network/bridge/src/rx/tests.rs +++ b/node/network/bridge/src/rx/tests.rs @@ -25,6 +25,7 @@ use parking_lot::Mutex; use std::{ collections::HashSet, sync::atomic::{AtomicBool, Ordering}, + task::Poll, }; use sc_network::{Event as NetworkEvent, IfDisconnected, ProtocolName}; @@ -46,7 +47,7 @@ use polkadot_node_subsystem_test_helpers::{ SingleItemSink, SingleItemStream, TestSubsystemContextHandle, }; use polkadot_node_subsystem_util::metered; -use polkadot_primitives::{AuthorityDiscoveryId, Hash}; +use polkadot_primitives::{AuthorityDiscoveryId, CandidateHash, Hash}; use sc_network::Multiaddr; use sp_keyring::Sr25519Keyring; @@ -136,8 +137,7 @@ impl Network for TestNetwork { } fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName) { - let (peer_set, version) = self.protocol_names.try_get_protocol(&protocol).unwrap(); - assert_eq!(version, peer_set.get_main_version()); + let (peer_set, _) = self.protocol_names.try_get_protocol(&protocol).unwrap(); self.action_tx .lock() @@ -146,8 +146,7 @@ impl Network for TestNetwork { } fn write_notification(&self, who: PeerId, protocol: ProtocolName, message: Vec) { - let (peer_set, version) = self.protocol_names.try_get_protocol(&protocol).unwrap(); - assert_eq!(version, peer_set.get_main_version()); + let (peer_set, _) = self.protocol_names.try_get_protocol(&protocol).unwrap(); self.action_tx .lock() @@ -189,10 +188,17 @@ impl TestNetworkHandle { v } - async fn connect_peer(&mut self, peer: PeerId, peer_set: PeerSet, role: ObservedRole) { + async fn connect_peer( + &mut self, + peer: PeerId, + protocol_version: ValidationVersion, + peer_set: PeerSet, + role: ObservedRole, + ) { + let protocol_version = ProtocolVersion::from(protocol_version); self.send_network_event(NetworkEvent::NotificationStreamOpened { remote: peer, - protocol: self.protocol_names.get_main_name(peer_set), + protocol: self.protocol_names.get_name(peer_set, protocol_version), negotiated_fallback: None, role: role.into(), received_handshake: vec![], @@ -405,10 +411,20 @@ fn send_our_view_upon_connection() { handle.await_mode_switch().await; network_handle - .connect_peer(peer.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer( + peer.clone(), + ValidationVersion::V1, + PeerSet::Validation, + ObservedRole::Full, + ) .await; network_handle - .connect_peer(peer.clone(), PeerSet::Collation, ObservedRole::Full) + .connect_peer( + peer.clone(), + ValidationVersion::V1, + PeerSet::Collation, + ObservedRole::Full, + ) .await; let view = view![head]; @@ -452,10 +468,20 @@ fn sends_view_updates_to_peers() { handle.await_mode_switch().await; network_handle - .connect_peer(peer_a.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer( + peer_a.clone(), + ValidationVersion::V1, + PeerSet::Validation, + ObservedRole::Full, + ) .await; network_handle - .connect_peer(peer_b.clone(), PeerSet::Collation, ObservedRole::Full) + .connect_peer( + peer_b.clone(), + ValidationVersion::V1, + PeerSet::Collation, + ObservedRole::Full, + ) .await; let actions = network_handle.next_network_actions(2).await; @@ -513,10 +539,20 @@ fn do_not_send_view_update_until_synced() { assert_ne!(peer_a, peer_b); network_handle - .connect_peer(peer_a.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer( + peer_a.clone(), + ValidationVersion::V1, + 
PeerSet::Validation, + ObservedRole::Full, + ) .await; network_handle - .connect_peer(peer_b.clone(), PeerSet::Collation, ObservedRole::Full) + .connect_peer( + peer_b.clone(), + ValidationVersion::V1, + PeerSet::Collation, + ObservedRole::Full, + ) .await; { @@ -606,10 +642,20 @@ fn do_not_send_view_update_when_only_finalized_block_changed() { let peer_b = PeerId::random(); network_handle - .connect_peer(peer_a.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer( + peer_a.clone(), + ValidationVersion::V1, + PeerSet::Validation, + ObservedRole::Full, + ) .await; network_handle - .connect_peer(peer_b.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer( + peer_b.clone(), + ValidationVersion::V1, + PeerSet::Validation, + ObservedRole::Full, + ) .await; let hash_a = Hash::repeat_byte(1); @@ -665,7 +711,12 @@ fn peer_view_updates_sent_via_overseer() { let peer = PeerId::random(); network_handle - .connect_peer(peer.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer( + peer.clone(), + ValidationVersion::V1, + PeerSet::Validation, + ObservedRole::Full, + ) .await; let view = view![Hash::repeat_byte(1)]; @@ -715,7 +766,12 @@ fn peer_messages_sent_via_overseer() { let peer = PeerId::random(); network_handle - .connect_peer(peer.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer( + peer.clone(), + ValidationVersion::V1, + PeerSet::Validation, + ObservedRole::Full, + ) .await; // bridge will inform about all connected peers. @@ -787,10 +843,20 @@ fn peer_disconnect_from_just_one_peerset() { let peer = PeerId::random(); network_handle - .connect_peer(peer.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer( + peer.clone(), + ValidationVersion::V1, + PeerSet::Validation, + ObservedRole::Full, + ) .await; network_handle - .connect_peer(peer.clone(), PeerSet::Collation, ObservedRole::Full) + .connect_peer( + peer.clone(), + ValidationVersion::V1, + PeerSet::Collation, + ObservedRole::Full, + ) .await; // bridge will inform about all connected peers. @@ -880,10 +946,20 @@ fn relays_collation_protocol_messages() { let peer_b = PeerId::random(); network_handle - .connect_peer(peer_a.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer( + peer_a.clone(), + ValidationVersion::V1, + PeerSet::Validation, + ObservedRole::Full, + ) .await; network_handle - .connect_peer(peer_b.clone(), PeerSet::Collation, ObservedRole::Full) + .connect_peer( + peer_b.clone(), + ValidationVersion::V1, + PeerSet::Collation, + ObservedRole::Full, + ) .await; // bridge will inform about all connected peers. @@ -983,10 +1059,20 @@ fn different_views_on_different_peer_sets() { let peer = PeerId::random(); network_handle - .connect_peer(peer.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer( + peer.clone(), + ValidationVersion::V1, + PeerSet::Validation, + ObservedRole::Full, + ) .await; network_handle - .connect_peer(peer.clone(), PeerSet::Collation, ObservedRole::Full) + .connect_peer( + peer.clone(), + ValidationVersion::V1, + PeerSet::Collation, + ObservedRole::Full, + ) .await; // bridge will inform about all connected peers. 
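(Aside, illustrative only and not part of this patch: the test helper now takes the negotiated protocol version explicitly, and the notification protocol name is resolved per version rather than via the peer set's main protocol. A hypothetical call site, using names from the hunks above:)

    // Connect a validation peer that negotiated the staging protocol version.
    network_handle
        .connect_peer(peer, ValidationVersion::VStaging, PeerSet::Validation, ObservedRole::Full)
        .await;
    // Internally the handshake resolves the versioned protocol name, e.g.:
    // protocol_names.get_name(PeerSet::Validation, ProtocolVersion::from(ValidationVersion::VStaging))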
@@ -1070,7 +1156,12 @@ fn sent_views_include_finalized_number_update() { let peer_a = PeerId::random(); network_handle - .connect_peer(peer_a.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer( + peer_a.clone(), + ValidationVersion::V1, + PeerSet::Validation, + ObservedRole::Full, + ) .await; let hash_a = Hash::repeat_byte(1); @@ -1115,7 +1206,12 @@ fn view_finalized_number_can_not_go_down() { let peer_a = PeerId::random(); network_handle - .connect_peer(peer_a.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer( + peer_a.clone(), + ValidationVersion::V1, + PeerSet::Validation, + ObservedRole::Full, + ) .await; network_handle @@ -1198,3 +1294,161 @@ fn our_view_updates_decreasing_order_and_limited_to_max() { virtual_overseer }); } + +#[test] +fn network_protocol_versioning_view_update() { + let (oracle, handle) = make_sync_oracle(false); + test_harness(Box::new(oracle), |test_harness| async move { + let TestHarness { mut network_handle, mut virtual_overseer } = test_harness; + + let peer_ids: Vec<_> = (0..2).map(|_| PeerId::random()).collect(); + let peers = [ + (peer_ids[0], PeerSet::Validation, ValidationVersion::VStaging), + (peer_ids[1], PeerSet::Validation, ValidationVersion::V1), + ]; + + let head = Hash::repeat_byte(1); + virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( + ActiveLeavesUpdate::start_work(ActivatedLeaf { + hash: head, + number: 1, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + }), + ))) + .await; + + handle.await_mode_switch().await; + + for &(peer_id, peer_set, version) in &peers { + network_handle + .connect_peer(peer_id, version, peer_set, ObservedRole::Full) + .await; + } + + let view = view![head]; + let actions = network_handle.next_network_actions(2).await; + + for &(peer_id, peer_set, version) in &peers { + let wire_msg = match version { + ValidationVersion::V1 => + WireMessage::::ViewUpdate(view.clone()) + .encode(), + ValidationVersion::VStaging => + WireMessage::::ViewUpdate(view.clone()) + .encode(), + }; + assert_network_actions_contains( + &actions, + &NetworkAction::WriteNotification(peer_id, peer_set, wire_msg), + ); + } + + virtual_overseer + }); +} + +#[test] +fn network_protocol_versioning_subsystem_msg() { + let (oracle, _handle) = make_sync_oracle(false); + test_harness(Box::new(oracle), |test_harness| async move { + let TestHarness { mut network_handle, mut virtual_overseer } = test_harness; + + let peer = PeerId::random(); + + network_handle + .connect_peer( + peer.clone(), + ValidationVersion::VStaging, + PeerSet::Validation, + ObservedRole::Full, + ) + .await; + + // bridge will inform about all connected peers. 
+ { + assert_sends_validation_event_to_all( + NetworkBridgeEvent::PeerConnected( + peer.clone(), + ObservedRole::Full, + ValidationVersion::VStaging.into(), + None, + ), + &mut virtual_overseer, + ) + .await; + + assert_sends_validation_event_to_all( + NetworkBridgeEvent::PeerViewChange(peer.clone(), View::default()), + &mut virtual_overseer, + ) + .await; + } + + let approval_distribution_message = + protocol_vstaging::ApprovalDistributionMessage::Approvals(Vec::new()); + + let msg = protocol_vstaging::ValidationProtocol::ApprovalDistribution( + approval_distribution_message.clone(), + ); + + network_handle + .peer_message( + peer.clone(), + PeerSet::Validation, + WireMessage::ProtocolMessage(msg.clone()).encode(), + ) + .await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ApprovalDistribution( + ApprovalDistributionMessage::NetworkBridgeUpdate( + NetworkBridgeEvent::PeerMessage(p, Versioned::VStaging(m)) + ) + ) => { + assert_eq!(p, peer); + assert_eq!(m, approval_distribution_message); + } + ); + + let metadata = protocol_v1::StatementMetadata { + relay_parent: Hash::zero(), + candidate_hash: CandidateHash::default(), + signed_by: ValidatorIndex(0), + signature: sp_core::crypto::UncheckedFrom::unchecked_from([1u8; 64]), + }; + let statement_distribution_message = + protocol_vstaging::StatementDistributionMessage::LargeStatement(metadata); + + let msg = protocol_vstaging::ValidationProtocol::StatementDistribution( + statement_distribution_message.clone(), + ); + + network_handle + .peer_message( + peer.clone(), + PeerSet::Validation, + WireMessage::ProtocolMessage(msg.clone()).encode(), + ) + .await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::NetworkBridgeUpdate( + NetworkBridgeEvent::PeerMessage(p, Versioned::VStaging(m)) + ) + ) => { + assert_eq!(p, peer); + assert_eq!(m, statement_distribution_message); + } + ); + + // No more messages. 
+ assert_matches!(futures::poll!(virtual_overseer.recv().boxed()), Poll::Pending); + + virtual_overseer + }); +} diff --git a/node/network/bridge/src/tx/tests.rs b/node/network/bridge/src/tx/tests.rs index 9853927e58c9..8ace3b2b1c6e 100644 --- a/node/network/bridge/src/tx/tests.rs +++ b/node/network/bridge/src/tx/tests.rs @@ -124,8 +124,7 @@ impl Network for TestNetwork { } fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName) { - let (peer_set, version) = self.peerset_protocol_names.try_get_protocol(&protocol).unwrap(); - assert_eq!(version, peer_set.get_main_version()); + let (peer_set, _) = self.peerset_protocol_names.try_get_protocol(&protocol).unwrap(); self.action_tx .lock() @@ -134,8 +133,7 @@ impl Network for TestNetwork { } fn write_notification(&self, who: PeerId, protocol: ProtocolName, message: Vec) { - let (peer_set, version) = self.peerset_protocol_names.try_get_protocol(&protocol).unwrap(); - assert_eq!(version, peer_set.get_main_version()); + let (peer_set, _) = self.peerset_protocol_names.try_get_protocol(&protocol).unwrap(); self.action_tx .lock() @@ -167,10 +165,17 @@ impl TestNetworkHandle { self.action_rx.next().await.expect("subsystem concluded early") } - async fn connect_peer(&mut self, peer: PeerId, peer_set: PeerSet, role: ObservedRole) { + async fn connect_peer( + &mut self, + peer: PeerId, + protocol_version: ValidationVersion, + peer_set: PeerSet, + role: ObservedRole, + ) { + let protocol_version = ProtocolVersion::from(protocol_version); self.send_network_event(NetworkEvent::NotificationStreamOpened { remote: peer, - protocol: self.peerset_protocol_names.get_main_name(peer_set), + protocol: self.peerset_protocol_names.get_name(peer_set, protocol_version), negotiated_fallback: None, role: role.into(), received_handshake: vec![], @@ -236,7 +241,12 @@ fn send_messages_to_peers() { let peer = PeerId::random(); network_handle - .connect_peer(peer.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer( + peer.clone(), + ValidationVersion::V1, + PeerSet::Validation, + ObservedRole::Full, + ) .timeout(TIMEOUT) .await .expect("Timeout does not occur"); @@ -245,7 +255,12 @@ fn send_messages_to_peers() { // so the single item sink has to be free explicitly network_handle - .connect_peer(peer.clone(), PeerSet::Collation, ObservedRole::Full) + .connect_peer( + peer.clone(), + ValidationVersion::V1, + PeerSet::Collation, + ObservedRole::Full, + ) .timeout(TIMEOUT) .await .expect("Timeout does not occur"); @@ -322,3 +337,64 @@ fn send_messages_to_peers() { virtual_overseer }); } + +#[test] +fn network_protocol_versioning_send() { + test_harness(|test_harness| async move { + let TestHarness { mut network_handle, mut virtual_overseer } = test_harness; + + let peer_ids: Vec<_> = (0..2).map(|_| PeerId::random()).collect(); + let peers = [ + (peer_ids[0], PeerSet::Validation, ValidationVersion::VStaging), + (peer_ids[1], PeerSet::Validation, ValidationVersion::V1), + ]; + + for &(peer_id, peer_set, version) in &peers { + network_handle + .connect_peer(peer_id, version, peer_set, ObservedRole::Full) + .timeout(TIMEOUT) + .await + .expect("Timeout does not occur"); + } + + // send a validation protocol message. + { + let approval_distribution_message = + protocol_vstaging::ApprovalDistributionMessage::Approvals(Vec::new()); + + let msg = protocol_vstaging::ValidationProtocol::ApprovalDistribution( + approval_distribution_message.clone(), + ); + + // Note that bridge doesn't ensure neither peer's protocol version + // or peer set match the message. 
+ let receivers = vec![peer_ids[0], peer_ids[1]]; + virtual_overseer + .send(FromOrchestra::Communication { + msg: NetworkBridgeTxMessage::SendValidationMessage( + receivers.clone(), + Versioned::VStaging(msg.clone()), + ), + }) + .timeout(TIMEOUT) + .await + .expect("Timeout does not occur"); + + for peer in &receivers { + assert_eq!( + network_handle + .next_network_action() + .timeout(TIMEOUT) + .await + .expect("Timeout does not occur"), + NetworkAction::WriteNotification( + *peer, + PeerSet::Validation, + WireMessage::ProtocolMessage(msg.clone()).encode(), + ) + ); + } + } + virtual_overseer + }); +} From 510c28a099eb68aa75ae667215dce152d0802e35 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Thu, 27 Apr 2023 14:23:29 +0000 Subject: [PATCH 047/105] heal merge damage :hospital: Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/criteria.rs | 8 +++++--- node/core/approval-voting/src/tests.rs | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index ecab95c9e567..b6b3651a6da2 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -1069,9 +1069,10 @@ mod tests { #[test] fn check_rejects_delay_bad_vrf() { check_mutated_assignments(40, 10, 8, |m| { + let vrf_signature = garbage_vrf_signature(); match m.cert.kind.clone() { AssignmentCertKindV2::RelayVRFDelay { .. } => { - m.cert.vrf = garbage_vrf(); + m.cert.vrf = (vrf_signature.output, vrf_signature.proof); Some(false) }, _ => None, // skip everything else. @@ -1082,13 +1083,14 @@ mod tests { #[test] fn check_rejects_modulo_bad_vrf() { check_mutated_assignments(200, 100, 25, |m| { + let vrf_signature = garbage_vrf_signature(); match m.cert.kind.clone() { AssignmentCertKindV2::RelayVRFModulo { .. } => { - m.cert.vrf = garbage_vrf(); + m.cert.vrf = (vrf_signature.output, vrf_signature.proof); Some(false) }, AssignmentCertKindV2::RelayVRFModuloCompact { .. } => { - m.cert.vrf = garbage_vrf(); + m.cert.vrf = (vrf_signature.output, vrf_signature.proof); Some(false) }, _ => None, // skip everything else. diff --git a/node/core/approval-voting/src/tests.rs b/node/core/approval-voting/src/tests.rs index 5faa0727786f..d8fffc0e00ae 100644 --- a/node/core/approval-voting/src/tests.rs +++ b/node/core/approval-voting/src/tests.rs @@ -403,7 +403,7 @@ fn garbage_assignment_cert_v2(kind: AssignmentCertKindV2) -> AssignmentCertV2 { let (inout, proof, _) = keypair.vrf_sign(ctx.bytes(msg)); let out = inout.to_output(); - AssignmentCertV2 { kind, vrf: (VRFOutput(out), VRFProof(proof)) } + AssignmentCertV2 { kind, vrf: (VrfOutput(out), VrfProof(proof)) } } fn sign_approval( From 9f6f0e6642c3d2ec912caa1c165eeaf6ec50e8e6 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 2 May 2023 15:40:54 +0000 Subject: [PATCH 048/105] Bump test timeout to make CI happy when slow Signed-off-by: Andrei Sandu --- node/network/bitfield-distribution/src/tests.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/node/network/bitfield-distribution/src/tests.rs b/node/network/bitfield-distribution/src/tests.rs index 6b475ff9c73b..144e6be2818c 100644 --- a/node/network/bitfield-distribution/src/tests.rs +++ b/node/network/bitfield-distribution/src/tests.rs @@ -43,9 +43,9 @@ use std::{iter::FromIterator as _, sync::Arc, time::Duration}; macro_rules! 
launch { ($fut:expr) => { - $fut.timeout(Duration::from_millis(10)) + $fut.timeout(Duration::from_millis(20)) .await - .expect("10ms is more than enough for sending messages.") + .expect("20ms is more than enough for sending messages.") }; } From 274ea4612701182eaf079b880b071458f81f4f77 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Thu, 11 May 2023 09:37:03 +0000 Subject: [PATCH 049/105] approval primitives v1/v2 refactor Signed-off-by: Andrei Sandu --- .../approval-voting/src/approval_checking.rs | 2 +- .../approval-voting/src/approval_db/v1/mod.rs | 5 +- node/core/approval-voting/src/criteria.rs | 21 +- node/core/approval-voting/src/import.rs | 20 +- node/core/approval-voting/src/lib.rs | 4 +- .../approval-voting/src/persisted_entries.rs | 3 +- node/core/approval-voting/src/tests.rs | 15 +- node/core/approval-voting/src/time.rs | 2 +- node/network/approval-distribution/src/lib.rs | 4 +- .../approval-distribution/src/metrics.rs | 2 +- .../approval-distribution/src/tests.rs | 7 +- node/network/protocol/src/lib.rs | 5 +- node/primitives/src/approval.rs | 529 +++++++++--------- node/subsystem-types/src/messages.rs | 4 +- 14 files changed, 320 insertions(+), 303 deletions(-) diff --git a/node/core/approval-voting/src/approval_checking.rs b/node/core/approval-voting/src/approval_checking.rs index 055df9d1daf3..07129f4c4aa7 100644 --- a/node/core/approval-voting/src/approval_checking.rs +++ b/node/core/approval-voting/src/approval_checking.rs @@ -17,7 +17,7 @@ //! Utilities for checking whether a candidate has been approved under a given block. use bitvec::{order::Lsb0 as BitOrderLsb0, slice::BitSlice}; -use polkadot_node_primitives::approval::DelayTranche; +use polkadot_node_primitives::approval::v1::DelayTranche; use polkadot_primitives::ValidatorIndex; use crate::{ diff --git a/node/core/approval-voting/src/approval_db/v1/mod.rs b/node/core/approval-voting/src/approval_db/v1/mod.rs index bce7d6c957d6..7d01f65127f5 100644 --- a/node/core/approval-voting/src/approval_db/v1/mod.rs +++ b/node/core/approval-voting/src/approval_db/v1/mod.rs @@ -17,7 +17,10 @@ //! Version 1 of the DB schema. use parity_scale_codec::{Decode, Encode}; -use polkadot_node_primitives::approval::{v2::CoreBitfield, AssignmentCertV2, DelayTranche}; +use polkadot_node_primitives::approval::{ + v1::DelayTranche, + v2::{AssignmentCertV2, CoreBitfield}, +}; use polkadot_node_subsystem::{SubsystemError, SubsystemResult}; use polkadot_node_subsystem_util::database::{DBTransaction, Database}; use polkadot_primitives::{ diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index b6b3651a6da2..5a1e48f7e7b1 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -18,8 +18,9 @@ use parity_scale_codec::{Decode, Encode}; use polkadot_node_primitives::approval::{ - self as approval_types, v2::CoreBitfield, AssignmentCert, AssignmentCertKind, - AssignmentCertKindV2, AssignmentCertV2, DelayTranche, RelayVRFStory, + self as approval_types, + v1::{AssignmentCert, AssignmentCertKind, DelayTranche, RelayVRFStory}, + v2::{AssignmentCertKindV2, AssignmentCertV2, CoreBitfield, VrfOutput, VrfProof, VrfSignature}, }; use polkadot_primitives::{ AssignmentId, AssignmentPair, CandidateHash, CoreIndex, GroupIndex, IndexedVec, SessionInfo, @@ -448,9 +449,9 @@ fn compute_relay_vrf_modulo_assignments( // has been executed. 
let cert = AssignmentCert { kind: AssignmentCertKind::RelayVRFModulo { sample: rvm_sample }, - vrf: approval_types::VrfSignature { - output: approval_types::VrfOutput(vrf_in_out.to_output()), - proof: approval_types::VrfProof(vrf_proof), + vrf: VrfSignature { + output: VrfOutput(vrf_in_out.to_output()), + proof: VrfProof(vrf_proof), }, }; @@ -530,10 +531,7 @@ fn compute_relay_vrf_modulo_assignments_v2( kind: AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield: assignment_bitfield.clone(), }, - vrf: ( - approval_types::VrfOutput(vrf_in_out.to_output()), - approval_types::VrfProof(vrf_proof), - ), + vrf: (VrfOutput(vrf_in_out.to_output()), VrfProof(vrf_proof)), }; // All assignments of type RelayVRFModulo have tranche 0. @@ -565,10 +563,7 @@ fn compute_relay_vrf_delay_assignments( let cert = AssignmentCertV2 { kind: AssignmentCertKindV2::RelayVRFDelay { core_index: core }, - vrf: ( - approval_types::VrfOutput(vrf_in_out.to_output()), - approval_types::VrfProof(vrf_proof), - ), + vrf: (VrfOutput(vrf_in_out.to_output()), VrfProof(vrf_proof)), }; let our_assignment = OurAssignment { diff --git a/node/core/approval-voting/src/import.rs b/node/core/approval-voting/src/import.rs index 298785385f6e..dec6a49027a2 100644 --- a/node/core/approval-voting/src/import.rs +++ b/node/core/approval-voting/src/import.rs @@ -30,7 +30,10 @@ use polkadot_node_jaeger as jaeger; use polkadot_node_primitives::{ - approval::{self as approval_types, BlockApprovalMeta, RelayVRFStory}, + approval::{ + self as approval_types, + v1::{BlockApprovalMeta, RelayVRFStory}, + }, MAX_FINALITY_LAG, }; use polkadot_node_subsystem::{ @@ -94,7 +97,7 @@ enum ImportedBlockInfoError { FutureCancelled(&'static str, futures::channel::oneshot::Canceled), #[error(transparent)] - ApprovalError(approval_types::ApprovalError), + ApprovalError(approval_types::v1::ApprovalError), #[error("block is from an ancient session")] BlockFromAncientSession, @@ -223,7 +226,7 @@ async fn imported_block_info( }; let (assignments, slot, relay_vrf_story) = { - let unsafe_vrf = approval_types::babe_unsafe_vrf_info(&block_header); + let unsafe_vrf = approval_types::v1::babe_unsafe_vrf_info(&block_header); match unsafe_vrf { Some(unsafe_vrf) => { @@ -612,7 +615,7 @@ pub(crate) mod tests { use crate::approval_db::v1::DbBackend; use ::test_helpers::{dummy_candidate_receipt, dummy_hash}; use assert_matches::assert_matches; - use polkadot_node_primitives::approval::{VrfSignature, VrfTranscript}; + use polkadot_node_primitives::approval::v1::{VrfSignature, VrfTranscript}; use polkadot_node_subsystem::messages::{AllMessages, ApprovalVotingMessage}; use polkadot_node_subsystem_test_helpers::make_subsystem_context; use polkadot_node_subsystem_util::database::Database; @@ -677,7 +680,7 @@ pub(crate) mod tests { fn compute_assignments( &self, _keystore: &LocalKeystore, - _relay_vrf_story: polkadot_node_primitives::approval::RelayVRFStory, + _relay_vrf_story: polkadot_node_primitives::approval::v1::RelayVRFStory, _config: &criteria::Config, _leaving_cores: Vec<( CandidateHash, @@ -693,10 +696,11 @@ pub(crate) mod tests { _claimed_core_bitfield: polkadot_node_primitives::approval::v2::CoreBitfield, _validator_index: polkadot_primitives::ValidatorIndex, _config: &criteria::Config, - _relay_vrf_story: polkadot_node_primitives::approval::RelayVRFStory, - _assignment: &polkadot_node_primitives::approval::AssignmentCertV2, + _relay_vrf_story: polkadot_node_primitives::approval::v1::RelayVRFStory, + _assignment: &polkadot_node_primitives::approval::v2::AssignmentCertV2, 
_backing_groups: Vec, - ) -> Result { + ) -> Result + { Ok(0) } } diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index 4503cc8778ec..1f807a8e6836 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -25,8 +25,8 @@ use jaeger::{hash_to_trace_identifier, PerLeafSpan}; use polkadot_node_jaeger as jaeger; use polkadot_node_primitives::{ approval::{ - v2::{BitfieldError, CandidateBitfield, CoreBitfield}, - BlockApprovalMeta, DelayTranche, IndirectAssignmentCertV2, IndirectSignedApprovalVote, + v1::{BlockApprovalMeta, DelayTranche, IndirectSignedApprovalVote}, + v2::{BitfieldError, CandidateBitfield, CoreBitfield, IndirectAssignmentCertV2}, }, ValidationResult, }; diff --git a/node/core/approval-voting/src/persisted_entries.rs b/node/core/approval-voting/src/persisted_entries.rs index 28d04f2d6a98..e2a55f022194 100644 --- a/node/core/approval-voting/src/persisted_entries.rs +++ b/node/core/approval-voting/src/persisted_entries.rs @@ -21,7 +21,8 @@ //! data and logic are intertwined. use polkadot_node_primitives::approval::{ - v2::CoreBitfield, AssignmentCertV2, DelayTranche, RelayVRFStory, + v1::{DelayTranche, RelayVRFStory}, + v2::{AssignmentCertV2, CoreBitfield}, }; use polkadot_primitives::{ BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, Hash, SessionIndex, diff --git a/node/core/approval-voting/src/tests.rs b/node/core/approval-voting/src/tests.rs index d8fffc0e00ae..8f7580f9608b 100644 --- a/node/core/approval-voting/src/tests.rs +++ b/node/core/approval-voting/src/tests.rs @@ -19,8 +19,11 @@ use crate::tests::test_constants::TEST_CONFIG; use super::*; use polkadot_node_primitives::{ approval::{ - v1::RELAY_VRF_MODULO_CONTEXT, AssignmentCert, AssignmentCertKind, AssignmentCertKindV2, - AssignmentCertV2, DelayTranche, VrfOutput, VrfProof, VrfSignature, + v1::{ + AssignmentCert, AssignmentCertKind, DelayTranche, VrfOutput, VrfProof, VrfSignature, + RELAY_VRF_MODULO_CONTEXT, + }, + v2::{AssignmentCertKindV2, AssignmentCertV2}, }, AvailableData, BlockData, PoV, }; @@ -235,7 +238,7 @@ where fn compute_assignments( &self, _keystore: &LocalKeystore, - _relay_vrf_story: polkadot_node_primitives::approval::RelayVRFStory, + _relay_vrf_story: polkadot_node_primitives::approval::v1::RelayVRFStory, _config: &criteria::Config, _leaving_cores: Vec<( CandidateHash, @@ -251,10 +254,10 @@ where _claimed_core_bitfield: polkadot_node_primitives::approval::v2::CoreBitfield, validator_index: ValidatorIndex, _config: &criteria::Config, - _relay_vrf_story: polkadot_node_primitives::approval::RelayVRFStory, - _assignment: &polkadot_node_primitives::approval::AssignmentCertV2, + _relay_vrf_story: polkadot_node_primitives::approval::v1::RelayVRFStory, + _assignment: &polkadot_node_primitives::approval::v2::AssignmentCertV2, _backing_groups: Vec, - ) -> Result { + ) -> Result { self.1(validator_index) } } diff --git a/node/core/approval-voting/src/time.rs b/node/core/approval-voting/src/time.rs index 34132dc22b23..a45866402c82 100644 --- a/node/core/approval-voting/src/time.rs +++ b/node/core/approval-voting/src/time.rs @@ -17,7 +17,7 @@ //! Time utilities for approval voting. 
use futures::prelude::*; -use polkadot_node_primitives::approval::DelayTranche; +use polkadot_node_primitives::approval::v1::DelayTranche; use sp_consensus_slots::Slot; use std::{ pin::Pin, diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index d7f38e00838c..03b0638358b9 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -33,8 +33,8 @@ use polkadot_node_network_protocol::{ Versioned, View, }; use polkadot_node_primitives::approval::{ - v2::{AsBitIndex, CandidateBitfield}, - BlockApprovalMeta, IndirectAssignmentCertV2, IndirectSignedApprovalVote, + v1::{BlockApprovalMeta, IndirectSignedApprovalVote}, + v2::{AsBitIndex, CandidateBitfield, IndirectAssignmentCertV2}, }; use polkadot_node_subsystem::{ messages::{ diff --git a/node/network/approval-distribution/src/metrics.rs b/node/network/approval-distribution/src/metrics.rs index bce87039f441..5c09344a73e4 100644 --- a/node/network/approval-distribution/src/metrics.rs +++ b/node/network/approval-distribution/src/metrics.rs @@ -15,7 +15,7 @@ // along with Polkadot. If not, see . use polkadot_node_metrics::metrics::{prometheus, Metrics as MetricsTrait}; -use polkadot_node_primitives::approval::AssignmentCertKindV2; +use polkadot_node_primitives::approval::v2::AssignmentCertKindV2; /// Approval Distribution metrics. #[derive(Default, Clone)] diff --git a/node/network/approval-distribution/src/tests.rs b/node/network/approval-distribution/src/tests.rs index 7c78e4bb86da..81c138ebd5df 100644 --- a/node/network/approval-distribution/src/tests.rs +++ b/node/network/approval-distribution/src/tests.rs @@ -24,8 +24,11 @@ use polkadot_node_network_protocol::{ view, ObservedRole, }; use polkadot_node_primitives::approval::{ - v2::RELAY_VRF_MODULO_CONTEXT, AssignmentCert, AssignmentCertKind, IndirectAssignmentCert, - VrfOutput, VrfProof, VrfSignature, + v1::{ + AssignmentCert, AssignmentCertKind, IndirectAssignmentCert, VrfOutput, VrfProof, + VrfSignature, + }, + v2::RELAY_VRF_MODULO_CONTEXT, }; use polkadot_node_subsystem::messages::{network_bridge_event, AllMessages, ApprovalCheckError}; use polkadot_node_subsystem_test_helpers as test_helpers; diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index 3cd0fc023bbb..c26ddeb14d6f 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -428,7 +428,8 @@ impl_versioned_try_from!( pub mod vstaging { use parity_scale_codec::{Decode, Encode}; use polkadot_node_primitives::approval::{ - v2::CandidateBitfield, IndirectAssignmentCertV2, IndirectSignedApprovalVote, + v1::IndirectSignedApprovalVote, + v2::{CandidateBitfield, IndirectAssignmentCertV2}, }; // Re-export stuff that has not changed since v1. @@ -481,7 +482,7 @@ pub mod v1 { }; use polkadot_node_primitives::{ - approval::{IndirectAssignmentCert, IndirectSignedApprovalVote}, + approval::v1::{IndirectAssignmentCert, IndirectSignedApprovalVote}, UncheckedSignedFullStatement, }; diff --git a/node/primitives/src/approval.rs b/node/primitives/src/approval.rs index 4af2e29e878c..171e6ce87bb5 100644 --- a/node/primitives/src/approval.rs +++ b/node/primitives/src/approval.rs @@ -16,22 +16,24 @@ //! Types relevant for approval. -pub use sp_consensus_babe::{Randomness, Slot, VrfOutput, VrfProof, VrfSignature, VrfTranscript}; +/// A list of primitives introduced in v1. 
+pub mod v1 { + use sp_consensus_babe as babe_primitives; + pub use sp_consensus_babe::{ + Randomness, Slot, VrfOutput, VrfProof, VrfSignature, VrfTranscript, + }; -use parity_scale_codec::{Decode, Encode}; -use polkadot_primitives::{ - BlockNumber, CandidateHash, CandidateIndex, CoreIndex, Hash, Header, SessionIndex, - ValidatorIndex, ValidatorSignature, -}; -use sp_application_crypto::ByteArray; -use sp_consensus_babe as babe_primitives; + use parity_scale_codec::{Decode, Encode}; + use polkadot_primitives::{ + BlockNumber, CandidateHash, CandidateIndex, CoreIndex, Hash, Header, SessionIndex, + ValidatorIndex, ValidatorSignature, + }; + use sp_application_crypto::ByteArray; -/// Validators assigning to check a particular candidate are split up into tranches. -/// Earlier tranches of validators check first, with later tranches serving as backup. -pub type DelayTranche = u32; + /// Validators assigning to check a particular candidate are split up into tranches. + /// Earlier tranches of validators check first, with later tranches serving as backup. + pub type DelayTranche = u32; -/// Static contexts use to generate randomness for v1 assignments. -pub mod v1 { /// A static context used to compute the Relay VRF story based on the /// VRF output included in the header-chain. pub const RELAY_VRF_STORY_CONTEXT: &[u8] = b"A&V RC-VRF"; @@ -50,15 +52,173 @@ pub mod v1 { /// A static context associated with producing randomness for a tranche. pub const TRANCHE_RANDOMNESS_CONTEXT: &[u8] = b"A&V TRANCHE"; + + /// random bytes derived from the VRF submitted within the block by the + /// block author as a credential and used as input to approval assignment criteria. + #[derive(Debug, Clone, Encode, Decode, PartialEq)] + pub struct RelayVRFStory(pub [u8; 32]); + + /// Different kinds of input data or criteria that can prove a validator's assignment + /// to check a particular parachain. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub enum AssignmentCertKind { + /// An assignment story based on the VRF that authorized the relay-chain block where the + /// candidate was included combined with a sample number. + /// + /// The context used to produce bytes is [`RELAY_VRF_MODULO_CONTEXT`] + RelayVRFModulo { + /// The sample number used in this cert. + sample: u32, + }, + /// An assignment story based on the VRF that authorized the relay-chain block where the + /// candidate was included combined with the index of a particular core. + /// + /// The context is [`RELAY_VRF_DELAY_CONTEXT`] + RelayVRFDelay { + /// The core index chosen in this cert. + core_index: CoreIndex, + }, + } + + /// A certification of assignment. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub struct AssignmentCert { + /// The criterion which is claimed to be met by this cert. + pub kind: AssignmentCertKind, + /// The VRF signature showing the criterion is met. + pub vrf: VrfSignature, + } + + /// An assignment criterion which refers to the candidate under which the assignment is + /// relevant by block hash. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub struct IndirectAssignmentCert { + /// A block hash where the candidate appears. + pub block_hash: Hash, + /// The validator index. + pub validator: ValidatorIndex, + /// The cert itself. + pub cert: AssignmentCert, + } + + /// A signed approval vote which references the candidate indirectly via the block. 
+ /// + /// In practice, we have a look-up from block hash and candidate index to candidate hash, + /// so this can be transformed into a `SignedApprovalVote`. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub struct IndirectSignedApprovalVote { + /// A block hash where the candidate appears. + pub block_hash: Hash, + /// The index of the candidate in the list of candidates fully included as-of the block. + pub candidate_index: CandidateIndex, + /// The validator index. + pub validator: ValidatorIndex, + /// The signature by the validator. + pub signature: ValidatorSignature, + } + + /// Metadata about a block which is now live in the approval protocol. + #[derive(Debug)] + pub struct BlockApprovalMeta { + /// The hash of the block. + pub hash: Hash, + /// The number of the block. + pub number: BlockNumber, + /// The hash of the parent block. + pub parent_hash: Hash, + /// The candidates included by the block. + /// Note that these are not the same as the candidates that appear within the block body. + pub candidates: Vec, + /// The consensus slot of the block. + pub slot: Slot, + /// The session of the block. + pub session: SessionIndex, + } + + /// Errors that can occur during the approvals protocol. + #[derive(Debug, thiserror::Error)] + #[allow(missing_docs)] + pub enum ApprovalError { + #[error("Schnorrkel signature error")] + SchnorrkelSignature(schnorrkel::errors::SignatureError), + #[error("Authority index {0} out of bounds")] + AuthorityOutOfBounds(usize), + } + + /// An unsafe VRF output. Provide BABE Epoch info to create a `RelayVRFStory`. + pub struct UnsafeVRFOutput { + vrf_output: VrfOutput, + slot: Slot, + authority_index: u32, + } + + impl UnsafeVRFOutput { + /// Get the slot. + pub fn slot(&self) -> Slot { + self.slot + } + + /// Compute the randomness associated with this VRF output. + pub fn compute_randomness( + self, + authorities: &[(babe_primitives::AuthorityId, babe_primitives::BabeAuthorityWeight)], + randomness: &babe_primitives::Randomness, + epoch_index: u64, + ) -> Result { + let author = match authorities.get(self.authority_index as usize) { + None => return Err(ApprovalError::AuthorityOutOfBounds(self.authority_index as _)), + Some(x) => &x.0, + }; + + let pubkey = schnorrkel::PublicKey::from_bytes(author.as_slice()) + .map_err(ApprovalError::SchnorrkelSignature)?; + + let transcript = sp_consensus_babe::make_transcript(randomness, self.slot, epoch_index); + + let inout = self + .vrf_output + .0 + .attach_input_hash(&pubkey, transcript.0) + .map_err(ApprovalError::SchnorrkelSignature)?; + Ok(RelayVRFStory(inout.make_bytes(super::v1::RELAY_VRF_STORY_CONTEXT))) + } + } + + /// Extract the slot number and relay VRF from a header. + /// + /// This fails if either there is no BABE `PreRuntime` digest or + /// the digest has type `SecondaryPlain`, which Substrate nodes do + /// not produce or accept anymore. + pub fn babe_unsafe_vrf_info(header: &Header) -> Option { + use babe_primitives::digests::CompatibleDigestItem; + + for digest in &header.digest.logs { + if let Some(pre) = digest.as_babe_pre_digest() { + let slot = pre.slot(); + let authority_index = pre.authority_index(); + + return pre.vrf_signature().map(|sig| UnsafeVRFOutput { + vrf_output: sig.output.clone(), + slot, + authority_index, + }) + } + } + + None + } } /// A list of primitives introduced by v2. 
pub mod v2 { use parity_scale_codec::{Decode, Encode}; + pub use sp_consensus_babe::{ + Randomness, Slot, VrfOutput, VrfProof, VrfSignature, VrfTranscript, + }; use std::ops::BitOr; - use super::{CandidateIndex, CoreIndex}; use bitvec::{prelude::Lsb0, vec::BitVec}; + use polkadot_primitives::{CandidateIndex, CoreIndex, Hash, ValidatorIndex}; /// A static context associated with producing randomness for a core. pub const CORE_RANDOMNESS_CONTEXT: &[u8] = b"A&V CORE v2"; @@ -191,271 +351,118 @@ pub mod v2 { )) } } -} - -/// random bytes derived from the VRF submitted within the block by the -/// block author as a credential and used as input to approval assignment criteria. -#[derive(Debug, Clone, Encode, Decode, PartialEq)] -pub struct RelayVRFStory(pub [u8; 32]); - -/// Different kinds of input data or criteria that can prove a validator's assignment -/// to check a particular parachain. -#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] -pub enum AssignmentCertKind { - /// An assignment story based on the VRF that authorized the relay-chain block where the - /// candidate was included combined with a sample number. - /// - /// The context used to produce bytes is [`RELAY_VRF_MODULO_CONTEXT`] - RelayVRFModulo { - /// The sample number used in this cert. - sample: u32, - }, - /// An assignment story based on the VRF that authorized the relay-chain block where the - /// candidate was included combined with the index of a particular core. - /// - /// The context is [`RELAY_VRF_DELAY_CONTEXT`] - RelayVRFDelay { - /// The core index chosen in this cert. - core_index: CoreIndex, - }, -} - -/// Certificate is changed compared to `AssignmentCertKind`: -/// - introduced RelayVRFModuloCompact -#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] -pub enum AssignmentCertKindV2 { - /// An assignment story based on the VRF that authorized the relay-chain block where the - /// candidate was included combined with a sample number. - /// - /// The context used to produce bytes is [`v2::RELAY_VRF_MODULO_CONTEXT`] - RelayVRFModulo { - /// The sample number used in this cert. - sample: u32, - }, - /// Multiple assignment stories based on the VRF that authorized the relay-chain block where the - /// candidates were included. - /// - /// The context is [`v2::RELAY_VRF_MODULO_CONTEXT`] - RelayVRFModuloCompact { - /// A bitfield representing the core indices claimed by this assignment. - core_bitfield: super::approval::v2::CoreBitfield, - }, - /// An assignment story based on the VRF that authorized the relay-chain block where the - /// candidate was included combined with the index of a particular core. - /// - /// The context is [`v2::RELAY_VRF_DELAY_CONTEXT`] - RelayVRFDelay { - /// The core index chosen in this cert. - core_index: CoreIndex, - }, -} - -/// A certification of assignment. -#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] -pub struct AssignmentCert { - /// The criterion which is claimed to be met by this cert. - pub kind: AssignmentCertKind, - /// The VRF signature showing the criterion is met. - pub vrf: VrfSignature, -} - -/// A certification of assignment. -#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] -pub struct AssignmentCertV2 { - /// The criterion which is claimed to be met by this cert. - pub kind: AssignmentCertKindV2, - /// The VRF showing the criterion is met. 
- pub vrf: (VrfOutput, VrfProof), -} -impl From for AssignmentCertV2 { - fn from(cert: AssignmentCert) -> Self { - Self { - kind: match cert.kind { - AssignmentCertKind::RelayVRFDelay { core_index } => - AssignmentCertKindV2::RelayVRFDelay { core_index }, - AssignmentCertKind::RelayVRFModulo { sample } => - AssignmentCertKindV2::RelayVRFModulo { sample }, - }, - vrf: (cert.vrf.output, cert.vrf.proof), - } + /// Certificate is changed compared to `AssignmentCertKind`: + /// - introduced RelayVRFModuloCompact + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub enum AssignmentCertKindV2 { + /// An assignment story based on the VRF that authorized the relay-chain block where the + /// candidate was included combined with a sample number. + /// + /// The context used to produce bytes is [`v2::RELAY_VRF_MODULO_CONTEXT`] + RelayVRFModulo { + /// The sample number used in this cert. + sample: u32, + }, + /// Multiple assignment stories based on the VRF that authorized the relay-chain block where the + /// candidates were included. + /// + /// The context is [`v2::RELAY_VRF_MODULO_CONTEXT`] + RelayVRFModuloCompact { + /// A bitfield representing the core indices claimed by this assignment. + core_bitfield: CoreBitfield, + }, + /// An assignment story based on the VRF that authorized the relay-chain block where the + /// candidate was included combined with the index of a particular core. + /// + /// The context is [`v2::RELAY_VRF_DELAY_CONTEXT`] + RelayVRFDelay { + /// The core index chosen in this cert. + core_index: CoreIndex, + }, } -} -/// Errors that can occur when trying to convert to/from assignment v1/v2 -#[derive(Debug)] -pub enum AssignmentConversionError { - /// Assignment certificate is not supported in v1. - CertificateNotSupported, -} - -impl TryFrom for AssignmentCert { - type Error = AssignmentConversionError; - fn try_from(cert: AssignmentCertV2) -> Result { - Ok(Self { - kind: match cert.kind { - AssignmentCertKindV2::RelayVRFDelay { core_index } => - AssignmentCertKind::RelayVRFDelay { core_index }, - AssignmentCertKindV2::RelayVRFModulo { sample } => - AssignmentCertKind::RelayVRFModulo { sample }, - // Not supported - _ => return Err(AssignmentConversionError::CertificateNotSupported), - }, - vrf: VrfSignature { output: cert.vrf.0, proof: cert.vrf.1 }, - }) + /// A certification of assignment. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub struct AssignmentCertV2 { + /// The criterion which is claimed to be met by this cert. + pub kind: AssignmentCertKindV2, + /// The VRF showing the criterion is met. + pub vrf: (VrfOutput, VrfProof), } -} -/// An assignment criterion which refers to the candidate under which the assignment is -/// relevant by block hash. -#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] -pub struct IndirectAssignmentCert { - /// A block hash where the candidate appears. - pub block_hash: Hash, - /// The validator index. - pub validator: ValidatorIndex, - /// The cert itself. - pub cert: AssignmentCert, -} -/// An assignment criterion which refers to the candidate under which the assignment is -/// relevant by block hash. -#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] -pub struct IndirectAssignmentCertV2 { - /// A block hash where the candidate appears. - pub block_hash: Hash, - /// The validator index. - pub validator: ValidatorIndex, - /// The cert itself. 
- pub cert: AssignmentCertV2, -} -impl From for IndirectAssignmentCertV2 { - fn from(indirect_cert: IndirectAssignmentCert) -> Self { - Self { - block_hash: indirect_cert.block_hash, - validator: indirect_cert.validator, - cert: indirect_cert.cert.into(), + impl From for AssignmentCertV2 { + fn from(cert: super::v1::AssignmentCert) -> Self { + Self { + kind: match cert.kind { + super::v1::AssignmentCertKind::RelayVRFDelay { core_index } => + AssignmentCertKindV2::RelayVRFDelay { core_index }, + super::v1::AssignmentCertKind::RelayVRFModulo { sample } => + AssignmentCertKindV2::RelayVRFModulo { sample }, + }, + vrf: (cert.vrf.output, cert.vrf.proof), + } } } -} -impl TryFrom for IndirectAssignmentCert { - type Error = AssignmentConversionError; - fn try_from( - indirect_cert: IndirectAssignmentCertV2, - ) -> Result { - Ok(Self { - block_hash: indirect_cert.block_hash, - validator: indirect_cert.validator, - cert: indirect_cert.cert.try_into()?, - }) + /// Errors that can occur when trying to convert to/from assignment v1/v2 + #[derive(Debug)] + pub enum AssignmentConversionError { + /// Assignment certificate is not supported in v1. + CertificateNotSupported, } -} - -/// A signed approval vote which references the candidate indirectly via the block. -/// -/// In practice, we have a look-up from block hash and candidate index to candidate hash, -/// so this can be transformed into a `SignedApprovalVote`. -#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] -pub struct IndirectSignedApprovalVote { - /// A block hash where the candidate appears. - pub block_hash: Hash, - /// The index of the candidate in the list of candidates fully included as-of the block. - pub candidate_index: CandidateIndex, - /// The validator index. - pub validator: ValidatorIndex, - /// The signature by the validator. - pub signature: ValidatorSignature, -} - -/// Metadata about a block which is now live in the approval protocol. -#[derive(Debug)] -pub struct BlockApprovalMeta { - /// The hash of the block. - pub hash: Hash, - /// The number of the block. - pub number: BlockNumber, - /// The hash of the parent block. - pub parent_hash: Hash, - /// The candidates included by the block. - /// Note that these are not the same as the candidates that appear within the block body. - pub candidates: Vec, - /// The consensus slot of the block. - pub slot: Slot, - /// The session of the block. - pub session: SessionIndex, -} - -/// Errors that can occur during the approvals protocol. -#[derive(Debug, thiserror::Error)] -#[allow(missing_docs)] -pub enum ApprovalError { - #[error("Schnorrkel signature error")] - SchnorrkelSignature(schnorrkel::errors::SignatureError), - #[error("Authority index {0} out of bounds")] - AuthorityOutOfBounds(usize), -} -/// An unsafe VRF output. Provide BABE Epoch info to create a `RelayVRFStory`. 
-pub struct UnsafeVRFOutput { - vrf_output: VrfOutput, - slot: Slot, - authority_index: u32, -} + impl TryFrom for super::v1::AssignmentCert { + type Error = AssignmentConversionError; + fn try_from(cert: AssignmentCertV2) -> Result { + Ok(Self { + kind: match cert.kind { + AssignmentCertKindV2::RelayVRFDelay { core_index } => + super::v1::AssignmentCertKind::RelayVRFDelay { core_index }, + AssignmentCertKindV2::RelayVRFModulo { sample } => + super::v1::AssignmentCertKind::RelayVRFModulo { sample }, + // Not supported + _ => return Err(AssignmentConversionError::CertificateNotSupported), + }, + vrf: VrfSignature { output: cert.vrf.0, proof: cert.vrf.1 }, + }) + } + } -impl UnsafeVRFOutput { - /// Get the slot. - pub fn slot(&self) -> Slot { - self.slot + /// An assignment criterion which refers to the candidate under which the assignment is + /// relevant by block hash. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub struct IndirectAssignmentCertV2 { + /// A block hash where the candidate appears. + pub block_hash: Hash, + /// The validator index. + pub validator: ValidatorIndex, + /// The cert itself. + pub cert: AssignmentCertV2, } - /// Compute the randomness associated with this VRF output. - pub fn compute_randomness( - self, - authorities: &[(babe_primitives::AuthorityId, babe_primitives::BabeAuthorityWeight)], - randomness: &babe_primitives::Randomness, - epoch_index: u64, - ) -> Result { - let author = match authorities.get(self.authority_index as usize) { - None => return Err(ApprovalError::AuthorityOutOfBounds(self.authority_index as _)), - Some(x) => &x.0, - }; - - let pubkey = schnorrkel::PublicKey::from_bytes(author.as_slice()) - .map_err(ApprovalError::SchnorrkelSignature)?; - - let transcript = sp_consensus_babe::make_transcript(randomness, self.slot, epoch_index); - - let inout = self - .vrf_output - .0 - .attach_input_hash(&pubkey, transcript.0) - .map_err(ApprovalError::SchnorrkelSignature)?; - Ok(RelayVRFStory(inout.make_bytes(v1::RELAY_VRF_STORY_CONTEXT))) + impl From for IndirectAssignmentCertV2 { + fn from(indirect_cert: super::v1::IndirectAssignmentCert) -> Self { + Self { + block_hash: indirect_cert.block_hash, + validator: indirect_cert.validator, + cert: indirect_cert.cert.into(), + } + } } -} -/// Extract the slot number and relay VRF from a header. -/// -/// This fails if either there is no BABE `PreRuntime` digest or -/// the digest has type `SecondaryPlain`, which Substrate nodes do -/// not produce or accept anymore. 
-pub fn babe_unsafe_vrf_info(header: &Header) -> Option { - use babe_primitives::digests::CompatibleDigestItem; - - for digest in &header.digest.logs { - if let Some(pre) = digest.as_babe_pre_digest() { - let slot = pre.slot(); - let authority_index = pre.authority_index(); - - return pre.vrf_signature().map(|sig| UnsafeVRFOutput { - vrf_output: sig.output.clone(), - slot, - authority_index, + impl TryFrom for super::v1::IndirectAssignmentCert { + type Error = AssignmentConversionError; + fn try_from( + indirect_cert: IndirectAssignmentCertV2, + ) -> Result { + Ok(Self { + block_hash: indirect_cert.block_hash, + validator: indirect_cert.validator, + cert: indirect_cert.cert.try_into()?, }) } } - - None } #[cfg(test)] diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index d39e0e009252..7b774f70a86d 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -34,8 +34,8 @@ use polkadot_node_network_protocol::{ }; use polkadot_node_primitives::{ approval::{ - v2::CandidateBitfield, BlockApprovalMeta, IndirectAssignmentCertV2, - IndirectSignedApprovalVote, + v1::{BlockApprovalMeta, IndirectSignedApprovalVote}, + v2::{CandidateBitfield, IndirectAssignmentCertV2}, }, AvailableData, BabeEpoch, BlockWeight, CandidateVotes, CollationGenerationConfig, CollationSecondedSignal, DisputeMessage, DisputeStatus, ErasureChunk, PoV, From 9f66ed16ae8ce6f1e6af52ba54d67df8dea0c4ff Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Thu, 11 May 2023 09:42:25 +0000 Subject: [PATCH 050/105] remove leftover sleep in CI test pipeline Signed-off-by: Andrei Sandu --- scripts/ci/gitlab/pipeline/test.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index 3ed576d1fc81..d66ede4e03ef 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -42,7 +42,6 @@ test-linux-stable: RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" script: - time cargo test --workspace --profile testnet --verbose --locked --features=runtime-benchmarks,runtime-metrics,try-runtime - - sleep 1 .check-dependent-project: &check-dependent-project stage: test From 6274c374235778829479d7541a4cfd612e3f333a Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Thu, 11 May 2023 09:59:00 +0000 Subject: [PATCH 051/105] Fix tests and enable v2 assignments in tests Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/criteria.rs | 21 +++++++++++++++++++-- node/primitives/src/approval.rs | 7 +++---- 2 files changed, 22 insertions(+), 6 deletions(-) diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index 5a1e48f7e7b1..0deacf4351d6 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -977,7 +977,7 @@ mod tests { }; let relay_vrf_story = RelayVRFStory([42u8; 32]); - let assignments = compute_assignments( + let mut assignments = compute_assignments( &keystore, relay_vrf_story.clone(), &config, @@ -993,6 +993,23 @@ mod tests { false, ); + // Extend with v2 assignments as well + assignments.extend(compute_assignments( + &keystore, + relay_vrf_story.clone(), + &config, + (0..n_cores) + .map(|i| { + ( + CandidateHash(Hash::repeat_byte(i as u8)), + CoreIndex(i as u32), + group_for_core(i), + ) + }) + .collect::>(), + true, + )); + let mut counted = 0; for (core, assignment) in assignments { let cores = match assignment.cert.kind.clone() { @@ -1101,7 +1118,7 @@ mod tests { 
m.config.relay_vrf_modulo_samples = sample; Some(false) }, - AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield: _ } => Some(false), + AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield: _ } => Some(true), _ => None, // skip everything else. } }); diff --git a/node/primitives/src/approval.rs b/node/primitives/src/approval.rs index 171e6ce87bb5..7e7334113fed 100644 --- a/node/primitives/src/approval.rs +++ b/node/primitives/src/approval.rs @@ -467,10 +467,9 @@ pub mod v2 { #[cfg(test)] mod test { - use super::{ - v2::{BitIndex, Bitfield}, - *, - }; + use super::v2::{BitIndex, Bitfield}; + + use polkadot_primitives::{CandidateIndex, CoreIndex}; #[test] fn test_assignment_bitfield_from_vec() { From 28dd20344b08bdebb75b5d4d99b0a0c94e3fcb28 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Wed, 17 May 2023 12:19:59 +0000 Subject: [PATCH 052/105] add/modify approval distribution tests for v2 Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/criteria.rs | 20 +- node/core/approval-voting/src/tests.rs | 2 +- .../approval-distribution/src/tests.rs | 209 +++++++++++++++--- node/primitives/src/approval.rs | 6 +- 4 files changed, 197 insertions(+), 40 deletions(-) diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index 0deacf4351d6..a76912a8fa3d 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -531,7 +531,10 @@ fn compute_relay_vrf_modulo_assignments_v2( kind: AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield: assignment_bitfield.clone(), }, - vrf: (VrfOutput(vrf_in_out.to_output()), VrfProof(vrf_proof)), + vrf: VrfSignature { + output: VrfOutput(vrf_in_out.to_output()), + proof: VrfProof(vrf_proof), + }, }; // All assignments of type RelayVRFModulo have tranche 0. @@ -563,7 +566,10 @@ fn compute_relay_vrf_delay_assignments( let cert = AssignmentCertV2 { kind: AssignmentCertKindV2::RelayVRFDelay { core_index: core }, - vrf: (VrfOutput(vrf_in_out.to_output()), VrfProof(vrf_proof)), + vrf: VrfSignature { + output: VrfOutput(vrf_in_out.to_output()), + proof: VrfProof(vrf_proof), + }, }; let our_assignment = OurAssignment { @@ -685,7 +691,9 @@ pub(crate) fn check_assignment_cert( } } - let (vrf_output, vrf_proof) = &assignment.vrf; + let vrf_output = &assignment.vrf.output; + let vrf_proof = &assignment.vrf.proof; + match &assignment.kind { AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield } => { // Check that claimed core bitfield match the one from certificate. @@ -1084,7 +1092,7 @@ mod tests { let vrf_signature = garbage_vrf_signature(); match m.cert.kind.clone() { AssignmentCertKindV2::RelayVRFDelay { .. } => { - m.cert.vrf = (vrf_signature.output, vrf_signature.proof); + m.cert.vrf = vrf_signature; Some(false) }, _ => None, // skip everything else. @@ -1098,11 +1106,11 @@ mod tests { let vrf_signature = garbage_vrf_signature(); match m.cert.kind.clone() { AssignmentCertKindV2::RelayVRFModulo { .. } => { - m.cert.vrf = (vrf_signature.output, vrf_signature.proof); + m.cert.vrf = vrf_signature; Some(false) }, AssignmentCertKindV2::RelayVRFModuloCompact { .. } => { - m.cert.vrf = (vrf_signature.output, vrf_signature.proof); + m.cert.vrf = vrf_signature; Some(false) }, _ => None, // skip everything else. 
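A minimal illustrative sketch (not part of any patch above; the module paths follow the primitives touched in this series, while the helper name and surrounding code are assumptions) of how the fallible v2-to-v1 certificate downgrade is expected to behave once compact assignments exist:

use polkadot_node_primitives::approval::{v1, v2::AssignmentCertV2};

// Hypothetical helper: only certificate kinds that exist in v1 survive the downgrade.
// `RelayVRFModuloCompact` has no v1 representation, so the `TryFrom` impl added in this
// series returns `AssignmentConversionError::CertificateNotSupported` and we drop it here.
fn downgrade_for_v1_peer(cert: AssignmentCertV2) -> Option<v1::AssignmentCert> {
    v1::AssignmentCert::try_from(cert).ok()
}

This is also why the later batching code only forwards assignments claiming a single candidate to `ValidationVersion::V1` peers.
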
diff --git a/node/core/approval-voting/src/tests.rs b/node/core/approval-voting/src/tests.rs index 8f7580f9608b..07eb6c22a591 100644 --- a/node/core/approval-voting/src/tests.rs +++ b/node/core/approval-voting/src/tests.rs @@ -406,7 +406,7 @@ fn garbage_assignment_cert_v2(kind: AssignmentCertKindV2) -> AssignmentCertV2 { let (inout, proof, _) = keypair.vrf_sign(ctx.bytes(msg)); let out = inout.to_output(); - AssignmentCertV2 { kind, vrf: (VrfOutput(out), VrfProof(proof)) } + AssignmentCertV2 { kind, vrf: VrfSignature { output: VrfOutput(out), proof: VrfProof(proof) } } } fn sign_approval( diff --git a/node/network/approval-distribution/src/tests.rs b/node/network/approval-distribution/src/tests.rs index 81c138ebd5df..d0ab46ca700c 100644 --- a/node/network/approval-distribution/src/tests.rs +++ b/node/network/approval-distribution/src/tests.rs @@ -28,12 +28,15 @@ use polkadot_node_primitives::approval::{ AssignmentCert, AssignmentCertKind, IndirectAssignmentCert, VrfOutput, VrfProof, VrfSignature, }, - v2::RELAY_VRF_MODULO_CONTEXT, + v2::{ + AssignmentCertKindV2, AssignmentCertV2, CoreBitfield, IndirectAssignmentCertV2, + RELAY_VRF_MODULO_CONTEXT, + }, }; use polkadot_node_subsystem::messages::{network_bridge_event, AllMessages, ApprovalCheckError}; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::TimeoutExt as _; -use polkadot_primitives::{AuthorityDiscoveryId, BlakeTwo256, HashT}; +use polkadot_primitives::{AuthorityDiscoveryId, BlakeTwo256, CoreIndex, HashT}; use polkadot_primitives_test_helpers::dummy_signature; use rand::SeedableRng; use sp_authority_discovery::AuthorityPair as AuthorityDiscoveryPair; @@ -219,13 +222,14 @@ async fn setup_peer_with_view( virtual_overseer: &mut VirtualOverseer, peer_id: &PeerId, view: View, + version: ValidationVersion, ) { overseer_send( virtual_overseer, ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerConnected( peer_id.clone(), ObservedRole::Full, - ValidationVersion::V1.into(), + version.into(), None, )), ) @@ -255,6 +259,21 @@ async fn send_message_from_peer( .await; } +async fn send_message_from_peer_v2( + virtual_overseer: &mut VirtualOverseer, + peer_id: &PeerId, + msg: protocol_vstaging::ApprovalDistributionMessage, +) { + overseer_send( + virtual_overseer, + ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage( + peer_id.clone(), + Versioned::VStaging(msg), + )), + ) + .await; +} + fn fake_assignment_cert(block_hash: Hash, validator: ValidatorIndex) -> IndirectAssignmentCert { let ctx = schnorrkel::signing_context(RELAY_VRF_MODULO_CONTEXT); let msg = b"WhenParachains?"; @@ -273,6 +292,28 @@ fn fake_assignment_cert(block_hash: Hash, validator: ValidatorIndex) -> Indirect } } +fn fake_assignment_cert_v2( + block_hash: Hash, + validator: ValidatorIndex, + core_bitfield: CoreBitfield, +) -> IndirectAssignmentCertV2 { + let ctx = schnorrkel::signing_context(RELAY_VRF_MODULO_CONTEXT); + let msg = b"WhenParachains?"; + let mut prng = rand_core::OsRng; + let keypair = schnorrkel::Keypair::generate_with(&mut prng); + let (inout, proof, _) = keypair.vrf_sign(ctx.bytes(msg)); + let out = inout.to_output(); + + IndirectAssignmentCertV2 { + block_hash, + validator, + cert: AssignmentCertV2 { + kind: AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield }, + vrf: VrfSignature { output: VrfOutput(out), proof: VrfProof(proof) }, + }, + } +} + async fn expect_reputation_change( virtual_overseer: &mut VirtualOverseer, peer_id: &PeerId, @@ -307,9 +348,9 @@ fn 
try_import_the_same_assignment() { let _ = test_harness(State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; // setup peers - setup_peer_with_view(overseer, &peer_a, view![]).await; - setup_peer_with_view(overseer, &peer_b, view![hash]).await; - setup_peer_with_view(overseer, &peer_c, view![hash]).await; + setup_peer_with_view(overseer, &peer_a, view![], ValidationVersion::V1).await; + setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V1).await; + setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V1).await; // new block `hash_a` with 1 candidates let meta = BlockApprovalMeta { @@ -362,8 +403,8 @@ fn try_import_the_same_assignment() { } ); - // setup new peer - setup_peer_with_view(overseer, &peer_d, view![]).await; + // setup new peer with V2 + setup_peer_with_view(overseer, &peer_d, view![], ValidationVersion::VStaging).await; // send the same assignment from peer_d let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments); @@ -377,6 +418,99 @@ fn try_import_the_same_assignment() { }); } +/// import an assignment +/// connect a new peer +/// the new peer sends us the same assignment +#[test] +fn try_import_the_same_assignment_v2() { + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + let peer_c = PeerId::random(); + let peer_d = PeerId::random(); + let parent_hash = Hash::repeat_byte(0xFF); + let hash = Hash::repeat_byte(0xAA); + + let _ = test_harness(State::default(), |mut virtual_overseer| async move { + let overseer = &mut virtual_overseer; + // setup peers + setup_peer_with_view(overseer, &peer_a, view![], ValidationVersion::VStaging).await; + setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::VStaging).await; + setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::VStaging).await; + + // new block `hash_a` with 1 candidates + let meta = BlockApprovalMeta { + hash, + parent_hash, + number: 2, + candidates: vec![Default::default(); 1], + slot: 1.into(), + session: 1, + }; + let msg = ApprovalDistributionMessage::NewBlocks(vec![meta]); + overseer_send(overseer, msg).await; + + // send the assignment related to `hash` + let validator_index = ValidatorIndex(0); + let cores = vec![1, 2, 3, 4]; + let core_bitfield: CoreBitfield = cores + .iter() + .map(|index| CoreIndex(*index)) + .collect::>() + .try_into() + .unwrap(); + + let cert = fake_assignment_cert_v2(hash, validator_index, core_bitfield.clone()); + let assignments = vec![(cert.clone(), cores.clone().try_into().unwrap())]; + + let msg = protocol_vstaging::ApprovalDistributionMessage::Assignments(assignments.clone()); + send_message_from_peer_v2(overseer, &peer_a, msg).await; + + expect_reputation_change(overseer, &peer_a, COST_UNEXPECTED_MESSAGE).await; + + // send an `Accept` message from the Approval Voting subsystem + assert_matches!( + overseer_recv(overseer).await, + AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( + assignment, + claimed_indices, + tx, + )) => { + assert_eq!(claimed_indices, cores.try_into().unwrap()); + assert_eq!(assignment, cert.into()); + tx.send(AssignmentCheckResult::Accepted).unwrap(); + } + ); + + expect_reputation_change(overseer, &peer_a, BENEFIT_VALID_MESSAGE_FIRST).await; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( + 
protocol_vstaging::ApprovalDistributionMessage::Assignments(assignments) + )) + )) => { + assert_eq!(peers.len(), 2); + assert_eq!(assignments.len(), 1); + } + ); + + // setup new peer + setup_peer_with_view(overseer, &peer_d, view![], ValidationVersion::VStaging).await; + + // send the same assignment from peer_d + let msg = protocol_vstaging::ApprovalDistributionMessage::Assignments(assignments); + send_message_from_peer_v2(overseer, &peer_d, msg).await; + + expect_reputation_change(overseer, &peer_d, COST_UNEXPECTED_MESSAGE).await; + expect_reputation_change(overseer, &peer_d, BENEFIT_VALID_MESSAGE).await; + + assert!(overseer.recv().timeout(TIMEOUT).await.is_none(), "no message should be sent"); + virtual_overseer + }); +} + /// /// /// 1. Send a view update that removes block B from their view. @@ -392,7 +526,7 @@ fn spam_attack_results_in_negative_reputation_change() { let _ = test_harness(State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; let peer = &peer_a; - setup_peer_with_view(overseer, peer, view![]).await; + setup_peer_with_view(overseer, peer, view![], ValidationVersion::V1).await; // new block `hash_b` with 20 candidates let candidates_count = 20; @@ -476,7 +610,7 @@ fn peer_sending_us_the_same_we_just_sent_them_is_ok() { let _ = test_harness(State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; let peer = &peer_a; - setup_peer_with_view(overseer, peer, view![]).await; + setup_peer_with_view(overseer, peer, view![], ValidationVersion::V1).await; // new block `hash` with 1 candidates let meta = BlockApprovalMeta { @@ -554,10 +688,10 @@ fn import_approval_happy_path() { let _ = test_harness(State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; - // setup peers - setup_peer_with_view(overseer, &peer_a, view![]).await; - setup_peer_with_view(overseer, &peer_b, view![hash]).await; - setup_peer_with_view(overseer, &peer_c, view![hash]).await; + // setup peers with V1 and V2 protocol versions + setup_peer_with_view(overseer, &peer_a, view![], ValidationVersion::V1).await; + setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::VStaging).await; + setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V1).await; // new block `hash_a` with 1 candidates let meta = BlockApprovalMeta { @@ -584,6 +718,7 @@ fn import_approval_happy_path() { ) .await; + // 1 peer is v1 assert_matches!( overseer_recv(overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( @@ -592,7 +727,21 @@ fn import_approval_happy_path() { protocol_v1::ApprovalDistributionMessage::Assignments(assignments) )) )) => { - assert_eq!(peers.len(), 2); + assert_eq!(peers.len(), 1); + assert_eq!(assignments.len(), 1); + } + ); + + // 1 peer is v2 + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( + protocol_vstaging::ApprovalDistributionMessage::Assignments(assignments) + )) + )) => { + assert_eq!(peers.len(), 1); assert_eq!(assignments.len(), 1); } ); @@ -646,8 +795,8 @@ fn import_approval_bad() { let _ = test_harness(State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; // setup peers - setup_peer_with_view(overseer, &peer_a, view![]).await; - setup_peer_with_view(overseer, &peer_b, view![hash]).await; + setup_peer_with_view(overseer, 
&peer_a, view![], ValidationVersion::V1).await; + setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V1).await; // new block `hash_a` with 1 candidates let meta = BlockApprovalMeta { @@ -848,7 +997,7 @@ fn update_peer_view() { .await; // connect a peer - setup_peer_with_view(overseer, peer, view![hash_a]).await; + setup_peer_with_view(overseer, peer, view![hash_a], ValidationVersion::V1).await; // we should send relevant assignments to the peer assert_matches!( @@ -963,7 +1112,7 @@ fn import_remotely_then_locally() { let _ = test_harness(State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; // setup the peer - setup_peer_with_view(overseer, peer, view![hash]).await; + setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::V1).await; // new block `hash_a` with 1 candidates let meta = BlockApprovalMeta { @@ -1090,7 +1239,7 @@ fn sends_assignments_even_when_state_is_approved() { .await; // connect the peer. - setup_peer_with_view(overseer, peer, view![hash]).await; + setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::V1).await; let assignments = vec![(cert.clone(), candidate_index)]; let approvals = vec![approval.clone()]; @@ -1154,7 +1303,7 @@ fn race_condition_in_local_vs_remote_view_update() { }; // This will send a peer view that is ahead of our view - setup_peer_with_view(overseer, peer, view![hash_b]).await; + setup_peer_with_view(overseer, peer, view![hash_b], ValidationVersion::V1).await; // Send our view update to include a new head overseer_send( @@ -1218,7 +1367,7 @@ fn propagates_locally_generated_assignment_to_both_dimensions() { // Connect all peers. for (peer, _) in &peers { - setup_peer_with_view(overseer, peer, view![hash]).await; + setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::V1).await; } // Set up a gossip topology. @@ -1326,7 +1475,7 @@ fn propagates_assignments_along_unshared_dimension() { // Connect all peers. for (peer, _) in &peers { - setup_peer_with_view(overseer, peer, view![hash]).await; + setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::V1).await; } // Set up a gossip topology. @@ -1468,7 +1617,7 @@ fn propagates_to_required_after_connect() { // Connect all peers except omitted. for (i, (peer, _)) in peers.iter().enumerate() { if !omitted.contains(&i) { - setup_peer_with_view(overseer, peer, view![hash]).await; + setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::V1).await; } } @@ -1560,7 +1709,7 @@ fn propagates_to_required_after_connect() { ); for i in omitted.iter().copied() { - setup_peer_with_view(overseer, &peers[i].0, view![hash]).await; + setup_peer_with_view(overseer, &peers[i].0, view![hash], ValidationVersion::V1).await; assert_matches!( overseer_recv(overseer).await, @@ -1609,7 +1758,7 @@ fn sends_to_more_peers_after_getting_topology() { // Connect all peers except omitted. for (peer, _) in &peers { - setup_peer_with_view(overseer, peer, view![hash]).await; + setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::V1).await; } // new block `hash_a` with 1 candidates @@ -1764,7 +1913,7 @@ fn originator_aggression_l1() { // Connect all peers except omitted. for (peer, _) in &peers { - setup_peer_with_view(overseer, peer, view![hash]).await; + setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::V1).await; } // new block `hash_a` with 1 candidates @@ -1928,7 +2077,7 @@ fn non_originator_aggression_l1() { // Connect all peers except omitted. 
for (peer, _) in &peers { - setup_peer_with_view(overseer, peer, view![hash]).await; + setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::V1).await; } // new block `hash_a` with 1 candidates @@ -2033,7 +2182,7 @@ fn non_originator_aggression_l2() { // Connect all peers except omitted. for (peer, _) in &peers { - setup_peer_with_view(overseer, peer, view![hash]).await; + setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::V1).await; } // new block `hash_a` with 1 candidates @@ -2199,7 +2348,7 @@ fn resends_messages_periodically() { // Connect all peers. for (peer, _) in &peers { - setup_peer_with_view(overseer, peer, view![hash]).await; + setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::V1).await; } // Set up a gossip topology. diff --git a/node/primitives/src/approval.rs b/node/primitives/src/approval.rs index 7e7334113fed..c53519a7889a 100644 --- a/node/primitives/src/approval.rs +++ b/node/primitives/src/approval.rs @@ -388,7 +388,7 @@ pub mod v2 { /// The criterion which is claimed to be met by this cert. pub kind: AssignmentCertKindV2, /// The VRF showing the criterion is met. - pub vrf: (VrfOutput, VrfProof), + pub vrf: VrfSignature, } impl From for AssignmentCertV2 { @@ -400,7 +400,7 @@ pub mod v2 { super::v1::AssignmentCertKind::RelayVRFModulo { sample } => AssignmentCertKindV2::RelayVRFModulo { sample }, }, - vrf: (cert.vrf.output, cert.vrf.proof), + vrf: cert.vrf, } } } @@ -424,7 +424,7 @@ pub mod v2 { // Not supported _ => return Err(AssignmentConversionError::CertificateNotSupported), }, - vrf: VrfSignature { output: cert.vrf.0, proof: cert.vrf.1 }, + vrf: cert.vrf, }) } } From 5c5b86b29794fa64ee055ad3496897bfd48dbd57 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Wed, 17 May 2023 14:28:23 +0000 Subject: [PATCH 053/105] Add another approval distribution test Signed-off-by: Andrei Sandu --- .../approval-distribution/src/tests.rs | 110 +++++++++++++++++- 1 file changed, 107 insertions(+), 3 deletions(-) diff --git a/node/network/approval-distribution/src/tests.rs b/node/network/approval-distribution/src/tests.rs index d0ab46ca700c..626b8a6e21a6 100644 --- a/node/network/approval-distribution/src/tests.rs +++ b/node/network/approval-distribution/src/tests.rs @@ -418,9 +418,8 @@ fn try_import_the_same_assignment() { }); } -/// import an assignment -/// connect a new peer -/// the new peer sends us the same assignment +/// Just like `try_import_the_same_assignment` but use `VRFModuloCompact` assignments for multiple +/// cores. #[test] fn try_import_the_same_assignment_v2() { let peer_a = PeerId::random(); @@ -1275,6 +1274,111 @@ fn sends_assignments_even_when_state_is_approved() { }); } +/// Same as `sends_assignments_even_when_state_is_approved_v2` but with `VRFModuloCompact` assignemnts. 
+#[test] +fn sends_assignments_even_when_state_is_approved_v2() { + let peer_a = PeerId::random(); + let parent_hash = Hash::repeat_byte(0xFF); + let hash = Hash::repeat_byte(0xAA); + let peer = &peer_a; + + let _ = test_harness(State::default(), |mut virtual_overseer| async move { + let overseer = &mut virtual_overseer; + + // new block `hash_a` with 1 candidates + let meta = BlockApprovalMeta { + hash, + parent_hash, + number: 1, + candidates: vec![Default::default(); 4], + slot: 1.into(), + session: 1, + }; + let msg = ApprovalDistributionMessage::NewBlocks(vec![meta]); + overseer_send(overseer, msg).await; + + let validator_index = ValidatorIndex(0); + let cores = vec![0, 1, 2, 3]; + let candidate_bitfield: CandidateBitfield = cores.clone().try_into().unwrap(); + + let core_bitfield: CoreBitfield = cores + .iter() + .map(|index| CoreIndex(*index)) + .collect::>() + .try_into() + .unwrap(); + + let cert = fake_assignment_cert_v2(hash, validator_index, core_bitfield.clone()); + + // Assumes candidate index == core index. + let approvals = cores + .iter() + .map(|core| IndirectSignedApprovalVote { + block_hash: hash, + candidate_index: *core, + validator: validator_index, + signature: dummy_signature(), + }) + .collect::>(); + + overseer_send( + overseer, + ApprovalDistributionMessage::DistributeAssignment( + cert.clone().into(), + candidate_bitfield.clone(), + ), + ) + .await; + + for approval in &approvals { + overseer_send( + overseer, + ApprovalDistributionMessage::DistributeApproval(approval.clone()), + ) + .await; + } + + // connect the peer. + setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::VStaging).await; + + let assignments = vec![(cert.clone(), candidate_bitfield.clone())]; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( + protocol_vstaging::ApprovalDistributionMessage::Assignments(sent_assignments) + )) + )) => { + assert_eq!(peers, vec![peer.clone()]); + assert_eq!(sent_assignments, assignments); + } + ); + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( + protocol_vstaging::ApprovalDistributionMessage::Approvals(sent_approvals) + )) + )) => { + // Construct a hashmaps of approvals for comparison. Approval distribution reorders messages because they are kept in a + // hashmap as well. + let sent_approvals = sent_approvals.into_iter().map(|approval| (approval.candidate_index, approval)).collect::>(); + let approvals = approvals.into_iter().map(|approval| (approval.candidate_index, approval)).collect::>(); + + assert_eq!(peers, vec![peer.clone()]); + assert_eq!(sent_approvals, approvals); + } + ); + + assert!(overseer.recv().timeout(TIMEOUT).await.is_none(), "no message should be sent"); + virtual_overseer + }); +} + /// /// /// 1. 
Receive remote peer view update with an unknown head From 7f51489e52398453968093a221972af6c3326cac Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Wed, 17 May 2023 14:50:05 +0000 Subject: [PATCH 054/105] add zombienet test Signed-off-by: Andrei Sandu --- scripts/ci/gitlab/pipeline/zombienet.yml | 29 ++++++++++++++ .../0004-parachains-max-tranche0.toml | 40 +++++++++++++++++++ .../0004-parachains-max-tranche0.zndsl | 27 +++++++++++++ 3 files changed, 96 insertions(+) create mode 100644 zombienet_tests/functional/0004-parachains-max-tranche0.toml create mode 100644 zombienet_tests/functional/0004-parachains-max-tranche0.zndsl diff --git a/scripts/ci/gitlab/pipeline/zombienet.yml b/scripts/ci/gitlab/pipeline/zombienet.yml index be61502eb8a8..3424b08e0a0b 100644 --- a/scripts/ci/gitlab/pipeline/zombienet.yml +++ b/scripts/ci/gitlab/pipeline/zombienet.yml @@ -124,6 +124,35 @@ zombienet-tests-parachains-disputes-garbage-candidate: tags: - zombienet-polkadot-integration-test +zombienet-tests-parachains-max-tranche0-approvals: + stage: zombienet + image: "${ZOMBIENET_IMAGE}" + extends: + - .kubernetes-env + - .zombienet-refs + needs: + - job: publish-polkadot-debug-image + - job: publish-test-collators-image + - job: publish-malus-image + variables: + GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/functional" + before_script: + - echo "Zombie-net Tests Config" + - echo "${ZOMBIENET_IMAGE_NAME}" + - echo "${PARACHAINS_IMAGE_NAME} ${PARACHAINS_IMAGE_TAG}" + - echo "${GH_DIR}" + - export DEBUG=zombie,zombie::network-node + - export ZOMBIENET_INTEGRATION_TEST_IMAGE=${PARACHAINS_IMAGE_NAME}:${PARACHAINS_IMAGE_TAG} + - export COL_IMAGE=${COLLATOR_IMAGE_NAME}:${COLLATOR_IMAGE_TAG} + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-env-manager.sh + --github-remote-dir="${GH_DIR}" + --test="0004-parachains-max-tranche0.zndsl" + allow_failure: false + retry: 2 + tags: + - zombienet-polkadot-integration-test + zombienet-test-parachains-upgrade-smoke-test: stage: zombienet image: "${ZOMBIENET_IMAGE}" diff --git a/zombienet_tests/functional/0004-parachains-max-tranche0.toml b/zombienet_tests/functional/0004-parachains-max-tranche0.toml new file mode 100644 index 000000000000..ab7fa0195d13 --- /dev/null +++ b/zombienet_tests/functional/0004-parachains-max-tranche0.toml @@ -0,0 +1,40 @@ +[settings] +timeout = 1000 +bootnode = true + +[relaychain.genesis.runtime.runtime_genesis_config.configuration.config] + max_validators_per_core = 1 + needed_approvals = 7 + relay_vrf_modulo_samples = 5 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" +chain_spec_command = "polkadot build-spec --chain rococo-local --disable-default-bootnode" +default_command = "polkadot" + +[relaychain.default_resources] +limits = { memory = "4G", cpu = "2" } +requests = { memory = "2G", cpu = "1" } + + [[relaychain.node_groups]] + name = "some-validator" + count = 8 + args = ["-lparachain=debug,runtime=debug"] + +{% for id in range(2000,2005) %} +[[parachains]] +id = {{id}} +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size={{10000*(id-1999)}} --pvf-complexity={{id - 1999}}" + [parachains.collator] + image = "{{COL_IMAGE}}" + name = "collator" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size={{10000*(id-1999)}}", "--parachain-id={{id}}", "--pvf-complexity={{id - 1999}}"] +{% endfor %} + +[types.Header] +number = "u64" +parent_hash = "Hash" +post_state = "Hash" diff --git 
a/zombienet_tests/functional/0004-parachains-max-tranche0.zndsl b/zombienet_tests/functional/0004-parachains-max-tranche0.zndsl new file mode 100644 index 000000000000..4be4812fd1bd --- /dev/null +++ b/zombienet_tests/functional/0004-parachains-max-tranche0.zndsl @@ -0,0 +1,27 @@ +Description: Test if parachains make progress with most of approvals being tranch0 +Network: ./0004-parachains-max-tranche0.toml +Creds: config + +# Check authority status. +some-validator-0: reports node_roles is 4 +some-validator-1: reports node_roles is 4 +some-validator-3: reports node_roles is 4 +some-validator-4: reports node_roles is 4 +some-validator-5: reports node_roles is 4 +some-validator-6: reports node_roles is 4 +some-validator-7: reports node_roles is 4 + +some-validator-0: parachain 2000 block height is at least 5 within 180 seconds +some-validator-1: parachain 2001 block height is at least 5 within 180 seconds +some-validator-2: parachain 2002 block height is at least 5 within 180 seconds +some-validator-3: parachain 2003 block height is at least 5 within 180 seconds +some-validator-4: parachain 2004 block height is at least 5 within 180 seconds + +some-validator-0: reports polkadot_parachain_approval_checking_finality_lag is lower than 2 +some-validator-1: reports polkadot_parachain_approval_checking_finality_lag is lower than 2 +some-validator-2: reports polkadot_parachain_approval_checking_finality_lag is lower than 2 +some-validator-3: reports polkadot_parachain_approval_checking_finality_lag is lower than 2 +some-validator-4: reports polkadot_parachain_approval_checking_finality_lag is lower than 2 +some-validator-5: reports polkadot_parachain_approval_checking_finality_lag is lower than 2 +some-validator-6: reports polkadot_parachain_approval_checking_finality_lag is lower than 2 +some-validator-7: reports polkadot_parachain_approval_checking_finality_lag is lower than 2 From f4a983b13e4b3b94d5ee6f8954f6ce459ef87ddb Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Wed, 17 May 2023 18:30:21 +0000 Subject: [PATCH 055/105] fix tests build Signed-off-by: Andrei Sandu --- node/service/src/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/service/src/tests.rs b/node/service/src/tests.rs index 758581e1e682..1a8eaa606469 100644 --- a/node/service/src/tests.rs +++ b/node/service/src/tests.rs @@ -17,7 +17,7 @@ use super::{relay_chain_selection::*, *}; use futures::channel::oneshot::Receiver; -use polkadot_node_primitives::approval::VrfSignature; +use polkadot_node_primitives::approval::v2::VrfSignature; use polkadot_node_subsystem::messages::{AllMessages, BlockDescription}; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::TimeoutExt; From 2a65a98095f40986e111900ae4d1c7cd526f40e9 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Thu, 18 May 2023 09:44:42 +0000 Subject: [PATCH 056/105] fix build Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/lib.rs | 5 +++-- node/primitives/src/approval.rs | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index 9542d69e6e9f..339ab035dcf6 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -2035,8 +2035,9 @@ where check_and_import_assignment_span.add_uint_tag("tranche", tranche as u64); // We've imported a new assignment, so we need to schedule a wake-up for when that might no-show. 
- if let Some((approval_entry, status)) = - state.approval_status(sender, &block_entry, session_info_provider, &candidate_entry) + if let Some((approval_entry, status)) = state + .approval_status(sender, session_info_provider, &block_entry, &candidate_entry) + .await { actions.extend(schedule_wakeup_action( approval_entry, diff --git a/node/primitives/src/approval.rs b/node/primitives/src/approval.rs index c53519a7889a..5510409da799 100644 --- a/node/primitives/src/approval.rs +++ b/node/primitives/src/approval.rs @@ -173,7 +173,8 @@ pub mod v1 { let pubkey = schnorrkel::PublicKey::from_bytes(author.as_slice()) .map_err(ApprovalError::SchnorrkelSignature)?; - let transcript = sp_consensus_babe::make_transcript(randomness, self.slot, epoch_index); + let transcript = + sp_consensus_babe::make_vrf_transcript(randomness, self.slot, epoch_index); let inout = self .vrf_output From 88b4349c40304d1c67ed4b181ed380a92adb9abe Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Thu, 18 May 2023 09:45:29 +0000 Subject: [PATCH 057/105] enable v2 assignemnts for testing Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/criteria.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index a76912a8fa3d..e8ddcd7eb152 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -276,7 +276,7 @@ impl AssignmentCriteria for RealAssignmentCriteria { config: &Config, leaving_cores: Vec<(CandidateHash, CoreIndex, GroupIndex)>, ) -> HashMap { - compute_assignments(keystore, relay_vrf_story, config, leaving_cores, false) + compute_assignments(keystore, relay_vrf_story, config, leaving_cores, true) } fn check_assignment_cert( From 424ea57eab8bc9e6ddb599e118b1655debe4262d Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Thu, 18 May 2023 15:37:04 +0000 Subject: [PATCH 058/105] Add back removed comment Signed-off-by: Andrei Sandu --- node/network/approval-distribution/src/lib.rs | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index 03b0638358b9..b766ed9c9616 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -225,6 +225,24 @@ struct PeerEntry { pub version: ProtocolVersion, } +// In case the original gtid topology mechanisms don't work on their own, we need to trade bandwidth +// for protocol liveliness by introducing aggression. +// +// Aggression has 3 levels: +// +// * Aggression Level 0: The basic behaviors described above. +// * Aggression Level 1: The originator of a message sends to all peers. Other peers follow the rules above. +// * Aggression Level 2: All peers send all messages to all their row and column neighbors. +// This means that each validator will, on average, receive each message approximately `2*sqrt(n)` times. +// The aggression level of messages pertaining to a block increases when that block is unfinalized and +// is a child of the finalized block. +// This means that only one block at a time has its messages propagated with aggression > 0. +// +// A note on aggression thresholds: changes in propagation apply only to blocks which are the +// _direct descendants_ of the finalized block which are older than the given threshold, +// not to all blocks older than the threshold. 
Most likely, a few assignments struggle to +// be propagated in a single block and this holds up all of its descendants blocks. +// Accordingly, we only step on the gas for the block which is most obviously holding up finality. /// Aggression configuration representation #[derive(Clone)] struct AggressionConfig { From 229d1c111d3757fc9fab8d76c1ee22df031484ac Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Mon, 22 May 2023 13:07:48 +0000 Subject: [PATCH 059/105] review feedback 1/2 Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/criteria.rs | 26 +++++++++-- node/core/approval-voting/src/lib.rs | 4 +- node/network/approval-distribution/src/lib.rs | 43 +++++++++++-------- node/network/protocol/src/lib.rs | 2 +- .../src/node/approval/approval-voting.md | 2 +- 5 files changed, 50 insertions(+), 27 deletions(-) diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index e8ddcd7eb152..899fec0a45f2 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -372,6 +372,7 @@ pub(crate) fn compute_assignments( let mut assignments = HashMap::new(); + // First run `RelayVRFModulo` for each sample. if enable_v2_assignments { compute_relay_vrf_modulo_assignments_v2( &assignments_key, @@ -382,7 +383,7 @@ pub(crate) fn compute_assignments( &mut assignments, ); } else { - compute_relay_vrf_modulo_assignments( + compute_relay_vrf_modulo_assignments_v1( &assignments_key, index, config, @@ -405,7 +406,7 @@ pub(crate) fn compute_assignments( assignments } -fn compute_relay_vrf_modulo_assignments( +fn compute_relay_vrf_modulo_assignments_v1( assignments_key: &schnorrkel::Keypair, validator_index: ValidatorIndex, config: &Config, @@ -716,7 +717,6 @@ pub(crate) fn check_assignment_cert( config.n_cores, ); - // TODO: Enforce that all claimable cores are claimed, or include refused cores. // Currently validators can opt out of checking specific cores. // This is the same issue to how validator can opt out and not send their assignments in the first place. @@ -740,6 +740,16 @@ pub(crate) fn check_assignment_cert( return Err(InvalidAssignment(Reason::SampleOutOfBounds)) } + // Enforce claimed candidates is 1. + if claimed_core_indices.count_ones() != 1 { + gum::warn!( + target: LOG_TARGET, + ?claimed_core_indices, + "`RelayVRFModulo` assignment must always claim 1 core", + ); + return Err(InvalidAssignment(Reason::InvalidArguments)) + } + let (vrf_in_out, _) = public .vrf_verify_extra( relay_vrf_modulo_transcript_v1(relay_vrf_story, *sample), @@ -766,6 +776,16 @@ pub(crate) fn check_assignment_cert( } }, AssignmentCertKindV2::RelayVRFDelay { core_index } => { + // Enforce claimed candidates is 1. 
+ if claimed_core_indices.count_ones() != 1 { + gum::debug!( + target: LOG_TARGET, + ?claimed_core_indices, + "`RelayVRFDelay` assignment must always claim 1 core", + ); + return Err(InvalidAssignment(Reason::InvalidArguments)) + } + if core_index.0 as usize != claimed_core_indices.first_one().expect("Checked above; qed") { diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index 339ab035dcf6..30e091ae08cb 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -431,8 +431,7 @@ struct ApprovalVoteRequest { #[derive(Default)] struct Wakeups { - // Tick -> [(Relay Block, Vec of Candidate Hash)] - // For Compact modulo VRF wakeups we want to wake-up once for all candidates + // Tick -> [(Relay Block, Candidate Hash)] wakeups: BTreeMap>, reverse_wakeups: HashMap<(Hash, CandidateHash), Tick>, block_numbers: BTreeMap>, @@ -1863,7 +1862,6 @@ where .map(|span| span.child("check-and-import-assignment")) .unwrap_or_else(|| jaeger::Span::new(assignment.block_hash, "check-and-import-assignment")) .with_relay_parent(assignment.block_hash) - // .with_uint_tag("candidate-index", candidate_index as u64) .with_stage(jaeger::Stage::ApprovalChecking); for candidate_index in candidate_indices.iter_ones() { diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index b766ed9c9616..a950b8d660c9 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -412,15 +412,15 @@ struct BlockEntry { /// This maps to their knowledge of messages. known_by: HashMap, /// The number of the block. - pub number: BlockNumber, + number: BlockNumber, /// The parent hash of the block. - pub parent_hash: Hash, + parent_hash: Hash, /// Our knowledge of messages. - pub knowledge: Knowledge, + knowledge: Knowledge, /// A votes entry for each candidate indexed by [`CandidateIndex`]. candidates: Vec, /// The session index of this block. - pub session: SessionIndex, + session: SessionIndex, /// Approval entries for whole block. These also contain all approvals in the cae of multiple candidates /// being claimed by assignments. approval_entries: HashMap<(ValidatorIndex, CandidateBitfield), ApprovalEntry>, @@ -2002,6 +2002,7 @@ impl ApprovalDistribution { ApprovalDistributionMessage::DistributeAssignment(cert, candidate_indices) => { // TODO: Fix warning: `Importing locally an already known assignment` for multiple candidate assignments. // This is due to the fact that we call this on wakeup, and we do have a wakeup for each candidate index, but + // a single assignment claiming the candidates. let _span = state .spans .get(&cert.block_hash) @@ -2101,18 +2102,19 @@ pub const MAX_APPROVAL_BATCH_SIZE: usize = ensure_size_not_zero( // Low level helper for sending assignments. async fn send_assignments_batched_inner( sender: &mut impl overseer::ApprovalDistributionSenderTrait, - batch: Vec<(IndirectAssignmentCertV2, CandidateBitfield)>, - peers: &Vec, - // TODO: use `ValidationVersion`. 
- peer_version: u32, + batch: impl IntoIterator, + peers: &[PeerId], + peer_version: ValidationVersion, ) { let peers = peers.into_iter().cloned().collect::>(); - if peer_version == 2 { + if peer_version == ValidationVersion::VStaging { sender .send_message(NetworkBridgeTxMessage::SendValidationMessage( peers, Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( - protocol_vstaging::ApprovalDistributionMessage::Assignments(batch), + protocol_vstaging::ApprovalDistributionMessage::Assignments( + batch.into_iter().collect(), + ), )), )) .await; @@ -2152,22 +2154,24 @@ async fn send_assignments_batched_inner( /// of assignments and can `select!` other tasks. pub(crate) async fn send_assignments_batched( sender: &mut impl overseer::ApprovalDistributionSenderTrait, - v2_assignments: Vec<(IndirectAssignmentCertV2, CandidateBitfield)>, - peers: &Vec<(PeerId, ProtocolVersion)>, + v2_assignments: impl IntoIterator + Clone, + peers: &[(PeerId, ProtocolVersion)], ) { let v1_peers = filter_by_peer_version(peers, ValidationVersion::V1.into()); let v2_peers = filter_by_peer_version(peers, ValidationVersion::VStaging.into()); if v1_peers.len() > 0 { - let mut v1_assignments = v2_assignments.clone(); // Older peers(v1) do not understand `AssignmentsV2` messages, so we have to filter these out. - v1_assignments.retain(|(_, candidates)| candidates.count_ones() == 1); + let v1_assignments = v2_assignments + .clone() + .into_iter() + .filter(|(_, candidates)| candidates.count_ones() == 1); - let mut v1_batches = v1_assignments.into_iter().peekable(); + let mut v1_batches = v1_assignments.peekable(); while v1_batches.peek().is_some() { let batch: Vec<_> = v1_batches.by_ref().take(MAX_ASSIGNMENT_BATCH_SIZE).collect(); - send_assignments_batched_inner(sender, batch, &v1_peers, 1).await; + send_assignments_batched_inner(sender, batch, &v1_peers, ValidationVersion::V1).await; } } @@ -2176,7 +2180,8 @@ pub(crate) async fn send_assignments_batched( while v2_batches.peek().is_some() { let batch = v2_batches.by_ref().take(MAX_ASSIGNMENT_BATCH_SIZE).collect::>(); - send_assignments_batched_inner(sender, batch, &v2_peers, 2).await; + send_assignments_batched_inner(sender, batch, &v2_peers, ValidationVersion::VStaging) + .await; } } } @@ -2184,8 +2189,8 @@ pub(crate) async fn send_assignments_batched( /// Send approvals while honoring the `max_notification_size` of the protocol and peer version. pub(crate) async fn send_approvals_batched( sender: &mut impl overseer::ApprovalDistributionSenderTrait, - approvals: Vec, - peers: &Vec<(PeerId, ProtocolVersion)>, + approvals: impl IntoIterator + Clone, + peers: &[(PeerId, ProtocolVersion)], ) { let v1_peers = filter_by_peer_version(peers, ValidationVersion::V1.into()); let v2_peers = filter_by_peer_version(peers, ValidationVersion::VStaging.into()); diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index c26ddeb14d6f..009d44689c2e 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -632,7 +632,7 @@ pub mod v1 { /// Returns the subset of `peers` with the specified `version`. 
pub fn filter_by_peer_version( - peers: &Vec<(PeerId, peer_set::ProtocolVersion)>, + peers: &[(PeerId, peer_set::ProtocolVersion)], version: peer_set::ProtocolVersion, ) -> Vec { peers.iter().filter(|(_, v)| v == &version).map(|(p, _)| *p).collect::>() diff --git a/roadmap/implementers-guide/src/node/approval/approval-voting.md b/roadmap/implementers-guide/src/node/approval/approval-voting.md index a307092ad7ef..d8d9826a0f01 100644 --- a/roadmap/implementers-guide/src/node/approval/approval-voting.md +++ b/roadmap/implementers-guide/src/node/approval/approval-voting.md @@ -55,7 +55,7 @@ struct OurAssignment { validator_index: ValidatorIndex, triggered: bool, /// A subset of the core indices obtained from the VRF output. - pub assignment_bitfield: AssignmentBitfield, + assignment_bitfield: AssignmentBitfield, } struct ApprovalEntry { From 9152a9cfe03440678c80c53e1934cf931f3f00ff Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Wed, 24 May 2023 14:31:07 +0000 Subject: [PATCH 060/105] Full approval db migration to v2 Signed-off-by: Andrei Sandu --- node/core/approval-voting/Cargo.toml | 3 + .../approval-voting/src/approval_checking.rs | 36 +- .../approval-voting/src/approval_db/mod.rs | 1 + .../approval-voting/src/approval_db/v1/mod.rs | 309 +------------- .../src/approval_db/v2/migration_helpers.rs | 268 ++++++++++++ .../approval-voting/src/approval_db/v2/mod.rs | 383 ++++++++++++++++++ .../src/approval_db/{v1 => v2}/tests.rs | 8 +- node/core/approval-voting/src/backend.rs | 9 +- node/core/approval-voting/src/criteria.rs | 28 +- node/core/approval-voting/src/import.rs | 12 +- node/core/approval-voting/src/lib.rs | 10 +- node/core/approval-voting/src/ops.rs | 2 +- .../approval-voting/src/persisted_entries.rs | 60 ++- node/core/approval-voting/src/tests.rs | 24 +- node/service/src/parachains_db/upgrade.rs | 134 ++++-- 15 files changed, 885 insertions(+), 402 deletions(-) create mode 100644 node/core/approval-voting/src/approval_db/v2/migration_helpers.rs create mode 100644 node/core/approval-voting/src/approval_db/v2/mod.rs rename node/core/approval-voting/src/approval_db/{v1 => v2}/tests.rs (100%) diff --git a/node/core/approval-voting/Cargo.toml b/node/core/approval-voting/Cargo.toml index ba689e368bfc..1436030c8b4d 100644 --- a/node/core/approval-voting/Cargo.toml +++ b/node/core/approval-voting/Cargo.toml @@ -30,6 +30,9 @@ sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "mast sp-consensus-slots = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, features = ["full_crypto"] } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +# Needed for migration test helpers +test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } +rand_core = "0.5.1" [dev-dependencies] async-trait = "0.1.57" diff --git a/node/core/approval-voting/src/approval_checking.rs b/node/core/approval-voting/src/approval_checking.rs index 07129f4c4aa7..d661ccd80578 100644 --- a/node/core/approval-voting/src/approval_checking.rs +++ b/node/core/approval-voting/src/approval_checking.rs @@ -472,7 +472,7 @@ mod tests { } .into(); - let approval_entry = approval_db::v1::ApprovalEntry { + let approval_entry = approval_db::v2::ApprovalEntry { tranches: Vec::new(), assigned_validators: BitVec::default(), our_assignment: None, @@ -509,17 
+509,17 @@ mod tests { candidate.mark_approval(ValidatorIndex(i)); } - let approval_entry = approval_db::v1::ApprovalEntry { + let approval_entry = approval_db::v2::ApprovalEntry { tranches: vec![ - approval_db::v1::TrancheEntry { + approval_db::v2::TrancheEntry { tranche: 0, assignments: (0..2).map(|i| (ValidatorIndex(i), 0.into())).collect(), }, - approval_db::v1::TrancheEntry { + approval_db::v2::TrancheEntry { tranche: 1, assignments: (2..5).map(|i| (ValidatorIndex(i), 1.into())).collect(), }, - approval_db::v1::TrancheEntry { + approval_db::v2::TrancheEntry { tranche: 2, assignments: (5..10).map(|i| (ValidatorIndex(i), 0.into())).collect(), }, @@ -581,17 +581,17 @@ mod tests { candidate.mark_approval(ValidatorIndex(i)); } - let approval_entry = approval_db::v1::ApprovalEntry { + let approval_entry = approval_db::v2::ApprovalEntry { tranches: vec![ - approval_db::v1::TrancheEntry { + approval_db::v2::TrancheEntry { tranche: 0, assignments: (0..4).map(|i| (ValidatorIndex(i), 0.into())).collect(), }, - approval_db::v1::TrancheEntry { + approval_db::v2::TrancheEntry { tranche: 1, assignments: (4..6).map(|i| (ValidatorIndex(i), 1.into())).collect(), }, - approval_db::v1::TrancheEntry { + approval_db::v2::TrancheEntry { tranche: 2, assignments: (6..10).map(|i| (ValidatorIndex(i), 0.into())).collect(), }, @@ -647,7 +647,7 @@ mod tests { let no_show_duration = 10; let needed_approvals = 4; - let mut approval_entry: ApprovalEntry = approval_db::v1::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { tranches: Vec::new(), assigned_validators: bitvec![u8, BitOrderLsb0; 0; 5], our_assignment: None, @@ -691,7 +691,7 @@ mod tests { let no_show_duration = 10; let needed_approvals = 4; - let mut approval_entry: ApprovalEntry = approval_db::v1::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { tranches: Vec::new(), assigned_validators: bitvec![u8, BitOrderLsb0; 0; 10], our_assignment: None, @@ -731,7 +731,7 @@ mod tests { let no_show_duration = 10; let needed_approvals = 4; - let mut approval_entry: ApprovalEntry = approval_db::v1::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { tranches: Vec::new(), assigned_validators: bitvec![u8, BitOrderLsb0; 0; 10], our_assignment: None, @@ -776,7 +776,7 @@ mod tests { let needed_approvals = 4; let n_validators = 8; - let mut approval_entry: ApprovalEntry = approval_db::v1::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { tranches: Vec::new(), assigned_validators: bitvec![u8, BitOrderLsb0; 0; n_validators], our_assignment: None, @@ -843,7 +843,7 @@ mod tests { let needed_approvals = 4; let n_validators = 8; - let mut approval_entry: ApprovalEntry = approval_db::v1::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { tranches: Vec::new(), assigned_validators: bitvec![u8, BitOrderLsb0; 0; n_validators], our_assignment: None, @@ -934,7 +934,7 @@ mod tests { let needed_approvals = 4; let n_validators = 8; - let mut approval_entry: ApprovalEntry = approval_db::v1::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { tranches: Vec::new(), assigned_validators: bitvec![u8, BitOrderLsb0; 0; n_validators], our_assignment: None, @@ -1041,10 +1041,10 @@ mod tests { candidate.mark_approval(ValidatorIndex(i)); } - let approval_entry = approval_db::v1::ApprovalEntry { + let approval_entry = approval_db::v2::ApprovalEntry { tranches: vec![ 
// Assignments with invalid validator indexes. - approval_db::v1::TrancheEntry { + approval_db::v2::TrancheEntry { tranche: 1, assignments: (2..5).map(|i| (ValidatorIndex(i), 1.into())).collect(), }, @@ -1094,7 +1094,7 @@ mod tests { ]; for test_tranche in test_tranches { - let mut approval_entry: ApprovalEntry = approval_db::v1::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { tranches: Vec::new(), backing_group: GroupIndex(0), our_assignment: None, diff --git a/node/core/approval-voting/src/approval_db/mod.rs b/node/core/approval-voting/src/approval_db/mod.rs index f0ace95613da..20fb6aa82d8d 100644 --- a/node/core/approval-voting/src/approval_db/mod.rs +++ b/node/core/approval-voting/src/approval_db/mod.rs @@ -31,3 +31,4 @@ //! time being we share the same DB with the rest of Substrate. pub mod v1; +pub mod v2; diff --git a/node/core/approval-voting/src/approval_db/v1/mod.rs b/node/core/approval-voting/src/approval_db/v1/mod.rs index 7d01f65127f5..5a6c19592532 100644 --- a/node/core/approval-voting/src/approval_db/v1/mod.rs +++ b/node/core/approval-voting/src/approval_db/v1/mod.rs @@ -17,173 +17,25 @@ //! Version 1 of the DB schema. use parity_scale_codec::{Decode, Encode}; -use polkadot_node_primitives::approval::{ - v1::DelayTranche, - v2::{AssignmentCertV2, CoreBitfield}, -}; -use polkadot_node_subsystem::{SubsystemError, SubsystemResult}; -use polkadot_node_subsystem_util::database::{DBTransaction, Database}; +use polkadot_node_primitives::approval::v1::{AssignmentCert, DelayTranche}; use polkadot_primitives::{ - BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, Hash, SessionIndex, - ValidatorIndex, ValidatorSignature, -}; -use sp_consensus_slots::Slot; - -use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; -use std::{collections::BTreeMap, sync::Arc}; - -use crate::{ - backend::{Backend, BackendWriteOp}, - persisted_entries, + CandidateReceipt, GroupIndex, Hash, SessionIndex, ValidatorIndex, ValidatorSignature, }; -const STORED_BLOCKS_KEY: &[u8] = b"Approvals_StoredBlocks"; - -#[cfg(test)] -pub mod tests; - -/// `DbBackend` is a concrete implementation of the higher-level Backend trait -pub struct DbBackend { - inner: Arc, - config: Config, -} - -impl DbBackend { - /// Create a new [`DbBackend`] with the supplied key-value store and - /// config. - pub fn new(db: Arc, config: Config) -> Self { - DbBackend { inner: db, config } - } -} - -impl Backend for DbBackend { - fn load_block_entry( - &self, - block_hash: &Hash, - ) -> SubsystemResult> { - load_block_entry(&*self.inner, &self.config, block_hash).map(|e| e.map(Into::into)) - } - - fn load_candidate_entry( - &self, - candidate_hash: &CandidateHash, - ) -> SubsystemResult> { - load_candidate_entry(&*self.inner, &self.config, candidate_hash).map(|e| e.map(Into::into)) - } - - fn load_blocks_at_height(&self, block_height: &BlockNumber) -> SubsystemResult> { - load_blocks_at_height(&*self.inner, &self.config, block_height) - } - - fn load_all_blocks(&self) -> SubsystemResult> { - load_all_blocks(&*self.inner, &self.config) - } - - fn load_stored_blocks(&self) -> SubsystemResult> { - load_stored_blocks(&*self.inner, &self.config) - } - - /// Atomically write the list of operations, with later operations taking precedence over prior. 
- fn write(&mut self, ops: I) -> SubsystemResult<()> - where - I: IntoIterator, - { - let mut tx = DBTransaction::new(); - for op in ops { - match op { - BackendWriteOp::WriteStoredBlockRange(stored_block_range) => { - tx.put_vec( - self.config.col_approval_data, - &STORED_BLOCKS_KEY, - stored_block_range.encode(), - ); - }, - BackendWriteOp::DeleteStoredBlockRange => { - tx.delete(self.config.col_approval_data, &STORED_BLOCKS_KEY); - }, - BackendWriteOp::WriteBlocksAtHeight(h, blocks) => { - tx.put_vec( - self.config.col_approval_data, - &blocks_at_height_key(h), - blocks.encode(), - ); - }, - BackendWriteOp::DeleteBlocksAtHeight(h) => { - tx.delete(self.config.col_approval_data, &blocks_at_height_key(h)); - }, - BackendWriteOp::WriteBlockEntry(block_entry) => { - let block_entry: BlockEntry = block_entry.into(); - tx.put_vec( - self.config.col_approval_data, - &block_entry_key(&block_entry.block_hash), - block_entry.encode(), - ); - }, - BackendWriteOp::DeleteBlockEntry(hash) => { - tx.delete(self.config.col_approval_data, &block_entry_key(&hash)); - }, - BackendWriteOp::WriteCandidateEntry(candidate_entry) => { - let candidate_entry: CandidateEntry = candidate_entry.into(); - tx.put_vec( - self.config.col_approval_data, - &candidate_entry_key(&candidate_entry.candidate.hash()), - candidate_entry.encode(), - ); - }, - BackendWriteOp::DeleteCandidateEntry(candidate_hash) => { - tx.delete(self.config.col_approval_data, &candidate_entry_key(&candidate_hash)); - }, - } - } - - self.inner.write(tx).map_err(|e| e.into()) - } -} - -/// A range from earliest..last block number stored within the DB. -#[derive(Encode, Decode, Debug, Clone, PartialEq)] -pub struct StoredBlockRange(pub BlockNumber, pub BlockNumber); - -// slot_duration * 2 + DelayTranche gives the number of delay tranches since the -// unix epoch. -#[derive(Encode, Decode, Clone, Copy, Debug, PartialEq)] -pub struct Tick(u64); +use std::collections::BTreeMap; -/// Convenience type definition -pub type Bitfield = BitVec; - -/// The database config. -#[derive(Debug, Clone, Copy)] -pub struct Config { - /// The column family in the database where data is stored. - pub col_approval_data: u32, - /// The column of the database where rolling session window data is stored. - pub col_session_data: u32, -} +use super::v2::Bitfield; /// Details pertaining to our assignment on a block. #[derive(Encode, Decode, Debug, Clone, PartialEq)] pub struct OurAssignment { - /// Our assignment certificate. - pub cert: AssignmentCertV2, - /// The tranche for which the assignment refers to. + pub cert: AssignmentCert, pub tranche: DelayTranche, - /// Our validator index for the session in which the candidates were included. pub validator_index: ValidatorIndex, - /// Whether the assignment has been triggered already. + // Whether the assignment has been triggered already. pub triggered: bool, - /// A subset of the core indices obtained from the VRF output. - pub assignment_bitfield: CoreBitfield, -} - -/// Metadata regarding a specific tranche of assignments for a specific candidate. -#[derive(Encode, Decode, Debug, Clone, PartialEq)] -pub struct TrancheEntry { - pub tranche: DelayTranche, - // Assigned validators, and the instant we received their assignment, rounded - // to the nearest tick. - pub assignments: Vec<(ValidatorIndex, Tick)>, } +use super::v2::TrancheEntry; /// Metadata regarding approval of a particular candidate within the context of some /// particular block. 
@@ -194,7 +46,7 @@ pub struct ApprovalEntry { pub our_assignment: Option, pub our_approval_sig: Option, // `n_validators` bits. - pub assigned_validators: Bitfield, + pub assignments: Bitfield, pub approved: bool, } @@ -208,148 +60,3 @@ pub struct CandidateEntry { pub block_assignments: BTreeMap, pub approvals: Bitfield, } - -/// Metadata regarding approval of a particular block, by way of approval of the -/// candidates contained within it. -#[derive(Encode, Decode, Debug, Clone, PartialEq)] -pub struct BlockEntry { - pub block_hash: Hash, - pub block_number: BlockNumber, - pub parent_hash: Hash, - pub session: SessionIndex, - pub slot: Slot, - /// Random bytes derived from the VRF submitted within the block by the block - /// author as a credential and used as input to approval assignment criteria. - pub relay_vrf_story: [u8; 32], - // The candidates included as-of this block and the index of the core they are - // leaving. Sorted ascending by core index. - pub candidates: Vec<(CoreIndex, CandidateHash)>, - // A bitfield where the i'th bit corresponds to the i'th candidate in `candidates`. - // The i'th bit is `true` iff the candidate has been approved in the context of this - // block. The block can be considered approved if the bitfield has all bits set to `true`. - pub approved_bitfield: Bitfield, - pub children: Vec, -} - -impl From for Tick { - fn from(tick: crate::Tick) -> Tick { - Tick(tick) - } -} - -impl From for crate::Tick { - fn from(tick: Tick) -> crate::Tick { - tick.0 - } -} - -/// Errors while accessing things from the DB. -#[derive(Debug, derive_more::From, derive_more::Display)] -pub enum Error { - Io(std::io::Error), - InvalidDecoding(parity_scale_codec::Error), -} - -impl std::error::Error for Error {} - -/// Result alias for DB errors. -pub type Result = std::result::Result; - -pub(crate) fn load_decode( - store: &dyn Database, - col_approval_data: u32, - key: &[u8], -) -> Result> { - match store.get(col_approval_data, key)? { - None => Ok(None), - Some(raw) => D::decode(&mut &raw[..]).map(Some).map_err(Into::into), - } -} - -/// The key a given block entry is stored under. -pub(crate) fn block_entry_key(block_hash: &Hash) -> [u8; 46] { - const BLOCK_ENTRY_PREFIX: [u8; 14] = *b"Approvals_blck"; - - let mut key = [0u8; 14 + 32]; - key[0..14].copy_from_slice(&BLOCK_ENTRY_PREFIX); - key[14..][..32].copy_from_slice(block_hash.as_ref()); - - key -} - -/// The key a given candidate entry is stored under. -pub(crate) fn candidate_entry_key(candidate_hash: &CandidateHash) -> [u8; 46] { - const CANDIDATE_ENTRY_PREFIX: [u8; 14] = *b"Approvals_cand"; - - let mut key = [0u8; 14 + 32]; - key[0..14].copy_from_slice(&CANDIDATE_ENTRY_PREFIX); - key[14..][..32].copy_from_slice(candidate_hash.0.as_ref()); - - key -} - -/// The key a set of block hashes corresponding to a block number is stored under. -pub(crate) fn blocks_at_height_key(block_number: BlockNumber) -> [u8; 16] { - const BLOCKS_AT_HEIGHT_PREFIX: [u8; 12] = *b"Approvals_at"; - - let mut key = [0u8; 12 + 4]; - key[0..12].copy_from_slice(&BLOCKS_AT_HEIGHT_PREFIX); - block_number.using_encoded(|s| key[12..16].copy_from_slice(s)); - - key -} - -/// Return all blocks which have entries in the DB, ascending, by height. -pub fn load_all_blocks(store: &dyn Database, config: &Config) -> SubsystemResult> { - let mut hashes = Vec::new(); - if let Some(stored_blocks) = load_stored_blocks(store, config)? 
{ - for height in stored_blocks.0..stored_blocks.1 { - let blocks = load_blocks_at_height(store, config, &height)?; - hashes.extend(blocks); - } - } - - Ok(hashes) -} - -/// Load the stored-blocks key from the state. -pub fn load_stored_blocks( - store: &dyn Database, - config: &Config, -) -> SubsystemResult> { - load_decode(store, config.col_approval_data, STORED_BLOCKS_KEY) - .map_err(|e| SubsystemError::with_origin("approval-voting", e)) -} - -/// Load a blocks-at-height entry for a given block number. -pub fn load_blocks_at_height( - store: &dyn Database, - config: &Config, - block_number: &BlockNumber, -) -> SubsystemResult> { - load_decode(store, config.col_approval_data, &blocks_at_height_key(*block_number)) - .map(|x| x.unwrap_or_default()) - .map_err(|e| SubsystemError::with_origin("approval-voting", e)) -} - -/// Load a block entry from the aux store. -pub fn load_block_entry( - store: &dyn Database, - config: &Config, - block_hash: &Hash, -) -> SubsystemResult> { - load_decode(store, config.col_approval_data, &block_entry_key(block_hash)) - .map(|u: Option| u.map(|v| v.into())) - .map_err(|e| SubsystemError::with_origin("approval-voting", e)) -} - -/// Load a candidate entry from the aux store. -pub fn load_candidate_entry( - store: &dyn Database, - config: &Config, - candidate_hash: &CandidateHash, -) -> SubsystemResult> { - load_decode(store, config.col_approval_data, &candidate_entry_key(candidate_hash)) - .map(|u: Option| u.map(|v| v.into())) - .map_err(|e| SubsystemError::with_origin("approval-voting", e)) -} diff --git a/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs b/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs new file mode 100644 index 000000000000..95004f63b7b1 --- /dev/null +++ b/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs @@ -0,0 +1,268 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Approval DB migration helpers. 
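+//!
+//! The entry point is `migrate_approval_db_v1_to_v2`, which rewrites the stored candidate and
+//! approval entries so that each `OurAssignment` carries a `CoreBitfield` derived from the core
+//! index recorded in the block entry. The remaining helpers fill and sanity-check test data for
+//! the parachains DB upgrade tests.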
+use super::{StoredBlockRange, *};
+use crate::backend::Backend;
+use polkadot_node_primitives::approval::v1::{
+    AssignmentCert, AssignmentCertKind, VrfOutput, VrfProof, VrfSignature, RELAY_VRF_MODULO_CONTEXT,
+};
+use polkadot_node_subsystem_util::database::Database;
+use std::{collections::HashSet, sync::Arc};
+
+use ::test_helpers::dummy_candidate_receipt;
+
+fn dummy_assignment_cert(kind: AssignmentCertKind) -> AssignmentCert {
+    let ctx = schnorrkel::signing_context(RELAY_VRF_MODULO_CONTEXT);
+    let msg = b"test-garbage";
+    let mut prng = rand_core::OsRng;
+    let keypair = schnorrkel::Keypair::generate_with(&mut prng);
+    let (inout, proof, _) = keypair.vrf_sign(ctx.bytes(msg));
+    let out = inout.to_output();
+
+    AssignmentCert { kind, vrf: VrfSignature { output: VrfOutput(out), proof: VrfProof(proof) } }
+}
+fn make_block_entry(
+    block_hash: Hash,
+    parent_hash: Hash,
+    block_number: BlockNumber,
+    candidates: Vec<(CoreIndex, CandidateHash)>,
+) -> BlockEntry {
+    BlockEntry {
+        block_hash,
+        parent_hash,
+        block_number,
+        session: 1,
+        slot: Slot::from(1),
+        relay_vrf_story: [0u8; 32],
+        approved_bitfield: make_bitvec(candidates.len()),
+        candidates,
+        children: Vec::new(),
+    }
+}
+
+fn make_bitvec(len: usize) -> BitVec<u8, BitOrderLsb0> {
+    bitvec::bitvec![u8, BitOrderLsb0; 0; len]
+}
+
+pub fn dummy_assignment_bitfield() -> CoreBitfield {
+    vec![
+        CoreIndex(0),
+        CoreIndex(1),
+        CoreIndex(2),
+        CoreIndex(3),
+        CoreIndex(4),
+        CoreIndex(5),
+        CoreIndex(6),
+        CoreIndex(7),
+    ]
+    .try_into()
+    .expect("If failed, `CoreBitfield` is broken; qed")
+}
+
+/// Migrates `OurAssignment`, `CandidateEntry` and `ApprovalEntry` to version 2.
+/// Returns on any error.
+/// Must only be used in parachains DB migration code - `polkadot-service` crate.
+pub fn migrate_approval_db_v1_to_v2(db: Arc<dyn Database>, config: Config) -> Result<()> {
+    let mut backend = crate::DbBackend::new(db, config);
+    let all_blocks = backend
+        .load_all_blocks()
+        .map_err(|e| Error::InternalError(e))?
+        .iter()
+        .filter_map(|block_hash| {
+            backend.load_block_entry(block_hash).map_err(|e| Error::InternalError(e)).ok()?
+        })
+        .collect::<Vec<_>>();
+
+    gum::info!(
+        target: crate::LOG_TARGET,
+        "Migrating candidate entries on top of {} blocks",
+        all_blocks.len()
+    );
+
+    let mut overlay = crate::OverlayedBackend::new(&backend);
+    let mut counter = 0;
+    // Get all candidate entries, approval entries and convert each of them.
+    for block in all_blocks {
+        for (core_index, candidate_hash) in block.candidates() {
+            // Loading the candidate will also perform the conversion to the updated format and return
+            // that representation.
+            if let Some(mut candidate_entry) = backend
+                .load_candidate_entry_v1(&candidate_hash)
+                .map_err(|e| Error::InternalError(e))?
+            {
+                // Here we patch the core bitfield for all assignments of the candidate.
+                for (_, approval_entry) in candidate_entry.block_assignments.iter_mut() {
+                    if let Some(our_assignment) = approval_entry.our_assignment_mut() {
+                        // Ensure we are actually patching a dummy bitfield produced by the `load_candidate_entry_v1` code.
+                        // Cannot happen in practice, but better to double check.
+                        if our_assignment.assignment_bitfield() == &dummy_assignment_bitfield() {
+                            *our_assignment.assignment_bitfield_mut() = (*core_index).into();
+                        } else {
+                            gum::warn!(
+                                target: crate::LOG_TARGET,
+                                "Tried to convert an already valid bitfield."
+                            );
+                        }
+                    }
+                }
+                // Write the updated representation.
+                overlay.write_candidate_entry(candidate_entry);
+                counter += 1;
+            }
+        }
+    }
+
+    gum::info!(target: crate::LOG_TARGET, "Migrated {} entries", counter);
+
+    // Commit all changes to DB.
+    let write_ops = overlay.into_write_ops();
+    backend.write(write_ops).unwrap();
+
+    Ok(())
+}
+
+// Checks that the migration doesn't leave the DB in an inconsistent state.
+// This function is to be used in tests.
+pub fn migrate_approval_db_v1_to_v2_sanity_check(
+    db: Arc<dyn Database>,
+    config: Config,
+    expected_candidates: HashSet<CandidateHash>,
+) -> Result<()> {
+    let backend = crate::DbBackend::new(db, config);
+
+    let all_blocks = backend
+        .load_all_blocks()
+        .map_err(|e| Error::InternalError(e))?
+        .iter()
+        .filter_map(|block_hash| {
+            backend.load_block_entry(block_hash).map_err(|e| Error::InternalError(e)).ok()?
+        })
+        .collect::<Vec<_>>();
+
+    let mut candidates = HashSet::new();
+
+    // Iterate all blocks and approval entries.
+    for block in all_blocks {
+        for (core_index, candidate_hash) in block.candidates() {
+            // Loading the candidate will also perform the conversion to the updated format and return
+            // that representation.
+            if let Some(mut candidate_entry) = backend
+                .load_candidate_entry(&candidate_hash)
+                .map_err(|e| Error::InternalError(e))?
+            {
+                // We expect that all assignment bitfields have only one bit set, which corresponds to the
+                // core_index in the candidates mapping of the block entry.
+                for (_, approval_entry) in candidate_entry.block_assignments.iter_mut() {
+                    if let Some(our_assignment) = approval_entry.our_assignment_mut() {
+                        assert_eq!(our_assignment.assignment_bitfield().count_ones(), 1);
+                        assert_eq!(
+                            our_assignment.assignment_bitfield().first_one().unwrap(),
+                            core_index.0 as usize
+                        );
+                    }
+                }
+                candidates.insert(candidate_entry.candidate.hash());
+            }
+        }
+    }
+
+    assert_eq!(candidates, expected_candidates);
+
+    Ok(())
+}
+
+// Fills the db with dummy data in v1 scheme.
+pub fn migrate_approval_db_v1_to_v2_fill_test_data( + db: Arc, + config: Config, +) -> Result> { + let mut backend = crate::DbBackend::new(db.clone(), config); + let mut overlay_db = crate::OverlayedBackend::new(&backend); + let mut expected_candidates = HashSet::new(); + + const RELAY_BLOCK_COUNT: u32 = 10; + + let range = StoredBlockRange(1, 11); + overlay_db.write_stored_block_range(range.clone()); + + for relay_number in 1..=RELAY_BLOCK_COUNT { + let relay_hash = Hash::repeat_byte(relay_number as u8); + let assignment_core_index = CoreIndex(relay_number); + let candidate = dummy_candidate_receipt(relay_hash); + let candidate_hash = candidate.hash(); + + let at_height = vec![relay_hash]; + + let block_entry = make_block_entry( + relay_hash, + Default::default(), + relay_number, + vec![(assignment_core_index, candidate_hash)], + ); + + let dummy_assignment = crate::approval_db::v1::OurAssignment { + cert: dummy_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }).into(), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + }; + + let candidate_entry = crate::approval_db::v1::CandidateEntry { + candidate, + session: 123, + block_assignments: vec![( + relay_hash, + crate::approval_db::v1::ApprovalEntry { + tranches: Vec::new(), + backing_group: GroupIndex(1), + our_assignment: Some(dummy_assignment), + our_approval_sig: None, + assignments: Default::default(), + approved: false, + }, + )] + .into_iter() + .collect(), + approvals: Default::default(), + }; + + overlay_db.write_blocks_at_height(relay_number, at_height.clone()); + overlay_db.write_block_entry(block_entry.clone().into()); + + expected_candidates.insert(candidate_entry.candidate.hash()); + db.write(write_candidate_entry_v1(candidate_entry, config.clone())).unwrap(); + } + + let write_ops = overlay_db.into_write_ops(); + backend.write(write_ops).unwrap(); + + Ok(expected_candidates) +} + +// Low level DB helper to write a candidate entry in v1 scheme. +fn write_candidate_entry_v1( + candidate_entry: crate::approval_db::v1::CandidateEntry, + config: Config, +) -> DBTransaction { + let mut tx = DBTransaction::new(); + tx.put_vec( + config.col_approval_data, + &candidate_entry_key(&candidate_entry.candidate.hash()), + candidate_entry.encode(), + ); + tx +} diff --git a/node/core/approval-voting/src/approval_db/v2/mod.rs b/node/core/approval-voting/src/approval_db/v2/mod.rs new file mode 100644 index 000000000000..dbe388e1a131 --- /dev/null +++ b/node/core/approval-voting/src/approval_db/v2/mod.rs @@ -0,0 +1,383 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Version 1 of the DB schema. 
+ +use parity_scale_codec::{Decode, Encode}; +use polkadot_node_primitives::approval::{ + v1::DelayTranche, + v2::{AssignmentCertV2, CoreBitfield}, +}; +use polkadot_node_subsystem::{SubsystemError, SubsystemResult}; +use polkadot_node_subsystem_util::database::{DBTransaction, Database}; +use polkadot_primitives::{ + BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, Hash, SessionIndex, + ValidatorIndex, ValidatorSignature, +}; + +use sp_consensus_slots::Slot; + +use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; +use std::{collections::BTreeMap, sync::Arc}; + +use crate::{ + backend::{Backend, BackendWriteOp}, + persisted_entries, +}; + +const STORED_BLOCKS_KEY: &[u8] = b"Approvals_StoredBlocks"; + +mod migration_helpers; +#[cfg(test)] +pub mod tests; + +// DB migration support. +pub use migration_helpers::{ + dummy_assignment_bitfield, migrate_approval_db_v1_to_v2, + migrate_approval_db_v1_to_v2_fill_test_data, migrate_approval_db_v1_to_v2_sanity_check, +}; + +/// `DbBackend` is a concrete implementation of the higher-level Backend trait +pub struct DbBackend { + inner: Arc, + config: Config, +} + +impl DbBackend { + /// Create a new [`DbBackend`] with the supplied key-value store and + /// config. + pub fn new(db: Arc, config: Config) -> Self { + DbBackend { inner: db, config } + } +} + +impl Backend for DbBackend { + fn load_block_entry( + &self, + block_hash: &Hash, + ) -> SubsystemResult> { + load_block_entry(&*self.inner, &self.config, block_hash).map(|e| e.map(Into::into)) + } + + fn load_candidate_entry( + &self, + candidate_hash: &CandidateHash, + ) -> SubsystemResult> { + load_candidate_entry(&*self.inner, &self.config, candidate_hash).map(|e| e.map(Into::into)) + } + + fn load_candidate_entry_v1( + &self, + candidate_hash: &CandidateHash, + ) -> SubsystemResult> { + load_candidate_entry_v1(&*self.inner, &self.config, candidate_hash) + .map(|e| e.map(Into::into)) + } + + fn load_blocks_at_height(&self, block_height: &BlockNumber) -> SubsystemResult> { + load_blocks_at_height(&*self.inner, &self.config, block_height) + } + + fn load_all_blocks(&self) -> SubsystemResult> { + load_all_blocks(&*self.inner, &self.config) + } + + fn load_stored_blocks(&self) -> SubsystemResult> { + load_stored_blocks(&*self.inner, &self.config) + } + + /// Atomically write the list of operations, with later operations taking precedence over prior. 
+ fn write(&mut self, ops: I) -> SubsystemResult<()> + where + I: IntoIterator, + { + let mut tx = DBTransaction::new(); + for op in ops { + match op { + BackendWriteOp::WriteStoredBlockRange(stored_block_range) => { + tx.put_vec( + self.config.col_approval_data, + &STORED_BLOCKS_KEY, + stored_block_range.encode(), + ); + }, + BackendWriteOp::DeleteStoredBlockRange => { + tx.delete(self.config.col_approval_data, &STORED_BLOCKS_KEY); + }, + BackendWriteOp::WriteBlocksAtHeight(h, blocks) => { + tx.put_vec( + self.config.col_approval_data, + &blocks_at_height_key(h), + blocks.encode(), + ); + }, + BackendWriteOp::DeleteBlocksAtHeight(h) => { + tx.delete(self.config.col_approval_data, &blocks_at_height_key(h)); + }, + BackendWriteOp::WriteBlockEntry(block_entry) => { + let block_entry: BlockEntry = block_entry.into(); + tx.put_vec( + self.config.col_approval_data, + &block_entry_key(&block_entry.block_hash), + block_entry.encode(), + ); + }, + BackendWriteOp::DeleteBlockEntry(hash) => { + tx.delete(self.config.col_approval_data, &block_entry_key(&hash)); + }, + BackendWriteOp::WriteCandidateEntry(candidate_entry) => { + let candidate_entry: CandidateEntry = candidate_entry.into(); + tx.put_vec( + self.config.col_approval_data, + &candidate_entry_key(&candidate_entry.candidate.hash()), + candidate_entry.encode(), + ); + }, + BackendWriteOp::DeleteCandidateEntry(candidate_hash) => { + tx.delete(self.config.col_approval_data, &candidate_entry_key(&candidate_hash)); + }, + } + } + + self.inner.write(tx).map_err(|e| e.into()) + } +} + +/// A range from earliest..last block number stored within the DB. +#[derive(Encode, Decode, Debug, Clone, PartialEq)] +pub struct StoredBlockRange(pub BlockNumber, pub BlockNumber); + +// slot_duration * 2 + DelayTranche gives the number of delay tranches since the +// unix epoch. +#[derive(Encode, Decode, Clone, Copy, Debug, PartialEq)] +pub struct Tick(u64); + +/// Convenience type definition +pub type Bitfield = BitVec; + +/// The database config. +#[derive(Debug, Clone, Copy)] +pub struct Config { + /// The column family in the database where data is stored. + pub col_approval_data: u32, + /// The column of the database where rolling session window data is stored. + pub col_session_data: u32, +} + +/// Details pertaining to our assignment on a block. +#[derive(Encode, Decode, Debug, Clone, PartialEq)] +pub struct OurAssignment { + /// Our assignment certificate. + pub cert: AssignmentCertV2, + /// The tranche for which the assignment refers to. + pub tranche: DelayTranche, + /// Our validator index for the session in which the candidates were included. + pub validator_index: ValidatorIndex, + /// Whether the assignment has been triggered already. + pub triggered: bool, + /// A subset of the core indices obtained from the VRF output. + pub assignment_bitfield: CoreBitfield, +} + +/// Metadata regarding a specific tranche of assignments for a specific candidate. +#[derive(Encode, Decode, Debug, Clone, PartialEq)] +pub struct TrancheEntry { + pub tranche: DelayTranche, + // Assigned validators, and the instant we received their assignment, rounded + // to the nearest tick. + pub assignments: Vec<(ValidatorIndex, Tick)>, +} + +/// Metadata regarding approval of a particular candidate within the context of some +/// particular block. +#[derive(Encode, Decode, Debug, Clone, PartialEq)] +pub struct ApprovalEntry { + pub tranches: Vec, + pub backing_group: GroupIndex, + pub our_assignment: Option, + pub our_approval_sig: Option, + // `n_validators` bits. 
+ pub assigned_validators: Bitfield, + pub approved: bool, +} + +/// Metadata regarding approval of a particular candidate. +#[derive(Encode, Decode, Debug, Clone, PartialEq)] +pub struct CandidateEntry { + pub candidate: CandidateReceipt, + pub session: SessionIndex, + // Assignments are based on blocks, so we need to track assignments separately + // based on the block we are looking at. + pub block_assignments: BTreeMap, + pub approvals: Bitfield, +} + +/// Metadata regarding approval of a particular block, by way of approval of the +/// candidates contained within it. +#[derive(Encode, Decode, Debug, Clone, PartialEq)] +pub struct BlockEntry { + pub block_hash: Hash, + pub block_number: BlockNumber, + pub parent_hash: Hash, + pub session: SessionIndex, + pub slot: Slot, + /// Random bytes derived from the VRF submitted within the block by the block + /// author as a credential and used as input to approval assignment criteria. + pub relay_vrf_story: [u8; 32], + // The candidates included as-of this block and the index of the core they are + // leaving. Sorted ascending by core index. + pub candidates: Vec<(CoreIndex, CandidateHash)>, + // A bitfield where the i'th bit corresponds to the i'th candidate in `candidates`. + // The i'th bit is `true` iff the candidate has been approved in the context of this + // block. The block can be considered approved if the bitfield has all bits set to `true`. + pub approved_bitfield: Bitfield, + pub children: Vec, +} + +impl From for Tick { + fn from(tick: crate::Tick) -> Tick { + Tick(tick) + } +} + +impl From for crate::Tick { + fn from(tick: Tick) -> crate::Tick { + tick.0 + } +} + +/// Errors while accessing things from the DB. +#[derive(Debug, derive_more::From, derive_more::Display)] +pub enum Error { + Io(std::io::Error), + InvalidDecoding(parity_scale_codec::Error), + InternalError(SubsystemError), +} + +impl std::error::Error for Error {} + +/// Result alias for DB errors. +pub type Result = std::result::Result; + +pub(crate) fn load_decode( + store: &dyn Database, + col_approval_data: u32, + key: &[u8], +) -> Result> { + match store.get(col_approval_data, key)? { + None => Ok(None), + Some(raw) => D::decode(&mut &raw[..]).map(Some).map_err(Into::into), + } +} + +/// The key a given block entry is stored under. +pub(crate) fn block_entry_key(block_hash: &Hash) -> [u8; 46] { + const BLOCK_ENTRY_PREFIX: [u8; 14] = *b"Approvals_blck"; + + let mut key = [0u8; 14 + 32]; + key[0..14].copy_from_slice(&BLOCK_ENTRY_PREFIX); + key[14..][..32].copy_from_slice(block_hash.as_ref()); + + key +} + +/// The key a given candidate entry is stored under. +pub(crate) fn candidate_entry_key(candidate_hash: &CandidateHash) -> [u8; 46] { + const CANDIDATE_ENTRY_PREFIX: [u8; 14] = *b"Approvals_cand"; + + let mut key = [0u8; 14 + 32]; + key[0..14].copy_from_slice(&CANDIDATE_ENTRY_PREFIX); + key[14..][..32].copy_from_slice(candidate_hash.0.as_ref()); + + key +} + +/// The key a set of block hashes corresponding to a block number is stored under. +pub(crate) fn blocks_at_height_key(block_number: BlockNumber) -> [u8; 16] { + const BLOCKS_AT_HEIGHT_PREFIX: [u8; 12] = *b"Approvals_at"; + + let mut key = [0u8; 12 + 4]; + key[0..12].copy_from_slice(&BLOCKS_AT_HEIGHT_PREFIX); + block_number.using_encoded(|s| key[12..16].copy_from_slice(s)); + + key +} + +/// Return all blocks which have entries in the DB, ascending, by height. 
+pub fn load_all_blocks(store: &dyn Database, config: &Config) -> SubsystemResult> { + let mut hashes = Vec::new(); + if let Some(stored_blocks) = load_stored_blocks(store, config)? { + for height in stored_blocks.0..stored_blocks.1 { + let blocks = load_blocks_at_height(store, config, &height)?; + hashes.extend(blocks); + } + } + + Ok(hashes) +} + +/// Load the stored-blocks key from the state. +pub fn load_stored_blocks( + store: &dyn Database, + config: &Config, +) -> SubsystemResult> { + load_decode(store, config.col_approval_data, STORED_BLOCKS_KEY) + .map_err(|e| SubsystemError::with_origin("approval-voting", e)) +} + +/// Load a blocks-at-height entry for a given block number. +pub fn load_blocks_at_height( + store: &dyn Database, + config: &Config, + block_number: &BlockNumber, +) -> SubsystemResult> { + load_decode(store, config.col_approval_data, &blocks_at_height_key(*block_number)) + .map(|x| x.unwrap_or_default()) + .map_err(|e| SubsystemError::with_origin("approval-voting", e)) +} + +/// Load a block entry from the aux store. +pub fn load_block_entry( + store: &dyn Database, + config: &Config, + block_hash: &Hash, +) -> SubsystemResult> { + load_decode(store, config.col_approval_data, &block_entry_key(block_hash)) + .map(|u: Option| u.map(|v| v.into())) + .map_err(|e| SubsystemError::with_origin("approval-voting", e)) +} + +/// Load a candidate entry from the aux store in current version format. +pub fn load_candidate_entry( + store: &dyn Database, + config: &Config, + candidate_hash: &CandidateHash, +) -> SubsystemResult> { + load_decode(store, config.col_approval_data, &candidate_entry_key(candidate_hash)) + .map(|u: Option| u.map(|v| v.into())) + .map_err(|e| SubsystemError::with_origin("approval-voting", e)) +} + +/// Load a candidate entry from the aux store in v1 format. 
+pub fn load_candidate_entry_v1( + store: &dyn Database, + config: &Config, + candidate_hash: &CandidateHash, +) -> SubsystemResult> { + load_decode(store, config.col_approval_data, &candidate_entry_key(candidate_hash)) + .map(|u: Option| u.map(|v| v.into())) + .map_err(|e| SubsystemError::with_origin("approval-voting", e)) +} diff --git a/node/core/approval-voting/src/approval_db/v1/tests.rs b/node/core/approval-voting/src/approval_db/v2/tests.rs similarity index 100% rename from node/core/approval-voting/src/approval_db/v1/tests.rs rename to node/core/approval-voting/src/approval_db/v2/tests.rs index fb22b08c00ff..32db5545f002 100644 --- a/node/core/approval-voting/src/approval_db/v1/tests.rs +++ b/node/core/approval-voting/src/approval_db/v2/tests.rs @@ -42,10 +42,6 @@ fn make_db() -> (DbBackend, Arc) { (DbBackend::new(db_writer.clone(), TEST_CONFIG), db_writer) } -fn make_bitvec(len: usize) -> BitVec { - bitvec::bitvec![u8, BitOrderLsb0; 0; len] -} - fn make_block_entry( block_hash: Hash, parent_hash: Hash, @@ -65,6 +61,10 @@ fn make_block_entry( } } +fn make_bitvec(len: usize) -> BitVec { + bitvec::bitvec![u8, BitOrderLsb0; 0; len] +} + fn make_candidate(para_id: ParaId, relay_parent: Hash) -> CandidateReceipt { let mut c = dummy_candidate_receipt(dummy_hash()); diff --git a/node/core/approval-voting/src/backend.rs b/node/core/approval-voting/src/backend.rs index 87d67c52c467..d3af78b3036c 100644 --- a/node/core/approval-voting/src/backend.rs +++ b/node/core/approval-voting/src/backend.rs @@ -27,7 +27,7 @@ use polkadot_primitives::{BlockNumber, CandidateHash, Hash}; use std::collections::HashMap; use super::{ - approval_db::v1::StoredBlockRange, + approval_db::v2::StoredBlockRange, persisted_entries::{BlockEntry, CandidateEntry}, }; @@ -44,6 +44,8 @@ pub enum BackendWriteOp { } /// An abstraction over backend storage for the logic of this subsystem. +/// Implementation must always target latest storage version, but we might introduce +/// methods to enable db migration, like `load_candidate_entry_v1`. pub trait Backend { /// Load a block entry from the DB. fn load_block_entry(&self, hash: &Hash) -> SubsystemResult>; @@ -52,6 +54,11 @@ pub trait Backend { &self, candidate_hash: &CandidateHash, ) -> SubsystemResult>; + /// Load a candidate entry from the DB with scheme version 1. + fn load_candidate_entry_v1( + &self, + candidate_hash: &CandidateHash, + ) -> SubsystemResult>; /// Load all blocks at a specific height. fn load_blocks_at_height(&self, height: &BlockNumber) -> SubsystemResult>; /// Load all block from the DB. diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index 899fec0a45f2..a1d714204bba 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -32,6 +32,7 @@ use sp_application_crypto::ByteArray; use merlin::Transcript; use schnorrkel::vrf::VRFInOut; +use super::approval_db::v2::dummy_assignment_bitfield; use itertools::Itertools; use std::collections::{hash_map::Entry, HashMap}; @@ -73,10 +74,15 @@ impl OurAssignment { pub(crate) fn assignment_bitfield(&self) -> &CoreBitfield { &self.assignment_bitfield } + + // Needed for v1 to v2 db migration. 
+ pub(crate) fn assignment_bitfield_mut(&mut self) -> &mut CoreBitfield { + &mut self.assignment_bitfield + } } -impl From for OurAssignment { - fn from(entry: crate::approval_db::v1::OurAssignment) -> Self { +impl From for OurAssignment { + fn from(entry: crate::approval_db::v2::OurAssignment) -> Self { OurAssignment { cert: entry.cert, tranche: entry.tranche, @@ -87,7 +93,7 @@ impl From for OurAssignment { } } -impl From for crate::approval_db::v1::OurAssignment { +impl From for crate::approval_db::v2::OurAssignment { fn from(entry: OurAssignment) -> Self { Self { cert: entry.cert, @@ -817,6 +823,22 @@ fn is_in_backing_group( validator_groups.get(group).map_or(false, |g| g.contains(&validator)) } +/// Migration helpers. +impl From for OurAssignment { + fn from(value: crate::approval_db::v1::OurAssignment) -> Self { + Self { + cert: value.cert.into(), + tranche: value.tranche, + validator_index: value.validator_index, + // Whether the assignment has been triggered already. + triggered: value.triggered, + // This is a dummy value, assignment bitfield will be set later. + // The migration sanity check will test for 1 single bit being set here. + assignment_bitfield: dummy_assignment_bitfield(), + } + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/node/core/approval-voting/src/import.rs b/node/core/approval-voting/src/import.rs index e04398d5465c..3e3196be118a 100644 --- a/node/core/approval-voting/src/import.rs +++ b/node/core/approval-voting/src/import.rs @@ -56,7 +56,7 @@ use futures::{channel::oneshot, prelude::*}; use std::collections::HashMap; -use super::approval_db::v1; +use super::approval_db::v2; use crate::{ backend::{Backend, OverlayedBackend}, criteria::{AssignmentCriteria, OurAssignment}, @@ -498,7 +498,7 @@ pub(crate) async fn handle_new_head( ctx.send_message(ChainSelectionMessage::Approved(block_hash)).await; } - let block_entry = v1::BlockEntry { + let block_entry = v2::BlockEntry { block_hash, parent_hash: block_header.parent_hash, block_number: block_header.number, @@ -589,7 +589,7 @@ pub(crate) async fn handle_new_head( #[cfg(test)] pub(crate) mod tests { use super::*; - use crate::{approval_db::v1::DbBackend, RuntimeInfo, RuntimeInfoConfig}; + use crate::{approval_db::v2::DbBackend, RuntimeInfo, RuntimeInfoConfig}; use ::test_helpers::{dummy_candidate_receipt, dummy_hash}; use assert_matches::assert_matches; use polkadot_node_primitives::{ @@ -609,7 +609,7 @@ pub(crate) mod tests { pub(crate) use sp_runtime::{Digest, DigestItem}; use std::{pin::Pin, sync::Arc}; - use crate::{approval_db::v1::Config as DatabaseConfig, criteria, BlockEntry}; + use crate::{approval_db::v2::Config as DatabaseConfig, criteria, BlockEntry}; const DATA_COL: u32 = 0; const SESSION_DATA_COL: u32 = 1; @@ -1256,7 +1256,7 @@ pub(crate) mod tests { let (state, mut session_info_provider) = single_session_state(); overlay_db.write_block_entry( - v1::BlockEntry { + v2::BlockEntry { block_hash: parent_hash, parent_hash: Default::default(), block_number: 4, @@ -1298,7 +1298,7 @@ pub(crate) mod tests { // the first candidate should be insta-approved // the second should not let entry: BlockEntry = - v1::load_block_entry(db_writer.as_ref(), &TEST_CONFIG, &hash) + v2::load_block_entry(db_writer.as_ref(), &TEST_CONFIG, &hash) .unwrap() .unwrap() .into(); diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index 30e091ae08cb..020e2f60664b 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -82,7 +82,7 @@ use 
persisted_entries::{ApprovalEntry, BlockEntry, CandidateEntry}; use time::{slot_number_to_tick, Clock, ClockExt, SystemClock, Tick}; mod approval_checking; -mod approval_db; +pub mod approval_db; mod backend; mod criteria; mod import; @@ -91,7 +91,7 @@ mod persisted_entries; mod time; use crate::{ - approval_db::v1::{Config as DatabaseConfig, DbBackend}, + approval_db::v2::{Config as DatabaseConfig, DbBackend}, backend::{Backend, OverlayedBackend}, criteria::InvalidAssignmentReason, }; @@ -112,7 +112,7 @@ const APPROVAL_CACHE_SIZE: NonZeroUsize = match NonZeroUsize::new(1024) { const TICK_TOO_FAR_IN_FUTURE: Tick = 20; // 10 seconds. const APPROVAL_DELAY: Tick = 2; -const LOG_TARGET: &str = "parachain::approval-voting"; +pub(crate) const LOG_TARGET: &str = "parachain::approval-voting"; /// Configuration for the approval voting subsystem #[derive(Debug, Clone)] @@ -372,11 +372,11 @@ impl ApprovalVotingSubsystem { /// Revert to the block corresponding to the specified `hash`. /// The operation is not allowed for blocks older than the last finalized one. pub fn revert_to(&self, hash: Hash) -> Result<(), SubsystemError> { - let config = approval_db::v1::Config { + let config = approval_db::v2::Config { col_approval_data: self.db_config.col_approval_data, col_session_data: self.db_config.col_session_data, }; - let mut backend = approval_db::v1::DbBackend::new(self.db.clone(), config); + let mut backend = approval_db::v2::DbBackend::new(self.db.clone(), config); let mut overlay = OverlayedBackend::new(&backend); ops::revert_to(&mut overlay, hash)?; diff --git a/node/core/approval-voting/src/ops.rs b/node/core/approval-voting/src/ops.rs index 4d6dc5e7ad66..1813ef82cedd 100644 --- a/node/core/approval-voting/src/ops.rs +++ b/node/core/approval-voting/src/ops.rs @@ -25,7 +25,7 @@ use polkadot_primitives::{BlockNumber, CandidateHash, CandidateReceipt, GroupInd use std::collections::{hash_map::Entry, BTreeMap, HashMap}; use super::{ - approval_db::v1::{OurAssignment, StoredBlockRange}, + approval_db::v2::{OurAssignment, StoredBlockRange}, backend::{Backend, OverlayedBackend}, persisted_entries::{ApprovalEntry, BlockEntry, CandidateEntry}, LOG_TARGET, diff --git a/node/core/approval-voting/src/persisted_entries.rs b/node/core/approval-voting/src/persisted_entries.rs index e2a55f022194..bf9b0b44d26f 100644 --- a/node/core/approval-voting/src/persisted_entries.rs +++ b/node/core/approval-voting/src/persisted_entries.rs @@ -33,7 +33,7 @@ use sp_consensus_slots::Slot; use bitvec::{order::Lsb0 as BitOrderLsb0, slice::BitSlice}; use std::collections::BTreeMap; -use crate::approval_db::v1::Bitfield; +use crate::approval_db::v2::Bitfield; use super::{criteria::OurAssignment, time::Tick}; @@ -58,8 +58,8 @@ impl TrancheEntry { } } -impl From for TrancheEntry { - fn from(entry: crate::approval_db::v1::TrancheEntry) -> Self { +impl From for TrancheEntry { + fn from(entry: crate::approval_db::v2::TrancheEntry) -> Self { TrancheEntry { tranche: entry.tranche, assignments: entry.assignments.into_iter().map(|(v, t)| (v, t.into())).collect(), @@ -67,7 +67,7 @@ impl From for TrancheEntry { } } -impl From for crate::approval_db::v1::TrancheEntry { +impl From for crate::approval_db::v2::TrancheEntry { fn from(entry: TrancheEntry) -> Self { Self { tranche: entry.tranche, @@ -115,6 +115,11 @@ impl ApprovalEntry { self.our_assignment.as_ref() } + // Needed for v1 to v2 migration. 
+ pub fn our_assignment_mut(&mut self) -> Option<&mut OurAssignment> { + self.our_assignment.as_mut() + } + // Note that our assignment is triggered. No-op if already triggered. pub fn trigger_our_assignment( &mut self, @@ -234,8 +239,8 @@ impl ApprovalEntry { } } -impl From for ApprovalEntry { - fn from(entry: crate::approval_db::v1::ApprovalEntry) -> Self { +impl From for ApprovalEntry { + fn from(entry: crate::approval_db::v2::ApprovalEntry) -> Self { ApprovalEntry { tranches: entry.tranches.into_iter().map(Into::into).collect(), backing_group: entry.backing_group, @@ -247,7 +252,7 @@ impl From for ApprovalEntry { } } -impl From for crate::approval_db::v1::ApprovalEntry { +impl From for crate::approval_db::v2::ApprovalEntry { fn from(entry: ApprovalEntry) -> Self { Self { tranches: entry.tranches.into_iter().map(Into::into).collect(), @@ -305,8 +310,8 @@ impl CandidateEntry { } } -impl From for CandidateEntry { - fn from(entry: crate::approval_db::v1::CandidateEntry) -> Self { +impl From for CandidateEntry { + fn from(entry: crate::approval_db::v2::CandidateEntry) -> Self { CandidateEntry { candidate: entry.candidate, session: entry.session, @@ -320,7 +325,7 @@ impl From for CandidateEntry { } } -impl From for crate::approval_db::v1::CandidateEntry { +impl From for crate::approval_db::v2::CandidateEntry { fn from(entry: CandidateEntry) -> Self { Self { candidate: entry.candidate, @@ -429,8 +434,8 @@ impl BlockEntry { } } -impl From for BlockEntry { - fn from(entry: crate::approval_db::v1::BlockEntry) -> Self { +impl From for BlockEntry { + fn from(entry: crate::approval_db::v2::BlockEntry) -> Self { BlockEntry { block_hash: entry.block_hash, parent_hash: entry.parent_hash, @@ -445,7 +450,7 @@ impl From for BlockEntry { } } -impl From for crate::approval_db::v1::BlockEntry { +impl From for crate::approval_db::v2::BlockEntry { fn from(entry: BlockEntry) -> Self { Self { block_hash: entry.block_hash, @@ -460,3 +465,32 @@ impl From for crate::approval_db::v1::BlockEntry { } } } + +/// Migration helpers. 
+impl From for CandidateEntry { + fn from(value: crate::approval_db::v1::CandidateEntry) -> Self { + Self { + approvals: value.approvals, + block_assignments: value + .block_assignments + .into_iter() + .map(|(h, ae)| (h, ae.into())) + .collect(), + candidate: value.candidate, + session: value.session, + } + } +} + +impl From for ApprovalEntry { + fn from(value: crate::approval_db::v1::ApprovalEntry) -> Self { + ApprovalEntry { + tranches: value.tranches.into_iter().map(|tranche| tranche.into()).collect(), + backing_group: value.backing_group, + our_assignment: value.our_assignment.map(|assignment| assignment.into()), + our_approval_sig: value.our_approval_sig, + assigned_validators: value.assignments, + approved: value.approved, + } + } +} diff --git a/node/core/approval-voting/src/tests.rs b/node/core/approval-voting/src/tests.rs index a548e3db3db1..b53f46f18d45 100644 --- a/node/core/approval-voting/src/tests.rs +++ b/node/core/approval-voting/src/tests.rs @@ -56,7 +56,7 @@ use std::{ }; use super::{ - approval_db::v1::StoredBlockRange, + approval_db::v2::StoredBlockRange, backend::BackendWriteOp, import::tests::{ garbage_vrf_signature, AllowedSlots, BabeEpoch, BabeEpochConfiguration, @@ -116,7 +116,7 @@ fn make_sync_oracle(val: bool) -> (Box, TestSyncOracleHan #[cfg(test)] pub mod test_constants { - use crate::approval_db::v1::Config as DatabaseConfig; + use crate::approval_db::v2::Config as DatabaseConfig; const DATA_COL: u32 = 0; const SESSION_DATA_COL: u32 = 1; @@ -290,6 +290,13 @@ impl Backend for TestStoreInner { Ok(self.candidate_entries.get(candidate_hash).cloned()) } + fn load_candidate_entry_v1( + &self, + candidate_hash: &CandidateHash, + ) -> SubsystemResult> { + self.load_candidate_entry(candidate_hash) + } + fn load_blocks_at_height(&self, height: &BlockNumber) -> SubsystemResult> { Ok(self.blocks_at_height.get(height).cloned().unwrap_or_default()) } @@ -363,6 +370,13 @@ impl Backend for TestStore { store.load_candidate_entry(candidate_hash) } + fn load_candidate_entry_v1( + &self, + candidate_hash: &CandidateHash, + ) -> SubsystemResult> { + self.load_candidate_entry(candidate_hash) + } + fn load_blocks_at_height(&self, height: &BlockNumber) -> SubsystemResult> { let store = self.store.lock(); store.load_blocks_at_height(height) @@ -2271,7 +2285,7 @@ fn subsystem_validate_approvals_cache() { let mut assignments = HashMap::new(); let _ = assignments.insert( CoreIndex(0), - approval_db::v1::OurAssignment { + approval_db::v2::OurAssignment { cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }) .into(), assignment_bitfield: CoreIndex(0u32).into(), @@ -2284,7 +2298,7 @@ fn subsystem_validate_approvals_cache() { let _ = assignments.insert( CoreIndex(0), - approval_db::v1::OurAssignment { + approval_db::v2::OurAssignment { cert: garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield: vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)] .try_into() @@ -2508,7 +2522,7 @@ where let mut assignments = HashMap::new(); let _ = assignments.insert( CoreIndex(0), - approval_db::v1::OurAssignment { + approval_db::v2::OurAssignment { cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }) .into(), assignment_bitfield: CoreIndex(0).into(), diff --git a/node/service/src/parachains_db/upgrade.rs b/node/service/src/parachains_db/upgrade.rs index 1060ad9f6b70..3001a53aec02 100644 --- a/node/service/src/parachains_db/upgrade.rs +++ b/node/service/src/parachains_db/upgrade.rs @@ -15,8 +15,6 @@ #![cfg(feature = 
"full-node")] -use kvdb::DBTransaction; - use super::{columns, other_io_error, DatabaseKind, LOG_TARGET}; use std::{ fs, io, @@ -24,6 +22,9 @@ use std::{ str::FromStr, }; +use polkadot_node_core_approval_voting::approval_db::v2::{ + migrate_approval_db_v1_to_v2, Config as ApprovalDbConfig, +}; type Version = u32; /// Version file name. @@ -41,6 +42,8 @@ pub enum Error { CorruptedVersionFile, #[error("Parachains DB has a future version (expected {current:?}, found {got:?})")] FutureVersion { current: Version, got: Version }, + #[error("Parachain DB migration failed")] + MigrationFailed, } impl From for io::Error { @@ -132,17 +135,49 @@ fn migrate_from_version_1_to_2(path: &Path, db_kind: DatabaseKind) -> Result<(), }) } +// Migrade approval voting database. `OurAssignment` has been changed to support the v2 assignments. +// As these are backwards compatible, we'll convert the old entries in the new format. fn migrate_from_version_2_to_3(path: &Path, db_kind: DatabaseKind) -> Result<(), Error> { gum::info!(target: LOG_TARGET, "Migrating parachains db from version 2 to version 3 ..."); + use polkadot_node_subsystem_util::database::{ + kvdb_impl::DbAdapter as RocksDbAdapter, paritydb_impl::DbAdapter as ParityDbAdapter, + }; + use std::sync::Arc; + + let approval_db_config = ApprovalDbConfig { + col_approval_data: super::REAL_COLUMNS.col_approval_data, + col_session_data: super::REAL_COLUMNS.col_session_window_data, + }; + + let result = match db_kind { + DatabaseKind::ParityDB => { + let db = ParityDbAdapter::new( + parity_db::Db::open(&paritydb_version_2_config(path)) + .map_err(|e| other_io_error(format!("Error opening db {:?}", e)))?, + super::columns::v2::ORDERED_COL, + ); - match db_kind { - DatabaseKind::ParityDB => paritydb_migrate_from_version_2_to_3(path), - DatabaseKind::RocksDB => rocksdb_migrate_from_version_2_to_3(path), - } - .and_then(|result| { - gum::info!(target: LOG_TARGET, "Migration complete! "); - Ok(result) - }) + migrate_approval_db_v1_to_v2(Arc::new(db), approval_db_config) + .map_err(|_| Error::MigrationFailed)?; + }, + DatabaseKind::RocksDB => { + let db_path = path + .to_str() + .ok_or_else(|| super::other_io_error("Invalid database path".into()))?; + let db_cfg = + kvdb_rocksdb::DatabaseConfig::with_columns(super::columns::v2::NUM_COLUMNS); + let db = RocksDbAdapter::new( + kvdb_rocksdb::Database::open(&db_cfg, db_path)?, + &super::columns::v2::ORDERED_COL, + ); + + migrate_approval_db_v1_to_v2(Arc::new(db), approval_db_config) + .map_err(|_| Error::MigrationFailed)?; + }, + }; + + gum::info!(target: LOG_TARGET, "Migration complete! "); + Ok(result) } /// Migration from version 0 to version 1: @@ -296,44 +331,10 @@ fn paritydb_migrate_from_version_1_to_2(path: &Path) -> Result<(), Error> { Ok(()) } -/// Migration from version 2 to version 3. -/// Clears the approval voting db column which changed format and cannot be migrated. -fn paritydb_migrate_from_version_2_to_3(path: &Path) -> Result<(), Error> { - parity_db::clear_column( - path, - super::columns::v2::COL_APPROVAL_DATA.try_into().expect("Invalid column ID"), - ) - .map_err(|e| other_io_error(format!("Error clearing column {:?}", e)))?; - - Ok(()) -} - -/// Migration from version 2 to version 3. -/// Clears the approval voting db column because `OurAssignment` changed format. Not all -/// instances of it can be converted to new version so we need to wipe it clean. 
-fn rocksdb_migrate_from_version_2_to_3(path: &Path) -> Result<(), Error> { - use kvdb::DBOp; - use kvdb_rocksdb::{Database, DatabaseConfig}; - - let db_path = path - .to_str() - .ok_or_else(|| super::other_io_error("Invalid database path".into()))?; - let db_cfg = DatabaseConfig::with_columns(super::columns::v2::NUM_COLUMNS); - let db = Database::open(&db_cfg, db_path)?; - - // Wipe all entries in one operation. - let ops = vec![DBOp::DeletePrefix { - col: super::columns::v2::COL_APPROVAL_DATA, - prefix: kvdb::DBKey::from_slice(b""), - }]; - - let transaction = DBTransaction { ops }; - db.write(transaction)?; - Ok(()) -} #[cfg(test)] mod tests { use super::{columns::v2::*, *}; + use polkadot_node_core_approval_voting::approval_db::v2::migrate_approval_db_v1_to_v2_fill_test_data; #[test] fn test_paritydb_migrate_0_to_1() { @@ -469,4 +470,47 @@ mod tests { Some("0xdeadb00b".as_bytes().to_vec()) ); } + + #[test] + fn test_migrate_2_to_3() { + use kvdb_rocksdb::{Database, DatabaseConfig}; + use polkadot_node_core_approval_voting::approval_db::v2::migrate_approval_db_v1_to_v2_sanity_check; + use polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter; + + let approval_cfg = ApprovalDbConfig { + col_approval_data: crate::parachains_db::REAL_COLUMNS.col_approval_data, + col_session_data: crate::parachains_db::REAL_COLUMNS.col_session_window_data, + }; + + let db_dir = tempfile::tempdir().unwrap(); + let db_path = db_dir.path().to_str().unwrap(); + let db_cfg = DatabaseConfig::with_columns(super::columns::v2::NUM_COLUMNS); + + // We need to properly set db version for upgrade to work. + fs::write(version_file_path(db_dir.path()), "2").expect("Failed to write DB version"); + let expected_candidates = { + let db = Database::open(&db_cfg, db_path.clone()).unwrap(); + assert_eq!(db.num_columns(), super::columns::v2::NUM_COLUMNS as u32); + let db = DbAdapter::new(db, columns::v2::ORDERED_COL); + // Fill the approval voting column with test data. + migrate_approval_db_v1_to_v2_fill_test_data( + std::sync::Arc::new(db), + approval_cfg.clone(), + ) + .unwrap() + }; + + try_upgrade_db(&db_dir.path(), DatabaseKind::RocksDB).unwrap(); + + let db_cfg = DatabaseConfig::with_columns(super::columns::v2::NUM_COLUMNS); + let db = Database::open(&db_cfg, db_path).unwrap(); + let db = DbAdapter::new(db, columns::v2::ORDERED_COL); + + migrate_approval_db_v1_to_v2_sanity_check( + std::sync::Arc::new(db), + approval_cfg.clone(), + expected_candidates, + ) + .unwrap(); + } } From 0f9e1eccd91382cc1ee6edbf89895ccb3a531ad1 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Wed, 24 May 2023 15:12:41 +0000 Subject: [PATCH 061/105] fix comment Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/approval_db/v2/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/core/approval-voting/src/approval_db/v2/mod.rs b/node/core/approval-voting/src/approval_db/v2/mod.rs index dbe388e1a131..120d350a115e 100644 --- a/node/core/approval-voting/src/approval_db/v2/mod.rs +++ b/node/core/approval-voting/src/approval_db/v2/mod.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Version 1 of the DB schema. +//! Version 2 of the DB schema. 
 use parity_scale_codec::{Decode, Encode};
 use polkadot_node_primitives::approval::{
     v1::DelayTranche,
     v2::{AssignmentCertV2, CoreBitfield},
 };

From fe5b5484e72de8cfa0b3b90b4a2c0d4148895cce Mon Sep 17 00:00:00 2001
From: Andrei Sandu 
Date: Thu, 25 May 2023 10:19:34 +0000
Subject: [PATCH 062/105] Fix AcceptedDuplicate, test and import log

Signed-off-by: Andrei Sandu 
---
 node/core/approval-voting/src/lib.rs   | 21 ++++++-
 node/core/approval-voting/src/tests.rs | 76 ++++++++++++++++++++++++--
 2 files changed, 89 insertions(+), 8 deletions(-)

diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs
index 020e2f60664b..ec4e964931c9 100644
--- a/node/core/approval-voting/src/lib.rs
+++ b/node/core/approval-voting/src/lib.rs
@@ -1903,6 +1903,7 @@ where
     let mut assigned_candidate_hashes = Vec::new();
     for candidate_index in candidate_indices.iter_ones() {
+        println!("{:?}", &block_entry);
         let (claimed_core_index, assigned_candidate_hash) =
             match block_entry.candidate(candidate_index) {
                 Some((c, h)) => (*c, *h),
@@ -2000,7 +2001,7 @@ where
     let mut actions = Vec::new();
     let res = {
-        let mut is_duplicate = false;
+        let mut is_duplicate = true;
         // Import the assignments for all cores in the cert.
         for (assigned_candidate_hash, candidate_index) in
             assigned_candidate_hashes.iter().zip(candidate_indices.iter_ones())
@@ -2028,7 +2029,7 @@ where
                     Vec::new(),
                 )),
             };
-            is_duplicate |= approval_entry.is_assigned(assignment.validator);
+            is_duplicate &= approval_entry.is_assigned(assignment.validator);
             approval_entry.import_assignment(tranche, assignment.validator, tick_now);
             check_and_import_assignment_span.add_uint_tag("tranche", tranche as u64);
@@ -2052,9 +2053,13 @@ where
             db.write_candidate_entry(candidate_entry.into());
         }

+        // Since we don't account for tranche in distribution message fingerprinting, some validators
+        // can be assigned to the same core (VRF modulo vs VRF delay). These can be safely ignored.
+        // However, if an assignment is for multiple cores (these are only tranche0), we cannot ignore it,
+        // because it would mean ignoring other non-duplicate assignments.
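+        // For example: if this certificate claims cores {0, 1} and the validator is already
+        // assigned on core 0 but not on core 1, `is_duplicate` ends up `false` and the whole
+        // assignment is still imported, so the core 1 assignment is not lost.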
if is_duplicate { AssignmentCheckResult::AcceptedDuplicate - } else { + } else if candidate_indices.count_ones() > 1 { gum::trace!( target: LOG_TARGET, validator = assignment.validator.0, @@ -2063,6 +2068,16 @@ where "Imported assignments for multiple cores.", ); + AssignmentCheckResult::Accepted + } else { + gum::trace!( + target: LOG_TARGET, + validator = assignment.validator.0, + candidate_hashes = ?assigned_candidate_hashes, + assigned_cores = ?claimed_core_indices, + "Imported assignment for a single core.", + ); + AssignmentCheckResult::Accepted } }; diff --git a/node/core/approval-voting/src/tests.rs b/node/core/approval-voting/src/tests.rs index b53f46f18d45..883a8fdef3e6 100644 --- a/node/core/approval-voting/src/tests.rs +++ b/node/core/approval-voting/src/tests.rs @@ -664,6 +664,38 @@ async fn check_and_import_assignment( rx } +async fn check_and_import_assignment_v2( + overseer: &mut VirtualOverseer, + block_hash: Hash, + core_indices: Vec, + validator: ValidatorIndex, +) -> oneshot::Receiver { + let (tx, rx) = oneshot::channel(); + overseer_send( + overseer, + FromOrchestra::Communication { + msg: ApprovalVotingMessage::CheckAndImportAssignment( + IndirectAssignmentCertV2 { + block_hash, + validator, + cert: garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFModuloCompact { + core_bitfield: core_indices + .clone() + .into_iter() + .map(|c| CoreIndex(c)) + .collect::>() + .try_into() + .unwrap(), + }), + }, + core_indices.try_into().unwrap(), + tx, + ), + }, + ) + .await; + rx +} struct BlockConfig { slot: Slot, candidates: Option>, @@ -1357,9 +1389,22 @@ fn subsystem_accepts_duplicate_assignment() { } ); - let block_hash = Hash::repeat_byte(0x01); - let candidate_index = 0; let validator = ValidatorIndex(0); + let candidate_index = 0; + let block_hash = Hash::repeat_byte(0x01); + + let candidate_receipt1 = { + let mut receipt = dummy_candidate_receipt(block_hash); + receipt.descriptor.para_id = ParaId::from(1_u32); + receipt + }; + let candidate_receipt2 = { + let mut receipt = dummy_candidate_receipt(block_hash); + receipt.descriptor.para_id = ParaId::from(2_u32); + receipt + }; + let candidate_index1 = 0; + let candidate_index2 = 1; // Add block hash 00. ChainBuilder::new() @@ -1367,21 +1412,30 @@ fn subsystem_accepts_duplicate_assignment() { block_hash, ChainBuilder::GENESIS_HASH, 1, - BlockConfig { slot: Slot::from(1), candidates: None, session_info: None }, + BlockConfig { + slot: Slot::from(1), + candidates: Some(vec![ + (candidate_receipt1, CoreIndex(0), GroupIndex(1)), + (candidate_receipt2, CoreIndex(1), GroupIndex(1)), + ]), + session_info: None, + }, ) .build(&mut virtual_overseer) .await; - let rx = check_and_import_assignment( + // Initial assignment. + let rx = check_and_import_assignment_v2( &mut virtual_overseer, block_hash, - candidate_index, + vec![candidate_index1, candidate_index2], validator, ) .await; assert_eq!(rx.await, Ok(AssignmentCheckResult::Accepted)); + // Test with single assigned core. let rx = check_and_import_assignment( &mut virtual_overseer, block_hash, @@ -1392,6 +1446,18 @@ fn subsystem_accepts_duplicate_assignment() { assert_eq!(rx.await, Ok(AssignmentCheckResult::AcceptedDuplicate)); + // Test with multiple assigned cores. This cannot happen in practice, as tranche0 assignments + // are sent first, but we should still ensure correct behavior. 
+ let rx = check_and_import_assignment_v2( + &mut virtual_overseer, + block_hash, + vec![candidate_index1, candidate_index2], + validator, + ) + .await; + + assert_eq!(rx.await, Ok(AssignmentCheckResult::AcceptedDuplicate)); + virtual_overseer }); } From a4fae3f1c3ca4623cbf48e7f0ebff3e0a8a87d6c Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Thu, 25 May 2023 10:26:57 +0000 Subject: [PATCH 063/105] clippy Signed-off-by: Andrei Sandu --- .../approval-voting/src/approval_db/v2/migration_helpers.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs b/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs index 95004f63b7b1..22f93443840d 100644 --- a/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs +++ b/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs @@ -244,7 +244,7 @@ pub fn migrate_approval_db_v1_to_v2_fill_test_data( overlay_db.write_block_entry(block_entry.clone().into()); expected_candidates.insert(candidate_entry.candidate.hash()); - db.write(write_candidate_entry_v1(candidate_entry, config.clone())).unwrap(); + db.write(write_candidate_entry_v1(candidate_entry, config)).unwrap(); } let write_ops = overlay_db.into_write_ops(); From 14c3643b6d8b74bdc9194a7c2d2b9f38f09f71a8 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Thu, 25 May 2023 13:22:47 +0000 Subject: [PATCH 064/105] fmt Signed-off-by: Andrei Sandu --- .../dispute-coordinator/src/initialized.rs | 101 +++++++++--------- 1 file changed, 51 insertions(+), 50 deletions(-) diff --git a/node/core/dispute-coordinator/src/initialized.rs b/node/core/dispute-coordinator/src/initialized.rs index 81134a43a3a0..b5c548bf1ffd 100644 --- a/node/core/dispute-coordinator/src/initialized.rs +++ b/node/core/dispute-coordinator/src/initialized.rs @@ -214,61 +214,62 @@ impl Initialized { gum::trace!(target: LOG_TARGET, "Waiting for message"); let mut overlay_db = OverlayedBackend::new(backend); let default_confirm = Box::new(|| Ok(())); - let confirm_write = - match MuxedMessage::receive(ctx, &mut self.participation_receiver).await? { - MuxedMessage::Participation(msg) => { - gum::trace!(target: LOG_TARGET, "MuxedMessage::Participation"); - let ParticipationStatement { - session, + let confirm_write = match MuxedMessage::receive(ctx, &mut self.participation_receiver) + .await? + { + MuxedMessage::Participation(msg) => { + gum::trace!(target: LOG_TARGET, "MuxedMessage::Participation"); + let ParticipationStatement { + session, + candidate_hash, + candidate_receipt, + outcome, + } = self.participation.get_participation_result(ctx, msg).await?; + if let Some(valid) = outcome.validity() { + gum::trace!( + target: LOG_TARGET, + ?session, + ?candidate_hash, + ?valid, + "Issuing local statement based on participation outcome." + ); + self.issue_local_statement( + ctx, + &mut overlay_db, candidate_hash, candidate_receipt, - outcome, - } = self.participation.get_participation_result(ctx, msg).await?; - if let Some(valid) = outcome.validity() { - gum::trace!( - target: LOG_TARGET, - ?session, - ?candidate_hash, - ?valid, - "Issuing local statement based on participation outcome." 
- ); - self.issue_local_statement( - ctx, - &mut overlay_db, - candidate_hash, - candidate_receipt, - session, - valid, - clock.now(), - ) - .await?; - } else { - gum::warn!(target: LOG_TARGET, ?outcome, "Dispute participation failed"); - } + session, + valid, + clock.now(), + ) + .await?; + } else { + gum::warn!(target: LOG_TARGET, ?outcome, "Dispute participation failed"); + } + default_confirm + }, + MuxedMessage::Subsystem(msg) => match msg { + FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), + FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => { + gum::trace!(target: LOG_TARGET, "OverseerSignal::ActiveLeaves"); + self.process_active_leaves_update( + ctx, + &mut overlay_db, + update, + clock.now(), + ) + .await?; default_confirm }, - MuxedMessage::Subsystem(msg) => match msg { - FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), - FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => { - gum::trace!(target: LOG_TARGET, "OverseerSignal::ActiveLeaves"); - self.process_active_leaves_update( - ctx, - &mut overlay_db, - update, - clock.now(), - ) - .await?; - default_confirm - }, - FromOrchestra::Signal(OverseerSignal::BlockFinalized(_, n)) => { - gum::trace!(target: LOG_TARGET, "OverseerSignal::BlockFinalized"); - self.scraper.process_finalized_block(&n); - default_confirm - }, - FromOrchestra::Communication { msg } => - self.handle_incoming(ctx, &mut overlay_db, msg, clock.now()).await?, + FromOrchestra::Signal(OverseerSignal::BlockFinalized(_, n)) => { + gum::trace!(target: LOG_TARGET, "OverseerSignal::BlockFinalized"); + self.scraper.process_finalized_block(&n); + default_confirm }, - }; + FromOrchestra::Communication { msg } => + self.handle_incoming(ctx, &mut overlay_db, msg, clock.now()).await?, + }, + }; if !overlay_db.is_empty() { let ops = overlay_db.into_write_ops(); From 0bc5aa80984b6b451dc0c5ff61e768c4158b4626 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 26 May 2023 11:02:49 +0000 Subject: [PATCH 065/105] Fix `get_approval_signatures` Signed-off-by: Andrei Sandu --- node/network/approval-distribution/src/lib.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index a950b8d660c9..2e96198bd084 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -421,7 +421,7 @@ struct BlockEntry { candidates: Vec, /// The session index of this block. session: SessionIndex, - /// Approval entries for whole block. These also contain all approvals in the cae of multiple candidates + /// Approval entries for whole block. These also contain all approvals in the case of multiple candidates /// being claimed by assignments. approval_entries: HashMap<(ValidatorIndex, CandidateBitfield), ApprovalEntry>, } @@ -1550,7 +1550,15 @@ impl State { approval_entry .get_approvals() .into_iter() - .map(|approval| (approval.validator, approval.signature)) + // `get_approvals` gives all approvals for all candidates for a given validator. + // We need to restrict to a specific candidate. 
+ .filter_map(|approval| { + if approval.candidate_index == index { + Some((approval.validator, approval.signature)) + } else { + None + } + }) }) .collect::>(); all_sigs.extend(sigs); From 574c92b1c6d6a18e02058af48092987ebb64d2d9 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 26 May 2023 15:21:09 +0000 Subject: [PATCH 066/105] Add ApprovalEntry::get_approval Signed-off-by: Andrei Sandu --- node/network/approval-distribution/src/lib.rs | 26 ++++++++----------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index 2e96198bd084..ecd538d9c01b 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -213,6 +213,14 @@ impl ApprovalEntry { self.approvals.values().cloned().collect::>() } + // Get the approval for a specific candidate index. + pub fn get_approval( + &self, + candidate_index: CandidateIndex, + ) -> Option { + self.approvals.get(&candidate_index).cloned() + } + // Get validator index. pub fn get_validator_index(&self) -> ValidatorIndex { self.validator_index @@ -434,7 +442,7 @@ impl BlockEntry { pub fn insert_approval_entry(&mut self, entry: ApprovalEntry) -> &mut ApprovalEntry { // First map one entry per candidate to the same key we will use in `approval_entries`. - // Key is (Validator_index, Vec) that links the `ApprovalEntry` to the (K,V) + // Key is (Validator_index, CandidateBitfield) that links the `ApprovalEntry` to the (K,V) // entry in `candidate_entry.messages`. for claimed_candidate_index in entry.candidates.iter_ones() { match self.candidates.get_mut(claimed_candidate_index) { @@ -1546,20 +1554,8 @@ impl State { let sigs = block_entry .get_approval_entries(index) .into_iter() - .flat_map(|approval_entry| { - approval_entry - .get_approvals() - .into_iter() - // `get_approvals` gives all approvals for all candidates for a given validator. - // We need to restrict to a specific candidate. 
- .filter_map(|approval| { - if approval.candidate_index == index { - Some((approval.validator, approval.signature)) - } else { - None - } - }) - }) + .filter_map(|approval_entry| approval_entry.get_approval(index)) + .map(|approval| (approval.validator, approval.signature)) .collect::>(); all_sigs.extend(sigs); } From 4c04699acd95c87e5859b947e812d47252048762 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 26 May 2023 15:30:22 +0000 Subject: [PATCH 067/105] disable v2 assignments Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/criteria.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index a1d714204bba..ff374a162e4e 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -282,7 +282,7 @@ impl AssignmentCriteria for RealAssignmentCriteria { config: &Config, leaving_cores: Vec<(CandidateHash, CoreIndex, GroupIndex)>, ) -> HashMap { - compute_assignments(keystore, relay_vrf_story, config, leaving_cores, true) + compute_assignments(keystore, relay_vrf_story, config, leaving_cores, false) } fn check_assignment_cert( From 0be0201eeee3488767b08d868bdf78dff910779e Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 26 May 2023 15:51:21 +0000 Subject: [PATCH 068/105] review feedback and print remove Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/criteria.rs | 7 +++---- node/core/approval-voting/src/lib.rs | 1 - node/core/approval-voting/src/tests.rs | 2 -- 3 files changed, 3 insertions(+), 7 deletions(-) diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index ff374a162e4e..6c5c6ec59435 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -489,7 +489,8 @@ fn compute_relay_vrf_modulo_assignments_v2( assignments: &mut HashMap, ) { let mut assigned_cores = Vec::new(); - // for rvm_sample in 0..config.relay_vrf_modulo_samples { + let leaving_cores = leaving_cores.iter().map(|(_, core)| core).collect::>(); + let maybe_assignment = { let assigned_cores = &mut assigned_cores; assignments_key.vrf_sign_extra_after_check( @@ -501,9 +502,7 @@ fn compute_relay_vrf_modulo_assignments_v2( config.n_cores, ) .into_iter() - .filter(|core| { - leaving_cores.iter().map(|(_, core)| core).collect::>().contains(&core) - }) + .filter(|core| leaving_cores.contains(&core)) .collect::>(); if !assigned_cores.is_empty() { diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index ec4e964931c9..c3b3b0bdf13b 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -1903,7 +1903,6 @@ where let mut assigned_candidate_hashes = Vec::new(); for candidate_index in candidate_indices.iter_ones() { - println!("{:?}", &block_entry); let (claimed_core_index, assigned_candidate_hash) = match block_entry.candidate(candidate_index) { Some((c, h)) => (*c, *h), diff --git a/node/core/approval-voting/src/tests.rs b/node/core/approval-voting/src/tests.rs index 883a8fdef3e6..0f7365fdfe9a 100644 --- a/node/core/approval-voting/src/tests.rs +++ b/node/core/approval-voting/src/tests.rs @@ -2725,14 +2725,12 @@ async fn step_until_done(clock: &MockClock) { futures_timer::Delay::new(Duration::from_millis(200)).await; let mut clock = clock.inner.lock(); if let Some(tick) = clock.next_wakeup() { - println!("TICK: {:?}", tick); relevant_ticks.push(tick); clock.set_tick(tick); } else { break 
} } - println!("relevant_ticks: {:?}", relevant_ticks); } #[test] From 496f0c588dadb7a4139cace5c3777df67d287dcf Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Wed, 31 May 2023 12:39:55 +0000 Subject: [PATCH 069/105] Fix logging Signed-off-by: Andrei Sandu --- node/network/bridge/src/network.rs | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/node/network/bridge/src/network.rs b/node/network/bridge/src/network.rs index a8475dada263..012dbfe5361c 100644 --- a/node/network/bridge/src/network.rs +++ b/node/network/bridge/src/network.rs @@ -61,13 +61,21 @@ pub(crate) fn send_message( encoded }; + // optimization: generate the protocol name once. + let protocol_name = protocol_names.get_name(peer_set, version); + gum::trace!( + target: LOG_TARGET, + ?peers, + ?version, + ?protocol_name, + ?message, + "Sending message to peers", + ); + // optimization: avoid cloning the message for the last peer in the // list. The message payload can be quite large. If the underlying // network used `Bytes` this would not be necessary. let last_peer = peers.pop(); - // optimization: generate the protocol name once. - let protocol_name = protocol_names.get_name(peer_set, version); - gum::trace!(target: LOG_TARGET, ?peers, ?version, ?protocol_name, "Sending message to peers",); peers.into_iter().for_each(|peer| { net.write_notification(peer, protocol_name.clone(), message.clone()); }); From ba6078c09e83ff88e07fd4383f012451c988ec18 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Wed, 31 May 2023 13:47:53 +0000 Subject: [PATCH 070/105] fix Signed-off-by: Andrei Sandu --- node/network/bridge/src/rx/mod.rs | 32 +++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/node/network/bridge/src/rx/mod.rs b/node/network/bridge/src/rx/mod.rs index 8a7cb769c858..8dd67b1c9fcd 100644 --- a/node/network/bridge/src/rx/mod.rs +++ b/node/network/bridge/src/rx/mod.rs @@ -769,21 +769,25 @@ fn update_our_view( let vstaging_validation_peers = filter_by_version(&validation_peers, ValidationVersion::VStaging.into()); - send_validation_message_v1( - net, - v1_validation_peers, - peerset_protocol_names, - WireMessage::ViewUpdate(new_view.clone()), - metrics, - ); + if v1_validation_peers.len() > 0 { + send_validation_message_v1( + net, + v1_validation_peers, + peerset_protocol_names, + WireMessage::ViewUpdate(new_view.clone()), + metrics, + ); + } - send_validation_message_vstaging( - net, - vstaging_validation_peers, - peerset_protocol_names, - WireMessage::ViewUpdate(new_view.clone()), - metrics, - ); + if vstaging_validation_peers.len() > 0 { + send_validation_message_vstaging( + net, + vstaging_validation_peers, + peerset_protocol_names, + WireMessage::ViewUpdate(new_view.clone()), + metrics, + ); + } send_collation_message_v1( net, From bfce5798719b0f02a5736c73451614b57d1d4286 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Thu, 8 Jun 2023 13:11:01 +0000 Subject: [PATCH 071/105] Get rid of old "legacy" protocols, not used anymore Signed-off-by: Andrei Sandu --- node/network/protocol/src/peer_set.rs | 133 +++++++++++++------------- 1 file changed, 68 insertions(+), 65 deletions(-) diff --git a/node/network/protocol/src/peer_set.rs b/node/network/protocol/src/peer_set.rs index 94b9b4991c99..434d418ba3d8 100644 --- a/node/network/protocol/src/peer_set.rs +++ b/node/network/protocol/src/peer_set.rs @@ -28,13 +28,6 @@ use std::{ }; use strum::{EnumIter, IntoEnumIterator}; -/// The legacy protocol names. Only supported on version = 1. 
-const LEGACY_VALIDATION_PROTOCOL_V1: &str = "/polkadot/validation/1"; -const LEGACY_COLLATION_PROTOCOL_V1: &str = "/polkadot/collation/1"; - -/// The legacy protocol version. Is always 1 for both validation & collation. -const LEGACY_PROTOCOL_VERSION_V1: u32 = 1; - /// Max notification size is currently constant. pub const MAX_NOTIFICATION_SIZE: u64 = 100 * 1024; @@ -72,7 +65,7 @@ impl PeerSet { // Networking layer relies on `get_main_name()` being the main name of the protocol // for peersets and connection management. let protocol = peerset_protocol_names.get_main_name(self); - let fallback_names = PeerSetProtocolNames::get_fallback_names(self); + let fallback_names = peerset_protocol_names.get_fallback_names(self); let max_notification_size = self.get_max_notification_size(is_authority); match self { @@ -254,46 +247,57 @@ impl From for ProtocolVersion { #[derive(Clone)] pub struct PeerSetProtocolNames { protocols: HashMap, + legacy_protocols: HashMap, names: HashMap<(PeerSet, ProtocolVersion), ProtocolName>, + legacy_names: HashMap<(PeerSet, ProtocolVersion), ProtocolName>, } impl PeerSetProtocolNames { /// Construct [`PeerSetProtocols`] using `genesis_hash` and `fork_id`. pub fn new(genesis_hash: Hash, fork_id: Option<&str>) -> Self { let mut protocols = HashMap::new(); + let mut legacy_protocols = HashMap::new(); let mut names = HashMap::new(); + let mut legacy_names = HashMap::new(); + for protocol in PeerSet::iter() { match protocol { - PeerSet::Validation => - for version in ValidationVersion::iter() { - Self::register_main_protocol( - &mut protocols, - &mut names, - protocol, - version.into(), - &genesis_hash, - fork_id, - ); - }, - PeerSet::Collation => - for version in CollationVersion::iter() { - Self::register_main_protocol( - &mut protocols, - &mut names, - protocol, - version.into(), - &genesis_hash, - fork_id, - ); - }, + PeerSet::Validation => { + // Main protocol v2 + Self::register_protocol( + &mut protocols, + &mut names, + protocol, + ValidationVersion::VStaging.into(), + &genesis_hash, + fork_id, + ); + + // Legacy protocol v1 + Self::register_protocol( + &mut legacy_protocols, + &mut legacy_names, + protocol, + ValidationVersion::V1.into(), + &genesis_hash, + fork_id, + ); + }, + PeerSet::Collation => Self::register_protocol( + &mut protocols, + &mut names, + protocol, + CollationVersion::V1.into(), + &genesis_hash, + fork_id, + ), } - Self::register_legacy_protocol(&mut protocols, protocol); } - Self { protocols, names } + Self { protocols, legacy_protocols, names, legacy_names } } - /// Helper function to register main protocol. - fn register_main_protocol( + /// Helper function to register a protocol. + fn register_protocol( protocols: &mut HashMap, names: &mut HashMap<(PeerSet, ProtocolVersion), ProtocolName>, protocol: PeerSet, @@ -306,19 +310,6 @@ impl PeerSetProtocolNames { Self::insert_protocol_or_panic(protocols, protocol_name, protocol, version); } - /// Helper function to register legacy protocol. - fn register_legacy_protocol( - protocols: &mut HashMap, - protocol: PeerSet, - ) { - Self::insert_protocol_or_panic( - protocols, - Self::get_legacy_name(protocol), - protocol, - ProtocolVersion(LEGACY_PROTOCOL_VERSION_V1), - ) - } - /// Helper function to make sure no protocols have the same name. fn insert_protocol_or_panic( protocols: &mut HashMap, @@ -345,7 +336,10 @@ impl PeerSetProtocolNames { /// Lookup the protocol using its on the wire name. 
pub fn try_get_protocol(&self, name: &ProtocolName) -> Option<(PeerSet, ProtocolVersion)> { - self.protocols.get(name).map(ToOwned::to_owned) + self.protocols + .get(name) + .or_else(|| self.legacy_protocols.get(name)) + .map(ToOwned::to_owned) } /// Get the main protocol name. It's used by the networking for keeping track @@ -358,10 +352,20 @@ impl PeerSetProtocolNames { pub fn get_name(&self, protocol: PeerSet, version: ProtocolVersion) -> ProtocolName { self.names .get(&(protocol, version)) + .or_else(|| self.legacy_names.get(&(protocol, version))) .expect("Protocols & versions are specified via enums defined above, and they are all registered in `new()`; qed") .clone() } + /// Get the protocol name for legacy versions. + pub fn get_legacy_names(&self, protocol: PeerSet) -> Vec { + self.legacy_names + .iter() + .filter(|((legacy_protocol, _), _)| &protocol == legacy_protocol) + .map(|(_, protocol_name)| protocol_name.clone()) + .collect() + } + /// The protocol name of this protocol based on `genesis_hash` and `fork_id`. fn generate_name( genesis_hash: &Hash, @@ -383,19 +387,12 @@ impl PeerSetProtocolNames { format!("{}/{}/{}", prefix, short_name, version).into() } - /// Get the legacy protocol name, only `LEGACY_PROTOCOL_VERSION` = 1 is supported. - fn get_legacy_name(protocol: PeerSet) -> ProtocolName { + /// Get the protocol fallback names. + fn get_fallback_names(&self, protocol: PeerSet) -> Vec { match protocol { - PeerSet::Validation => LEGACY_VALIDATION_PROTOCOL_V1, - PeerSet::Collation => LEGACY_COLLATION_PROTOCOL_V1, + PeerSet::Validation => self.get_legacy_names(protocol), + PeerSet::Collation => vec![], } - .into() - } - - /// Get the protocol fallback names. Currently only holds the legacy name - /// for `LEGACY_PROTOCOL_VERSION` = 1. 
- fn get_fallback_names(protocol: PeerSet) -> Vec { - std::iter::once(Self::get_legacy_name(protocol)).collect() } } @@ -471,13 +468,20 @@ mod tests { let protocol_names = PeerSetProtocolNames::new(genesis_hash, None); let validation_main = - "/7ac8741de8b7146d8a5617fd462914557fe63c265a7f1c10e7dae32858eebb80/validation/1"; + "/7ac8741de8b7146d8a5617fd462914557fe63c265a7f1c10e7dae32858eebb80/validation/2"; assert_eq!( protocol_names.try_get_protocol(&validation_main.into()), - Some((PeerSet::Validation, TestVersion(1).into())), + Some((PeerSet::Validation, TestVersion(2).into())), ); - let validation_legacy = "/polkadot/validation/1"; + + let validation_legacy = + "/7ac8741de8b7146d8a5617fd462914557fe63c265a7f1c10e7dae32858eebb80/validation/1"; + + assert_eq!( + protocol_names.get_fallback_names(PeerSet::Validation), + vec![validation_legacy.into()], + ); assert_eq!( protocol_names.try_get_protocol(&validation_legacy.into()), Some((PeerSet::Validation, TestVersion(1).into())), @@ -490,10 +494,9 @@ mod tests { Some((PeerSet::Collation, TestVersion(1).into())), ); - let collation_legacy = "/polkadot/collation/1"; assert_eq!( - protocol_names.try_get_protocol(&collation_legacy.into()), - Some((PeerSet::Collation, TestVersion(1).into())), + protocol_names.get_fallback_names(PeerSet::Collation), + vec![], ); } From cd911e65fee4e16838e70b68a59831fc25f92f0f Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Thu, 8 Jun 2023 14:41:08 +0000 Subject: [PATCH 072/105] merge fixes Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/approval_db/v2/mod.rs | 2 -- node/core/approval-voting/src/lib.rs | 4 ++-- node/network/protocol/src/peer_set.rs | 6 +----- runtime/parachains/src/configuration/migration_ump.rs | 10 ++-------- 4 files changed, 5 insertions(+), 17 deletions(-) diff --git a/node/core/approval-voting/src/approval_db/v2/mod.rs b/node/core/approval-voting/src/approval_db/v2/mod.rs index 120d350a115e..08a5b2bb9d9d 100644 --- a/node/core/approval-voting/src/approval_db/v2/mod.rs +++ b/node/core/approval-voting/src/approval_db/v2/mod.rs @@ -173,8 +173,6 @@ pub type Bitfield = BitVec; pub struct Config { /// The column family in the database where data is stored. pub col_approval_data: u32, - /// The column of the database where rolling session window data is stored. - pub col_session_data: u32, } /// Details pertaining to our assignment on a block. diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index 4af781236d11..62f9c8123422 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -368,8 +368,8 @@ impl ApprovalVotingSubsystem { /// The operation is not allowed for blocks older than the last finalized one. 
pub fn revert_to(&self, hash: Hash) -> Result<(), SubsystemError> { let config = - approval_db::v1::Config { col_approval_data: self.db_config.col_approval_data }; - let mut backend = approval_db::v1::DbBackend::new(self.db.clone(), config); + approval_db::v2::Config { col_approval_data: self.db_config.col_approval_data }; + let mut backend = approval_db::v2::DbBackend::new(self.db.clone(), config); let mut overlay = OverlayedBackend::new(&backend); ops::revert_to(&mut overlay, hash)?; diff --git a/node/network/protocol/src/peer_set.rs b/node/network/protocol/src/peer_set.rs index 434d418ba3d8..0fd68fdc02ab 100644 --- a/node/network/protocol/src/peer_set.rs +++ b/node/network/protocol/src/peer_set.rs @@ -474,7 +474,6 @@ mod tests { Some((PeerSet::Validation, TestVersion(2).into())), ); - let validation_legacy = "/7ac8741de8b7146d8a5617fd462914557fe63c265a7f1c10e7dae32858eebb80/validation/1"; @@ -494,10 +493,7 @@ mod tests { Some((PeerSet::Collation, TestVersion(1).into())), ); - assert_eq!( - protocol_names.get_fallback_names(PeerSet::Collation), - vec![], - ); + assert_eq!(protocol_names.get_fallback_names(PeerSet::Collation), vec![],); } #[test] diff --git a/runtime/parachains/src/configuration/migration_ump.rs b/runtime/parachains/src/configuration/migration_ump.rs index c46f25108fa3..008a93142ee7 100644 --- a/runtime/parachains/src/configuration/migration_ump.rs +++ b/runtime/parachains/src/configuration/migration_ump.rs @@ -107,18 +107,12 @@ pub mod latest { "There must be exactly one new pending upgrade enqueued" ); if let Err(err) = last.1.check_consistency() { - log::error!( - target: LOG_TARGET, - "Last PendingConfig is invalidity {:?}", err, - ); + log::error!(target: LOG_TARGET, "Last PendingConfig is invalidity {:?}", err,); return Err("Pending upgrade must be sane but was not".into()) } if let Err(err) = ActiveConfig::::get().check_consistency() { - log::error!( - target: LOG_TARGET, - "ActiveConfig is invalid: {:?}", err, - ); + log::error!(target: LOG_TARGET, "ActiveConfig is invalid: {:?}", err,); return Err("Active upgrade must be sane but was not".into()) } From 959e529923c7bdabd431929c83f9e23c1c1914de Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 9 Jun 2023 11:31:20 +0000 Subject: [PATCH 073/105] Refactor and always send on main protocol version Signed-off-by: Andrei Sandu --- node/network/bridge/src/network.rs | 55 ++++++++++++++++++--- node/network/bridge/src/rx/mod.rs | 77 +++-------------------------- node/network/bridge/src/tx/mod.rs | 66 +++---------------------- node/network/bridge/src/tx/tests.rs | 4 +- 4 files changed, 65 insertions(+), 137 deletions(-) diff --git a/node/network/bridge/src/network.rs b/node/network/bridge/src/network.rs index 012dbfe5361c..6a721758439a 100644 --- a/node/network/bridge/src/network.rs +++ b/node/network/bridge/src/network.rs @@ -28,33 +28,76 @@ use sc_network::{ }; use polkadot_node_network_protocol::{ - peer_set::{PeerSet, PeerSetProtocolNames, ProtocolVersion}, + peer_set::{ + CollationVersion, PeerSet, PeerSetProtocolNames, ProtocolVersion, ValidationVersion, + }, request_response::{OutgoingRequest, Recipient, ReqProtocolNames, Requests}, - PeerId, UnifiedReputationChange as Rep, + v1 as protocol_v1, vstaging as protocol_vstaging, PeerId, UnifiedReputationChange as Rep, }; use polkadot_primitives::{AuthorityDiscoveryId, Block, Hash}; -use crate::validator_discovery::AuthorityDiscovery; +use crate::{metrics::Metrics, validator_discovery::AuthorityDiscovery, WireMessage}; // network bridge network abstraction log target 
const LOG_TARGET: &'static str = "parachain::network-bridge-net"; -/// Send a message to the network. +// Helper function to send a validation v1 message to a list of peers. +// Messages are always sent via the main protocol, even legacy protocol messages. +pub(crate) fn send_validation_message_v1( + net: &mut impl Network, + peers: Vec, + peerset_protocol_names: &PeerSetProtocolNames, + message: WireMessage, + metrics: &Metrics, +) { + gum::trace!(target: LOG_TARGET, ?peers, ?message, "Sending validation v1 message to peers",); + + send_message(net, peers, PeerSet::Validation, peerset_protocol_names, message, metrics); +} + +// Helper function to send a validation v2 message to a list of peers. +// Messages are always sent via the main protocol, even legacy protocol messages. +pub(crate) fn send_validation_message_v2( + net: &mut impl Network, + peers: Vec, + peerset_protocol_names: &PeerSetProtocolNames, + message: WireMessage, + metrics: &Metrics, +) { + gum::trace!(target: LOG_TARGET, ?peers, ?message, "Sending validation v2 message to peers",); + + send_message(net, peers, PeerSet::Validation, peerset_protocol_names, message, metrics); +} + +// Helper function to send a collation v1 message to a list of peers. +// Messages are always sent via the main protocol, even legacy protocol messages. +pub(crate) fn send_collation_message_v1( + net: &mut impl Network, + peers: Vec, + peerset_protocol_names: &PeerSetProtocolNames, + message: WireMessage, + metrics: &Metrics, +) { + send_message(net, peers, PeerSet::Collation, peerset_protocol_names, message, metrics); +} + +/// Lower level function that sends a message to the network using the main protocol version. /// /// This function is only used internally by the network-bridge, which is responsible to only send /// messages that are compatible with the passed peer set, as that is currently not enforced by /// this function. These are messages of type `WireMessage` parameterized on the matching type. -pub(crate) fn send_message( +fn send_message( net: &mut impl Network, mut peers: Vec, peer_set: PeerSet, - version: ProtocolVersion, protocol_names: &PeerSetProtocolNames, message: M, metrics: &super::Metrics, ) where M: Encode + Clone, { + // Always use main version for sending messages. + let version = peer_set.get_main_version(); let message = { let encoded = message.encode(); metrics.on_notification_sent(peer_set, version, encoded.len(), peers.len()); diff --git a/node/network/bridge/src/rx/mod.rs b/node/network/bridge/src/rx/mod.rs index 8dd67b1c9fcd..4e0fd149126e 100644 --- a/node/network/bridge/src/rx/mod.rs +++ b/node/network/bridge/src/rx/mod.rs @@ -63,9 +63,10 @@ use super::validator_discovery; /// Actual interfacing to the network based on the `Network` trait. /// /// Defines the `Network` trait with an implementation for an `Arc`. 
-use crate::network::{send_message, Network}; - -use crate::network::get_peer_id_by_authority_id; +use crate::network::{ + send_collation_message_v1, send_validation_message_v1, send_validation_message_v2, Network, +}; +use crate::{network::get_peer_id_by_authority_id, WireMessage}; use super::metrics::Metrics; @@ -248,22 +249,18 @@ where match ValidationVersion::try_from(version) .expect("try_get_protocol has already checked version is known; qed") { - ValidationVersion::V1 => send_message( + ValidationVersion::V1 => send_validation_message_v1( &mut network_service, vec![peer], - PeerSet::Validation, - version, &peerset_protocol_names, WireMessage::::ViewUpdate( local_view, ), &metrics, ), - ValidationVersion::VStaging => send_message( + ValidationVersion::VStaging => send_validation_message_v2( &mut network_service, vec![peer], - PeerSet::Validation, - version, &peerset_protocol_names, WireMessage::::ViewUpdate( local_view, @@ -287,11 +284,9 @@ where ) .await; - send_message( + send_collation_message_v1( &mut network_service, vec![peer], - PeerSet::Collation, - version, &peerset_protocol_names, WireMessage::::ViewUpdate(local_view), &metrics, @@ -780,7 +775,7 @@ fn update_our_view( } if vstaging_validation_peers.len() > 0 { - send_validation_message_vstaging( + send_validation_message_v2( net, vstaging_validation_peers, peerset_protocol_names, @@ -866,62 +861,6 @@ fn handle_peer_messages>( (outgoing_events, reports) } -fn send_validation_message_v1( - net: &mut impl Network, - peers: Vec, - peerset_protocol_names: &PeerSetProtocolNames, - message: WireMessage, - metrics: &Metrics, -) { - gum::trace!(target: LOG_TARGET, ?peers, ?message, "Sending validation v1 message to peers",); - send_message( - net, - peers, - PeerSet::Validation, - ValidationVersion::V1.into(), - peerset_protocol_names, - message, - metrics, - ); -} - -fn send_validation_message_vstaging( - net: &mut impl Network, - peers: Vec, - peerset_protocol_names: &PeerSetProtocolNames, - message: WireMessage, - metrics: &Metrics, -) { - gum::trace!(target: LOG_TARGET, ?peers, ?message, "Sending validation v2 message to peers",); - send_message( - net, - peers, - PeerSet::Validation, - ValidationVersion::VStaging.into(), - peerset_protocol_names, - message, - metrics, - ); -} - -fn send_collation_message_v1( - net: &mut impl Network, - peers: Vec, - peerset_protocol_names: &PeerSetProtocolNames, - message: WireMessage, - metrics: &Metrics, -) { - send_message( - net, - peers, - PeerSet::Collation, - CollationVersion::V1.into(), - peerset_protocol_names, - message, - metrics, - ); -} - async fn dispatch_validation_event_to_all( event: NetworkBridgeEvent, ctx: &mut impl overseer::NetworkBridgeRxSenderTrait, diff --git a/node/network/bridge/src/tx/mod.rs b/node/network/bridge/src/tx/mod.rs index 961cab8e507f..3a168ff74a2a 100644 --- a/node/network/bridge/src/tx/mod.rs +++ b/node/network/bridge/src/tx/mod.rs @@ -18,9 +18,7 @@ use super::*; use polkadot_node_network_protocol::{ - peer_set::{CollationVersion, PeerSet, PeerSetProtocolNames, ValidationVersion}, - request_response::ReqProtocolNames, - v1 as protocol_v1, vstaging as protocol_vstaging, PeerId, Versioned, + peer_set::PeerSetProtocolNames, request_response::ReqProtocolNames, Versioned, }; use polkadot_node_subsystem::{ @@ -38,7 +36,9 @@ use crate::validator_discovery; /// Actual interfacing to the network based on the `Network` trait. /// /// Defines the `Network` trait with an implementation for an `Arc`. 
-use crate::network::{send_message, Network}; +use crate::network::{ + send_collation_message_v1, send_validation_message_v1, send_validation_message_v2, Network, +}; use crate::metrics::Metrics; @@ -184,7 +184,7 @@ where WireMessage::ProtocolMessage(msg), &metrics, ), - Versioned::VStaging(msg) => send_validation_message_vstaging( + Versioned::VStaging(msg) => send_validation_message_v2( &mut network_service, peers, peerset_protocol_names, @@ -210,7 +210,7 @@ where WireMessage::ProtocolMessage(msg), &metrics, ), - Versioned::VStaging(msg) => send_validation_message_vstaging( + Versioned::VStaging(msg) => send_validation_message_v2( &mut network_service, peers, peerset_protocol_names, @@ -361,57 +361,3 @@ where Ok(()) } - -fn send_validation_message_v1( - net: &mut impl Network, - peers: Vec, - protocol_names: &PeerSetProtocolNames, - message: WireMessage, - metrics: &Metrics, -) { - send_message( - net, - peers, - PeerSet::Validation, - ValidationVersion::V1.into(), - protocol_names, - message, - metrics, - ); -} - -fn send_validation_message_vstaging( - net: &mut impl Network, - peers: Vec, - protocol_names: &PeerSetProtocolNames, - message: WireMessage, - metrics: &Metrics, -) { - send_message( - net, - peers, - PeerSet::Validation, - ValidationVersion::VStaging.into(), - protocol_names, - message, - metrics, - ); -} - -fn send_collation_message_v1( - net: &mut impl Network, - peers: Vec, - protocol_names: &PeerSetProtocolNames, - message: WireMessage, - metrics: &Metrics, -) { - send_message( - net, - peers, - PeerSet::Collation, - CollationVersion::V1.into(), - protocol_names, - message, - metrics, - ); -} diff --git a/node/network/bridge/src/tx/tests.rs b/node/network/bridge/src/tx/tests.rs index 7b25fb1eff0f..51a2c60f3ec8 100644 --- a/node/network/bridge/src/tx/tests.rs +++ b/node/network/bridge/src/tx/tests.rs @@ -25,9 +25,9 @@ use std::collections::HashSet; use sc_network::{Event as NetworkEvent, IfDisconnected, ProtocolName}; use polkadot_node_network_protocol::{ - peer_set::PeerSetProtocolNames, + peer_set::{PeerSetProtocolNames, ValidationVersion}, request_response::{outgoing::Requests, ReqProtocolNames}, - ObservedRole, Versioned, + v1 as protocol_v1, vstaging as protocol_vstaging, ObservedRole, Versioned, }; use polkadot_node_subsystem::{FromOrchestra, OverseerSignal}; use polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle; From a8e3f28b1766e9cc665220962bf9e16383e7764b Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 9 Jun 2023 11:38:54 +0000 Subject: [PATCH 074/105] Fix db upgrade after merge and fmt Signed-off-by: Andrei Sandu --- node/network/bridge/src/network.rs | 4 +--- node/service/src/parachains_db/upgrade.rs | 22 +++++++++++++--------- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/node/network/bridge/src/network.rs b/node/network/bridge/src/network.rs index 6a721758439a..2542117a72fc 100644 --- a/node/network/bridge/src/network.rs +++ b/node/network/bridge/src/network.rs @@ -28,9 +28,7 @@ use sc_network::{ }; use polkadot_node_network_protocol::{ - peer_set::{ - CollationVersion, PeerSet, PeerSetProtocolNames, ProtocolVersion, ValidationVersion, - }, + peer_set::{PeerSet, PeerSetProtocolNames}, request_response::{OutgoingRequest, Recipient, ReqProtocolNames, Requests}, v1 as protocol_v1, vstaging as protocol_vstaging, PeerId, UnifiedReputationChange as Rep, }; diff --git a/node/service/src/parachains_db/upgrade.rs b/node/service/src/parachains_db/upgrade.rs index 120de969b2b2..bfbbaf5f8d3c 100644 --- 
a/node/service/src/parachains_db/upgrade.rs +++ b/node/service/src/parachains_db/upgrade.rs @@ -140,7 +140,7 @@ fn migrate_from_version_1_to_2(path: &Path, db_kind: DatabaseKind) -> Result<(), // Migrade approval voting database. `OurAssignment` has been changed to support the v2 assignments. // As these are backwards compatible, we'll convert the old entries in the new format. fn migrate_from_version_3_to_4(path: &Path, db_kind: DatabaseKind) -> Result<(), Error> { - gum::info!(target: LOG_TARGET, "Migrating parachains db from version 2 to version 3 ..."); + gum::info!(target: LOG_TARGET, "Migrating parachains db from version 3 to version 4 ..."); use polkadot_node_subsystem_util::database::{ kvdb_impl::DbAdapter as RocksDbAdapter, paritydb_impl::DbAdapter as ParityDbAdapter, }; @@ -152,7 +152,7 @@ fn migrate_from_version_3_to_4(path: &Path, db_kind: DatabaseKind) -> Result<(), let result = match db_kind { DatabaseKind::ParityDB => { let db = ParityDbAdapter::new( - parity_db::Db::open(&paritydb_version_2_config(path)) + parity_db::Db::open(&paritydb_version_3_config(path)) .map_err(|e| other_io_error(format!("Error opening db {:?}", e)))?, super::columns::v3::ORDERED_COL, ); @@ -165,7 +165,7 @@ fn migrate_from_version_3_to_4(path: &Path, db_kind: DatabaseKind) -> Result<(), .to_str() .ok_or_else(|| super::other_io_error("Invalid database path".into()))?; let db_cfg = - kvdb_rocksdb::DatabaseConfig::with_columns(super::columns::v2::NUM_COLUMNS); + kvdb_rocksdb::DatabaseConfig::with_columns(super::columns::v3::NUM_COLUMNS); let db = RocksDbAdapter::new( kvdb_rocksdb::Database::open(&db_cfg, db_path)?, &super::columns::v3::ORDERED_COL, @@ -525,17 +525,20 @@ mod tests { use polkadot_node_core_approval_voting::approval_db::v2::migrate_approval_db_v1_to_v2_sanity_check; use polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter; + let db_dir = tempfile::tempdir().unwrap(); + let db_path = db_dir.path().to_str().unwrap(); + let db_cfg: DatabaseConfig = DatabaseConfig::with_columns(super::columns::v3::NUM_COLUMNS); + let approval_cfg = ApprovalDbConfig { col_approval_data: crate::parachains_db::REAL_COLUMNS.col_approval_data, - col_session_data: crate::parachains_db::REAL_COLUMNS.col_session_window_data, }; // We need to properly set db version for upgrade to work. - fs::write(version_file_path(db_dir.path()), "2").expect("Failed to write DB version"); + fs::write(version_file_path(db_dir.path()), "3").expect("Failed to write DB version"); let expected_candidates = { let db = Database::open(&db_cfg, db_path.clone()).unwrap(); - assert_eq!(db.num_columns(), super::columns::v2::NUM_COLUMNS as u32); - let db = DbAdapter::new(db, columns::v2::ORDERED_COL); + assert_eq!(db.num_columns(), super::columns::v3::NUM_COLUMNS as u32); + let db = DbAdapter::new(db, columns::v3::ORDERED_COL); // Fill the approval voting column with test data. 
migrate_approval_db_v1_to_v2_fill_test_data( std::sync::Arc::new(db), @@ -546,9 +549,9 @@ mod tests { try_upgrade_db(&db_dir.path(), DatabaseKind::RocksDB).unwrap(); - let db_cfg = DatabaseConfig::with_columns(super::columns::v2::NUM_COLUMNS); + let db_cfg = DatabaseConfig::with_columns(super::columns::v3::NUM_COLUMNS); let db = Database::open(&db_cfg, db_path).unwrap(); - let db = DbAdapter::new(db, columns::v2::ORDERED_COL); + let db = DbAdapter::new(db, columns::v3::ORDERED_COL); migrate_approval_db_v1_to_v2_sanity_check( std::sync::Arc::new(db), @@ -558,6 +561,7 @@ mod tests { .unwrap(); } + #[test] fn test_paritydb_migrate_2_to_3() { use parity_db::Db; From 78de04e0eca1f596e0b33e6420deb01ce062bc6a Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 9 Jun 2023 11:41:08 +0000 Subject: [PATCH 075/105] Fix zombienet merge damage Signed-off-by: Andrei Sandu --- scripts/ci/gitlab/pipeline/zombienet.yml | 2 +- ...ains-max-tranche0.toml => 0005-parachains-max-tranche0.toml} | 0 ...ns-max-tranche0.zndsl => 0005-parachains-max-tranche0.zndsl} | 2 +- 3 files changed, 2 insertions(+), 2 deletions(-) rename zombienet_tests/functional/{0004-parachains-max-tranche0.toml => 0005-parachains-max-tranche0.toml} (100%) rename zombienet_tests/functional/{0004-parachains-max-tranche0.zndsl => 0005-parachains-max-tranche0.zndsl} (97%) diff --git a/scripts/ci/gitlab/pipeline/zombienet.yml b/scripts/ci/gitlab/pipeline/zombienet.yml index 3c1d85a09256..c5c2b4b4003b 100644 --- a/scripts/ci/gitlab/pipeline/zombienet.yml +++ b/scripts/ci/gitlab/pipeline/zombienet.yml @@ -178,7 +178,7 @@ zombienet-tests-parachains-max-tranche0-approvals: script: - /home/nonroot/zombie-net/scripts/ci/run-test-env-manager.sh --github-remote-dir="${GH_DIR}" - --test="0004-parachains-max-tranche0.zndsl" + --test="0005-parachains-max-tranche0.zndsl" allow_failure: false retry: 2 tags: diff --git a/zombienet_tests/functional/0004-parachains-max-tranche0.toml b/zombienet_tests/functional/0005-parachains-max-tranche0.toml similarity index 100% rename from zombienet_tests/functional/0004-parachains-max-tranche0.toml rename to zombienet_tests/functional/0005-parachains-max-tranche0.toml diff --git a/zombienet_tests/functional/0004-parachains-max-tranche0.zndsl b/zombienet_tests/functional/0005-parachains-max-tranche0.zndsl similarity index 97% rename from zombienet_tests/functional/0004-parachains-max-tranche0.zndsl rename to zombienet_tests/functional/0005-parachains-max-tranche0.zndsl index 4be4812fd1bd..48a56f60c9bc 100644 --- a/zombienet_tests/functional/0004-parachains-max-tranche0.zndsl +++ b/zombienet_tests/functional/0005-parachains-max-tranche0.zndsl @@ -1,5 +1,5 @@ Description: Test if parachains make progress with most of approvals being tranch0 -Network: ./0004-parachains-max-tranche0.toml +Network: ./0005-parachains-max-tranche0.toml Creds: config # Check authority status. 
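
[Editorial note] The two patches that follow (076 and 077) rework `try_upgrade_db` so that it takes a target version and applies single-step migrations (`try_upgrade_db_to_next_version`) in a bounded loop, tracking progress in the `parachain_db_version` marker file; an upgrade such as 0 -> 4 therefore runs as 0 -> 1 -> 2 -> 3 -> 4. The standalone Rust sketch below illustrates only that stepwise pattern. The helper names and the "missing version file means version 0" simplification are assumptions for illustration; this is not the actual `upgrade.rs` code from these patches.

    use std::{fs, io, path::Path};

    type Version = u32;

    // Read the on-disk version marker; for this sketch a missing file simply means version 0.
    fn read_version(db_path: &Path) -> io::Result<Version> {
        match fs::read_to_string(db_path.join("parachain_db_version")) {
            Ok(contents) => contents
                .trim()
                .parse()
                .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, format!("bad version: {}", e))),
            Err(e) if e.kind() == io::ErrorKind::NotFound => Ok(0),
            Err(e) => Err(e),
        }
    }

    // Hypothetical single-step migration from `from` to `from + 1`
    // (column additions/removals and data rewrites would happen here).
    fn migrate_one_step(_db_path: &Path, from: Version) -> io::Result<Version> {
        Ok(from + 1)
    }

    // Step through migrations until the target version is reached, persisting the
    // version marker after every successful step and refusing to loop forever.
    fn upgrade_to(db_path: &Path, target: Version) -> io::Result<()> {
        const MAX_STEPS: u32 = 30;
        let mut version = read_version(db_path)?;
        for _ in 0..MAX_STEPS {
            if version >= target {
                return Ok(());
            }
            version = migrate_one_step(db_path, version)?;
            fs::write(db_path.join("parachain_db_version"), version.to_string())?;
        }
        Err(io::Error::new(io::ErrorKind::Other, "migration did not converge"))
    }
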
From 112b0be9c6daaa2de942e3bfdefc0f2610d6bcb4 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Mon, 12 Jun 2023 09:26:47 +0000 Subject: [PATCH 076/105] Implement seq updates 0->4 now possible for parachainsDB Signed-off-by: Andrei Sandu --- node/service/src/parachains_db/mod.rs | 33 +++--- node/service/src/parachains_db/upgrade.rs | 128 ++++++++++++++-------- 2 files changed, 103 insertions(+), 58 deletions(-) diff --git a/node/service/src/parachains_db/mod.rs b/node/service/src/parachains_db/mod.rs index 519afbe0ccd1..d4926a4cb00b 100644 --- a/node/service/src/parachains_db/mod.rs +++ b/node/service/src/parachains_db/mod.rs @@ -41,7 +41,12 @@ pub(crate) mod columns { pub const COL_SESSION_WINDOW_DATA: u32 = 5; } + // Version 4 only changed structures in approval voting, so we can re-export the v4 definitions. pub mod v3 { + pub use super::v4::*; + } + + pub mod v4 { pub const NUM_COLUMNS: u32 = 5; pub const COL_AVAILABILITY_DATA: u32 = 0; pub const COL_AVAILABILITY_META: u32 = 1; @@ -73,14 +78,14 @@ pub struct ColumnsConfig { /// The real columns used by the parachains DB. #[cfg(any(test, feature = "full-node"))] pub const REAL_COLUMNS: ColumnsConfig = ColumnsConfig { - col_availability_data: columns::v3::COL_AVAILABILITY_DATA, - col_availability_meta: columns::v3::COL_AVAILABILITY_META, - col_approval_data: columns::v3::COL_APPROVAL_DATA, - col_chain_selection_data: columns::v3::COL_CHAIN_SELECTION_DATA, - col_dispute_coordinator_data: columns::v3::COL_DISPUTE_COORDINATOR_DATA, + col_availability_data: columns::v4::COL_AVAILABILITY_DATA, + col_availability_meta: columns::v4::COL_AVAILABILITY_META, + col_approval_data: columns::v4::COL_APPROVAL_DATA, + col_chain_selection_data: columns::v4::COL_CHAIN_SELECTION_DATA, + col_dispute_coordinator_data: columns::v4::COL_DISPUTE_COORDINATOR_DATA, }; -#[derive(PartialEq)] +#[derive(PartialEq, Copy, Clone)] pub(crate) enum DatabaseKind { ParityDB, RocksDB, @@ -125,28 +130,28 @@ pub fn open_creating_rocksdb( let path = root.join("parachains").join("db"); - let mut db_config = DatabaseConfig::with_columns(columns::v3::NUM_COLUMNS); + let mut db_config = DatabaseConfig::with_columns(columns::v4::NUM_COLUMNS); let _ = db_config .memory_budget - .insert(columns::v3::COL_AVAILABILITY_DATA, cache_sizes.availability_data); + .insert(columns::v4::COL_AVAILABILITY_DATA, cache_sizes.availability_data); let _ = db_config .memory_budget - .insert(columns::v3::COL_AVAILABILITY_META, cache_sizes.availability_meta); + .insert(columns::v4::COL_AVAILABILITY_META, cache_sizes.availability_meta); let _ = db_config .memory_budget - .insert(columns::v3::COL_APPROVAL_DATA, cache_sizes.approval_data); + .insert(columns::v4::COL_APPROVAL_DATA, cache_sizes.approval_data); let path_str = path .to_str() .ok_or_else(|| other_io_error(format!("Bad database path: {:?}", path)))?; std::fs::create_dir_all(&path_str)?; - upgrade::try_upgrade_db(&path, DatabaseKind::RocksDB)?; + upgrade::try_upgrade_db(&path, DatabaseKind::RocksDB, upgrade::CURRENT_VERSION)?; let db = Database::open(&db_config, &path_str)?; let db = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new( db, - columns::v3::ORDERED_COL, + columns::v4::ORDERED_COL, ); Ok(Arc::new(db)) @@ -164,14 +169,14 @@ pub fn open_creating_paritydb( .ok_or_else(|| other_io_error(format!("Bad database path: {:?}", path)))?; std::fs::create_dir_all(&path_str)?; - upgrade::try_upgrade_db(&path, DatabaseKind::ParityDB)?; + upgrade::try_upgrade_db(&path, DatabaseKind::ParityDB, upgrade::CURRENT_VERSION)?; let db = 
parity_db::Db::open_or_create(&upgrade::paritydb_version_3_config(&path)) .map_err(|err| io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))?; let db = polkadot_node_subsystem_util::database::paritydb_impl::DbAdapter::new( db, - columns::v3::ORDERED_COL, + columns::v4::ORDERED_COL, ); Ok(Arc::new(db)) } diff --git a/node/service/src/parachains_db/upgrade.rs b/node/service/src/parachains_db/upgrade.rs index bfbbaf5f8d3c..07a1a5135f69 100644 --- a/node/service/src/parachains_db/upgrade.rs +++ b/node/service/src/parachains_db/upgrade.rs @@ -32,7 +32,7 @@ const VERSION_FILE_NAME: &'static str = "parachain_db_version"; /// Current db version. /// Version 4 changes approval db format for `OurAssignment`. -const CURRENT_VERSION: Version = 4; +pub(crate) const CURRENT_VERSION: Version = 4; #[derive(thiserror::Error, Debug)] pub enum Error { @@ -55,10 +55,31 @@ impl From for io::Error { } } -/// Try upgrading parachain's database to the current version. -pub(crate) fn try_upgrade_db(db_path: &Path, db_kind: DatabaseKind) -> Result<(), Error> { +/// Try upgrading parachain's database to a target version. +pub(crate) fn try_upgrade_db( + db_path: &Path, + db_kind: DatabaseKind, + target_version: Version, +) -> Result<(), Error> { + // Loop upgrades until we reach the target version + loop { + let version = try_upgrade_db_to_next_version(db_path, db_kind)?; + if version == target_version { + break + } + } + Ok(()) +} + +/// Try upgrading parachain's database to the next version. +/// If successfull, it returns the current version. +pub(crate) fn try_upgrade_db_to_next_version( + db_path: &Path, + db_kind: DatabaseKind, +) -> Result { let is_empty = db_path.read_dir().map_or(true, |mut d| d.next().is_none()); - if !is_empty { + + let new_version = if !is_empty { match get_db_version(db_path)? { // 0 -> 1 migration Some(0) => migrate_from_version_0_to_1(db_path, db_kind)?, @@ -69,20 +90,23 @@ pub(crate) fn try_upgrade_db(db_path: &Path, db_kind: DatabaseKind) -> Result<() // 3 -> 4 migration Some(3) => migrate_from_version_3_to_4(db_path, db_kind)?, // Already at current version, do nothing. - Some(CURRENT_VERSION) => (), + Some(CURRENT_VERSION) => CURRENT_VERSION, // This is an arbitrary future version, we don't handle it. Some(v) => return Err(Error::FutureVersion { current: CURRENT_VERSION, got: v }), // No version file. For `RocksDB` we dont need to do anything. - None if db_kind == DatabaseKind::RocksDB => (), + None if db_kind == DatabaseKind::RocksDB => CURRENT_VERSION, // No version file. `ParityDB` did not previously have a version defined. // We handle this as a `0 -> 1` migration. None if db_kind == DatabaseKind::ParityDB => migrate_from_version_0_to_1(db_path, db_kind)?, None => unreachable!(), } - } + } else { + CURRENT_VERSION + }; - update_version(db_path) + update_version(db_path, new_version)?; + Ok(new_version) } /// Reads current database version from the file at given path. @@ -99,9 +123,9 @@ fn get_db_version(path: &Path) -> Result, Error> { /// Writes current database version to the file. /// Creates a new file if the version file does not exist yet. -fn update_version(path: &Path) -> Result<(), Error> { +fn update_version(path: &Path, new_version: Version) -> Result<(), Error> { fs::create_dir_all(path)?; - fs::write(version_file_path(path), CURRENT_VERSION.to_string()).map_err(Into::into) + fs::write(version_file_path(path), new_version.to_string()).map_err(Into::into) } /// Returns the version file path. 
@@ -111,7 +135,7 @@ fn version_file_path(path: &Path) -> PathBuf { file_path } -fn migrate_from_version_0_to_1(path: &Path, db_kind: DatabaseKind) -> Result<(), Error> { +fn migrate_from_version_0_to_1(path: &Path, db_kind: DatabaseKind) -> Result { gum::info!(target: LOG_TARGET, "Migrating parachains db from version 0 to version 1 ..."); match db_kind { @@ -124,7 +148,7 @@ fn migrate_from_version_0_to_1(path: &Path, db_kind: DatabaseKind) -> Result<(), }) } -fn migrate_from_version_1_to_2(path: &Path, db_kind: DatabaseKind) -> Result<(), Error> { +fn migrate_from_version_1_to_2(path: &Path, db_kind: DatabaseKind) -> Result { gum::info!(target: LOG_TARGET, "Migrating parachains db from version 1 to version 2 ..."); match db_kind { @@ -139,7 +163,7 @@ fn migrate_from_version_1_to_2(path: &Path, db_kind: DatabaseKind) -> Result<(), // Migrade approval voting database. `OurAssignment` has been changed to support the v2 assignments. // As these are backwards compatible, we'll convert the old entries in the new format. -fn migrate_from_version_3_to_4(path: &Path, db_kind: DatabaseKind) -> Result<(), Error> { +fn migrate_from_version_3_to_4(path: &Path, db_kind: DatabaseKind) -> Result { gum::info!(target: LOG_TARGET, "Migrating parachains db from version 3 to version 4 ..."); use polkadot_node_subsystem_util::database::{ kvdb_impl::DbAdapter as RocksDbAdapter, paritydb_impl::DbAdapter as ParityDbAdapter, @@ -149,7 +173,7 @@ fn migrate_from_version_3_to_4(path: &Path, db_kind: DatabaseKind) -> Result<(), let approval_db_config = ApprovalDbConfig { col_approval_data: super::REAL_COLUMNS.col_approval_data }; - let result = match db_kind { + let _result = match db_kind { DatabaseKind::ParityDB => { let db = ParityDbAdapter::new( parity_db::Db::open(&paritydb_version_3_config(path)) @@ -177,10 +201,10 @@ fn migrate_from_version_3_to_4(path: &Path, db_kind: DatabaseKind) -> Result<(), }; gum::info!(target: LOG_TARGET, "Migration complete! 
"); - Ok(result) + Ok(CURRENT_VERSION) } -fn migrate_from_version_2_to_3(path: &Path, db_kind: DatabaseKind) -> Result<(), Error> { +fn migrate_from_version_2_to_3(path: &Path, db_kind: DatabaseKind) -> Result { gum::info!(target: LOG_TARGET, "Migrating parachains db from version 2 to version 3 ..."); match db_kind { DatabaseKind::ParityDB => paritydb_migrate_from_version_2_to_3(path), @@ -194,7 +218,7 @@ fn migrate_from_version_2_to_3(path: &Path, db_kind: DatabaseKind) -> Result<(), /// Migration from version 0 to version 1: /// * the number of columns has changed from 3 to 5; -fn rocksdb_migrate_from_version_0_to_1(path: &Path) -> Result<(), Error> { +fn rocksdb_migrate_from_version_0_to_1(path: &Path) -> Result { use kvdb_rocksdb::{Database, DatabaseConfig}; let db_path = path @@ -206,12 +230,12 @@ fn rocksdb_migrate_from_version_0_to_1(path: &Path) -> Result<(), Error> { db.add_column()?; db.add_column()?; - Ok(()) + Ok(1) } /// Migration from version 1 to version 2: /// * the number of columns has changed from 5 to 6; -fn rocksdb_migrate_from_version_1_to_2(path: &Path) -> Result<(), Error> { +fn rocksdb_migrate_from_version_1_to_2(path: &Path) -> Result { use kvdb_rocksdb::{Database, DatabaseConfig}; let db_path = path @@ -222,10 +246,10 @@ fn rocksdb_migrate_from_version_1_to_2(path: &Path) -> Result<(), Error> { db.add_column()?; - Ok(()) + Ok(2) } -fn rocksdb_migrate_from_version_2_to_3(path: &Path) -> Result<(), Error> { +fn rocksdb_migrate_from_version_2_to_3(path: &Path) -> Result { use kvdb_rocksdb::{Database, DatabaseConfig}; let db_path = path @@ -236,7 +260,7 @@ fn rocksdb_migrate_from_version_2_to_3(path: &Path) -> Result<(), Error> { db.remove_last_column()?; - Ok(()) + Ok(3) } // This currently clears columns which had their configs altered between versions. 
@@ -300,7 +324,7 @@ fn paritydb_fix_columns( pub(crate) fn paritydb_version_1_config(path: &Path) -> parity_db::Options { let mut options = parity_db::Options::with_columns(&path, super::columns::v1::NUM_COLUMNS as u8); - for i in columns::v3::ORDERED_COL { + for i in columns::v4::ORDERED_COL { options.columns[*i as usize].btree_index = true; } @@ -311,7 +335,7 @@ pub(crate) fn paritydb_version_1_config(path: &Path) -> parity_db::Options { pub(crate) fn paritydb_version_2_config(path: &Path) -> parity_db::Options { let mut options = parity_db::Options::with_columns(&path, super::columns::v2::NUM_COLUMNS as u8); - for i in columns::v3::ORDERED_COL { + for i in columns::v4::ORDERED_COL { options.columns[*i as usize].btree_index = true; } @@ -334,8 +358,8 @@ pub(crate) fn paritydb_version_3_config(path: &Path) -> parity_db::Options { pub(crate) fn paritydb_version_0_config(path: &Path) -> parity_db::Options { let mut options = parity_db::Options::with_columns(&path, super::columns::v1::NUM_COLUMNS as u8); - options.columns[super::columns::v3::COL_AVAILABILITY_META as usize].btree_index = true; - options.columns[super::columns::v3::COL_CHAIN_SELECTION_DATA as usize].btree_index = true; + options.columns[super::columns::v4::COL_AVAILABILITY_META as usize].btree_index = true; + options.columns[super::columns::v4::COL_CHAIN_SELECTION_DATA as usize].btree_index = true; options } @@ -345,41 +369,41 @@ pub(crate) fn paritydb_version_0_config(path: &Path) -> parity_db::Options { /// - upgrading from v0.9.23 or earlier -> the `dispute coordinator column` was changed /// - upgrading from v0.9.24+ -> this is a no op assuming the DB has been manually fixed as per /// release notes -fn paritydb_migrate_from_version_0_to_1(path: &Path) -> Result<(), Error> { +fn paritydb_migrate_from_version_0_to_1(path: &Path) -> Result { // Delete the `dispute coordinator` column if needed (if column configuration is changed). paritydb_fix_columns( path, paritydb_version_1_config(path), - vec![super::columns::v3::COL_DISPUTE_COORDINATOR_DATA], + vec![super::columns::v4::COL_DISPUTE_COORDINATOR_DATA], )?; - Ok(()) + Ok(1) } /// Migration from version 1 to version 2: /// - add a new column for session information storage -fn paritydb_migrate_from_version_1_to_2(path: &Path) -> Result<(), Error> { +fn paritydb_migrate_from_version_1_to_2(path: &Path) -> Result { let mut options = paritydb_version_1_config(path); // Adds the session info column. 
parity_db::Db::add_column(&mut options, Default::default()) .map_err(|e| other_io_error(format!("Error adding column {:?}", e)))?; - Ok(()) + Ok(2) } /// Migration from version 2 to version 3: /// - drop the column used by `RollingSessionWindow` -fn paritydb_migrate_from_version_2_to_3(path: &Path) -> Result<(), Error> { +fn paritydb_migrate_from_version_2_to_3(path: &Path) -> Result { parity_db::Db::drop_last_column(&mut paritydb_version_2_config(path)) .map_err(|e| other_io_error(format!("Error removing COL_SESSION_WINDOW_DATA {:?}", e)))?; - Ok(()) + Ok(3) } #[cfg(test)] mod tests { use super::{ - columns::{v2::COL_SESSION_WINDOW_DATA, v3::*}, + columns::{v2::COL_SESSION_WINDOW_DATA, v4::*}, *, }; use polkadot_node_core_approval_voting::approval_db::v2::migrate_approval_db_v1_to_v2_fill_test_data; @@ -400,7 +424,7 @@ mod tests { .unwrap(); } - try_upgrade_db(&path, DatabaseKind::ParityDB).unwrap(); + try_upgrade_db(&path, DatabaseKind::ParityDB, 1).unwrap(); let db = Db::open(&paritydb_version_1_config(&path)).unwrap(); assert_eq!(db.get(COL_DISPUTE_COORDINATOR_DATA as u8, b"1234").unwrap(), None); @@ -434,7 +458,7 @@ mod tests { assert_eq!(db.num_columns(), columns::v1::NUM_COLUMNS as u8); } - try_upgrade_db(&path, DatabaseKind::ParityDB).unwrap(); + try_upgrade_db(&path, DatabaseKind::ParityDB, 2).unwrap(); let db = Db::open(&paritydb_version_2_config(&path)).unwrap(); @@ -477,7 +501,7 @@ mod tests { // We need to properly set db version for upgrade to work. fs::write(version_file_path(db_dir.path()), "1").expect("Failed to write DB version"); { - let db = DbAdapter::new(db, columns::v3::ORDERED_COL); + let db = DbAdapter::new(db, columns::v4::ORDERED_COL); db.write(DBTransaction { ops: vec![DBOp::Insert { col: COL_DISPUTE_COORDINATOR_DATA, @@ -488,14 +512,14 @@ mod tests { .unwrap(); } - try_upgrade_db(&db_dir.path(), DatabaseKind::RocksDB).unwrap(); + try_upgrade_db(&db_dir.path(), DatabaseKind::RocksDB, 2).unwrap(); let db_cfg = DatabaseConfig::with_columns(super::columns::v2::NUM_COLUMNS); let db = Database::open(&db_cfg, db_path).unwrap(); assert_eq!(db.num_columns(), super::columns::v2::NUM_COLUMNS); - let db = DbAdapter::new(db, columns::v3::ORDERED_COL); + let db = DbAdapter::new(db, columns::v4::ORDERED_COL); assert_eq!( db.get(COL_DISPUTE_COORDINATOR_DATA, b"1234").unwrap(), @@ -547,11 +571,11 @@ mod tests { .unwrap() }; - try_upgrade_db(&db_dir.path(), DatabaseKind::RocksDB).unwrap(); + try_upgrade_db(&db_dir.path(), DatabaseKind::RocksDB, 4).unwrap(); - let db_cfg = DatabaseConfig::with_columns(super::columns::v3::NUM_COLUMNS); + let db_cfg = DatabaseConfig::with_columns(super::columns::v4::NUM_COLUMNS); let db = Database::open(&db_cfg, db_path).unwrap(); - let db = DbAdapter::new(db, columns::v3::ORDERED_COL); + let db = DbAdapter::new(db, columns::v4::ORDERED_COL); migrate_approval_db_v1_to_v2_sanity_check( std::sync::Arc::new(db), @@ -561,6 +585,22 @@ mod tests { .unwrap(); } + #[test] + fn test_migrate_0_to_4() { + use kvdb_rocksdb::{Database, DatabaseConfig}; + + let db_dir = tempfile::tempdir().unwrap(); + let db_path = db_dir.path().to_str().unwrap(); + + fs::write(version_file_path(db_dir.path()), "0").expect("Failed to write DB version"); + try_upgrade_db(&db_dir.path(), DatabaseKind::RocksDB, 4).unwrap(); + + let db_cfg = DatabaseConfig::with_columns(super::columns::v4::NUM_COLUMNS); + let db = Database::open(&db_cfg, db_path).unwrap(); + + assert_eq!(db.num_columns(), columns::v4::NUM_COLUMNS); + } + #[test] fn test_paritydb_migrate_2_to_3() { use parity_db::Db; 
@@ -586,7 +626,7 @@ mod tests {
 			assert_eq!(db.num_columns(), columns::v2::NUM_COLUMNS as u8);
 		}
 
-		try_upgrade_db(&path, DatabaseKind::ParityDB).unwrap();
+		try_upgrade_db(&path, DatabaseKind::ParityDB, 3).unwrap();
 
 		let db = Db::open(&paritydb_version_3_config(&path)).unwrap();
 
@@ -609,7 +649,7 @@ mod tests {
 		// We need to properly set db version for upgrade to work.
 		fs::write(version_file_path(db_dir.path()), "2").expect("Failed to write DB version");
 
-		try_upgrade_db(&db_dir.path(), DatabaseKind::RocksDB).unwrap();
+		try_upgrade_db(&db_dir.path(), DatabaseKind::RocksDB, 3).unwrap();
 
 		let db_cfg = DatabaseConfig::with_columns(super::columns::v3::NUM_COLUMNS);
 		let db = Database::open(&db_cfg, db_path).unwrap();

From 4b1743c89117258db66d6505e2f37859171281f4 Mon Sep 17 00:00:00 2001
From: Andrei Sandu
Date: Tue, 13 Jun 2023 07:53:40 +0000
Subject: [PATCH 077/105] add/fix db upgrade tests

Signed-off-by: Andrei Sandu
---
 node/service/src/parachains_db/upgrade.rs | 50 +++++++++++++++++------
 1 file changed, 38 insertions(+), 12 deletions(-)

diff --git a/node/service/src/parachains_db/upgrade.rs b/node/service/src/parachains_db/upgrade.rs
index 07a1a5135f69..020b058d7979 100644
--- a/node/service/src/parachains_db/upgrade.rs
+++ b/node/service/src/parachains_db/upgrade.rs
@@ -44,6 +44,8 @@ pub enum Error {
 	FutureVersion { current: Version, got: Version },
 	#[error("Parachain DB migration failed")]
 	MigrationFailed,
+	#[error("Parachain DB migration would take forever")]
+	MigrationLoop,
 }
 
 impl From<Error> for io::Error {
@@ -61,14 +63,18 @@ pub(crate) fn try_upgrade_db(
 	db_kind: DatabaseKind,
 	target_version: Version,
 ) -> Result<(), Error> {
-	// Loop upgrades until we reach the target version
-	loop {
+	// Ensure we don't loop forever below because of a bug.
+	const MAX_MIGRATIONS: u32 = 30;
+
+	// Loop migrations until we reach the target version.
+	for _ in 0..MAX_MIGRATIONS {
 		let version = try_upgrade_db_to_next_version(db_path, db_kind)?;
 		if version == target_version {
-			break
+			return Ok(())
 		}
 	}
-	Ok(())
+
+	Err(Error::MigrationLoop)
 }
 
 /// Try upgrading parachain's database to the next version.
@@ -357,9 +363,8 @@ pub(crate) fn paritydb_version_3_config(path: &Path) -> parity_db::Options { #[cfg(test)] pub(crate) fn paritydb_version_0_config(path: &Path) -> parity_db::Options { let mut options = - parity_db::Options::with_columns(&path, super::columns::v1::NUM_COLUMNS as u8); + parity_db::Options::with_columns(&path, super::columns::v0::NUM_COLUMNS as u8); options.columns[super::columns::v4::COL_AVAILABILITY_META as usize].btree_index = true; - options.columns[super::columns::v4::COL_CHAIN_SELECTION_DATA as usize].btree_index = true; options } @@ -417,17 +422,17 @@ mod tests { { let db = Db::open_or_create(&paritydb_version_0_config(&path)).unwrap(); - db.commit(vec![ - (COL_DISPUTE_COORDINATOR_DATA as u8, b"1234".to_vec(), Some(b"somevalue".to_vec())), - (COL_AVAILABILITY_META as u8, b"5678".to_vec(), Some(b"somevalue".to_vec())), - ]) + db.commit(vec![( + COL_AVAILABILITY_META as u8, + b"5678".to_vec(), + Some(b"somevalue".to_vec()), + )]) .unwrap(); } try_upgrade_db(&path, DatabaseKind::ParityDB, 1).unwrap(); let db = Db::open(&paritydb_version_1_config(&path)).unwrap(); - assert_eq!(db.get(COL_DISPUTE_COORDINATOR_DATA as u8, b"1234").unwrap(), None); assert_eq!( db.get(COL_AVAILABILITY_META as u8, b"5678").unwrap(), Some("somevalue".as_bytes().to_vec()) @@ -586,7 +591,7 @@ mod tests { } #[test] - fn test_migrate_0_to_4() { + fn test_rocksdb_migrate_0_to_4() { use kvdb_rocksdb::{Database, DatabaseConfig}; let db_dir = tempfile::tempdir().unwrap(); @@ -601,6 +606,27 @@ mod tests { assert_eq!(db.num_columns(), columns::v4::NUM_COLUMNS); } + #[test] + fn test_paritydb_migrate_0_to_4() { + use parity_db::Db; + + let db_dir = tempfile::tempdir().unwrap(); + let path = db_dir.path(); + + // We need to properly set db version for upgrade to work. 
+ fs::write(version_file_path(path), "0").expect("Failed to write DB version"); + + { + let db = Db::open_or_create(&paritydb_version_0_config(&path)).unwrap(); + assert_eq!(db.num_columns(), columns::v0::NUM_COLUMNS as u8); + } + + try_upgrade_db(&path, DatabaseKind::ParityDB, 4).unwrap(); + + let db = Db::open(&paritydb_version_3_config(&path)).unwrap(); + assert_eq!(db.num_columns(), columns::v4::NUM_COLUMNS as u8); + } + #[test] fn test_paritydb_migrate_2_to_3() { use parity_db::Db; From 4e0fac6aec0a72fc76850bb2c31ccb2c68d2b841 Mon Sep 17 00:00:00 2001 From: Javier Viola Date: Wed, 14 Jun 2023 09:30:52 -0300 Subject: [PATCH 078/105] update colander image version --- scripts/ci/gitlab/pipeline/zombienet.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/ci/gitlab/pipeline/zombienet.yml b/scripts/ci/gitlab/pipeline/zombienet.yml index c5c2b4b4003b..417096332f00 100644 --- a/scripts/ci/gitlab/pipeline/zombienet.yml +++ b/scripts/ci/gitlab/pipeline/zombienet.yml @@ -22,7 +22,7 @@ zombienet-tests-parachains-smoke-test: - export DEBUG=zombie,zombie::network-node - export ZOMBIENET_INTEGRATION_TEST_IMAGE=${PARACHAINS_IMAGE_NAME}:${PARACHAINS_IMAGE_TAG} - export MALUS_IMAGE=${MALUS_IMAGE_NAME}:${MALUS_IMAGE_TAG} - - export COL_IMAGE="docker.io/paritypr/colander:4519" # The collator image is fixed + - export COL_IMAGE="docker.io/paritypr/colander:master-94cadaea" # The collator image is fixed script: - /home/nonroot/zombie-net/scripts/ci/run-test-env-manager.sh --github-remote-dir="${GH_DIR}" From 7f973550d2a2bc4786d57c13359bb8cd93a4c986 Mon Sep 17 00:00:00 2001 From: Javier Viola Date: Wed, 14 Jun 2023 15:02:31 -0300 Subject: [PATCH 079/105] update image to polkadot-parachain --- scripts/ci/gitlab/pipeline/zombienet.yml | 2 +- zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/ci/gitlab/pipeline/zombienet.yml b/scripts/ci/gitlab/pipeline/zombienet.yml index 417096332f00..46dc9b0bf173 100644 --- a/scripts/ci/gitlab/pipeline/zombienet.yml +++ b/scripts/ci/gitlab/pipeline/zombienet.yml @@ -204,7 +204,7 @@ zombienet-test-parachains-upgrade-smoke-test: - echo "${GH_DIR}" - export DEBUG=zombie,zombie::network-node - export ZOMBIENET_INTEGRATION_TEST_IMAGE=${PARACHAINS_IMAGE_NAME}:${PARACHAINS_IMAGE_TAG} - - export COL_IMAGE="docker.io/parity/polkadot-collator:latest" # Use cumulus lastest image + - export COL_IMAGE="docker.io/parity/polkadot-parachain:latest" # Use cumulus lastest image script: - /home/nonroot/zombie-net/scripts/ci/run-test-env-manager.sh --github-remote-dir="${GH_DIR}" diff --git a/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.toml b/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.toml index 0becb408550a..88b789f37fa1 100644 --- a/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.toml +++ b/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.toml @@ -31,7 +31,7 @@ cumulus_based = true [parachains.collator] name = "collator01" image = "{{COL_IMAGE}}" - command = "polkadot-collator" + command = "polkadot-parachain" [[parachains.collator.env]] name = "RUST_LOG" From d3560814687adb4fe4e510e9d3c8902e0a89a165 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 4 Jul 2023 15:16:22 +0000 Subject: [PATCH 080/105] review Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/criteria.rs | 16 ++++++++++++---- node/network/approval-distribution/src/lib.rs | 2 +- node/primitives/src/approval.rs | 2 +- 3 files changed, 14 insertions(+), 6 
deletions(-) diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index 6c5c6ec59435..a2c38ae99b86 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -165,7 +165,8 @@ impl AsMut<[u8]> for BigArray { } } -/// Return an iterator to all core indices we are assigned to. +/// Takes the VRF output as input and returns a Vec of cores the validator is assigned +/// to as a tranche0 checker. fn relay_vrf_modulo_cores( vrf_in_out: &VRFInOut, // Configuration - `relay_vrf_modulo_samples`. @@ -173,6 +174,16 @@ fn relay_vrf_modulo_cores( // Configuration - `n_cores`. max_cores: u32, ) -> Vec { + if num_samples as usize > MAX_MODULO_SAMPLES { + gum::warn!( + target: LOG_TARGET, + n_cores = max_cores, + num_samples, + max_modulo_samples = MAX_MODULO_SAMPLES, + "`num_samples` is greater than `MAX_MODULO_SAMPLES`", + ); + } + vrf_in_out .make_bytes::(approval_types::v2::CORE_RANDOMNESS_CONTEXT) .0 @@ -653,9 +664,6 @@ pub(crate) enum InvalidAssignmentReason { /// /// This function does not check whether the core is actually a valid assignment or not. That should be done /// outside the scope of this function. -/// -/// For v2 assignments of type `AssignmentCertKindV2::RelayVRFModuloCompact` we don't need to pass -/// `claimed_core_index` it won't be used in the check. pub(crate) fn check_assignment_cert( claimed_core_indices: CoreBitfield, validator_index: ValidatorIndex, diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index d1d44a51c70d..adc243400cd2 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -233,7 +233,7 @@ struct PeerEntry { pub version: ProtocolVersion, } -// In case the original gtid topology mechanisms don't work on their own, we need to trade bandwidth +// In case the original grid topology mechanisms don't work on their own, we need to trade bandwidth // for protocol liveliness by introducing aggression. // // Aggression has 3 levels: diff --git a/node/primitives/src/approval.rs b/node/primitives/src/approval.rs index 5510409da799..696ca055fe6a 100644 --- a/node/primitives/src/approval.rs +++ b/node/primitives/src/approval.rs @@ -360,7 +360,7 @@ pub mod v2 { /// An assignment story based on the VRF that authorized the relay-chain block where the /// candidate was included combined with a sample number. /// - /// The context used to produce bytes is [`v2::RELAY_VRF_MODULO_CONTEXT`] + /// The context used to produce bytes is [`v1::RELAY_VRF_MODULO_CONTEXT`] RelayVRFModulo { /// The sample number used in this cert. sample: u32, From a179646f66224da203c80d20c3180660c569d0be Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 4 Jul 2023 15:16:34 +0000 Subject: [PATCH 081/105] fmt Signed-off-by: Andrei Sandu --- .../dispute-coordinator/src/initialized.rs | 101 +++++++++--------- 1 file changed, 50 insertions(+), 51 deletions(-) diff --git a/node/core/dispute-coordinator/src/initialized.rs b/node/core/dispute-coordinator/src/initialized.rs index 0924f099d206..7d64c91fb63f 100644 --- a/node/core/dispute-coordinator/src/initialized.rs +++ b/node/core/dispute-coordinator/src/initialized.rs @@ -217,62 +217,61 @@ impl Initialized { gum::trace!(target: LOG_TARGET, "Waiting for message"); let mut overlay_db = OverlayedBackend::new(backend); let default_confirm = Box::new(|| Ok(())); - let confirm_write = match MuxedMessage::receive(ctx, &mut self.participation_receiver) - .await? 
- { - MuxedMessage::Participation(msg) => { - gum::trace!(target: LOG_TARGET, "MuxedMessage::Participation"); - let ParticipationStatement { - session, - candidate_hash, - candidate_receipt, - outcome, - } = self.participation.get_participation_result(ctx, msg).await?; - if let Some(valid) = outcome.validity() { - gum::trace!( - target: LOG_TARGET, - ?session, - ?candidate_hash, - ?valid, - "Issuing local statement based on participation outcome." - ); - self.issue_local_statement( - ctx, - &mut overlay_db, + let confirm_write = + match MuxedMessage::receive(ctx, &mut self.participation_receiver).await? { + MuxedMessage::Participation(msg) => { + gum::trace!(target: LOG_TARGET, "MuxedMessage::Participation"); + let ParticipationStatement { + session, candidate_hash, candidate_receipt, - session, - valid, - clock.now(), - ) - .await?; - } else { - gum::warn!(target: LOG_TARGET, ?outcome, "Dispute participation failed"); - } - default_confirm - }, - MuxedMessage::Subsystem(msg) => match msg { - FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), - FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => { - gum::trace!(target: LOG_TARGET, "OverseerSignal::ActiveLeaves"); - self.process_active_leaves_update( - ctx, - &mut overlay_db, - update, - clock.now(), - ) - .await?; + outcome, + } = self.participation.get_participation_result(ctx, msg).await?; + if let Some(valid) = outcome.validity() { + gum::trace!( + target: LOG_TARGET, + ?session, + ?candidate_hash, + ?valid, + "Issuing local statement based on participation outcome." + ); + self.issue_local_statement( + ctx, + &mut overlay_db, + candidate_hash, + candidate_receipt, + session, + valid, + clock.now(), + ) + .await?; + } else { + gum::warn!(target: LOG_TARGET, ?outcome, "Dispute participation failed"); + } default_confirm }, - FromOrchestra::Signal(OverseerSignal::BlockFinalized(_, n)) => { - gum::trace!(target: LOG_TARGET, "OverseerSignal::BlockFinalized"); - self.scraper.process_finalized_block(&n); - default_confirm + MuxedMessage::Subsystem(msg) => match msg { + FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), + FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => { + gum::trace!(target: LOG_TARGET, "OverseerSignal::ActiveLeaves"); + self.process_active_leaves_update( + ctx, + &mut overlay_db, + update, + clock.now(), + ) + .await?; + default_confirm + }, + FromOrchestra::Signal(OverseerSignal::BlockFinalized(_, n)) => { + gum::trace!(target: LOG_TARGET, "OverseerSignal::BlockFinalized"); + self.scraper.process_finalized_block(&n); + default_confirm + }, + FromOrchestra::Communication { msg } => + self.handle_incoming(ctx, &mut overlay_db, msg, clock.now()).await?, }, - FromOrchestra::Communication { msg } => - self.handle_incoming(ctx, &mut overlay_db, msg, clock.now()).await?, - }, - }; + }; if !overlay_db.is_empty() { let ops = overlay_db.into_write_ops(); From de6ea3348a67c057f13863f55fd74539b4ae6913 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 4 Jul 2023 17:29:25 +0000 Subject: [PATCH 082/105] Remove superfuous `assignment_bitfield` from `OurAssignment` Signed-off-by: Andrei Sandu --- .../src/approval_db/v2/migration_helpers.rs | 49 +--- .../approval-voting/src/approval_db/v2/mod.rs | 11 +- node/core/approval-voting/src/criteria.rs | 28 +- node/core/approval-voting/src/lib.rs | 250 +++++++++++------- .../approval-voting/src/persisted_entries.rs | 6 +- node/core/approval-voting/src/tests.rs | 5 - 6 files changed, 162 insertions(+), 187 deletions(-) diff --git 
a/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs b/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs index 22f93443840d..33aecf5e1b0d 100644 --- a/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs +++ b/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs @@ -58,21 +58,6 @@ fn make_bitvec(len: usize) -> BitVec { bitvec::bitvec![u8, BitOrderLsb0; 0; len] } -pub fn dummy_assignment_bitfield() -> CoreBitfield { - vec![ - CoreIndex(0), - CoreIndex(1), - CoreIndex(2), - CoreIndex(3), - CoreIndex(4), - CoreIndex(5), - CoreIndex(6), - CoreIndex(7), - ] - .try_into() - .expect("If failed, `CoreBitfield` is broken; qed") -} - /// Migrates `OurAssignment`, `CandidateEntry` and `ApprovalEntry` to version 2. /// Returns on any error. /// Must only be used in parachains DB migration code - `polkadot-service` crate. @@ -97,28 +82,13 @@ pub fn migrate_approval_db_v1_to_v2(db: Arc, config: Config) -> Re let mut counter = 0; // Get all candidate entries, approval entries and convert each of them. for block in all_blocks { - for (core_index, candidate_hash) in block.candidates() { + for (_core_index, candidate_hash) in block.candidates() { // Loading the candidate will also perform the conversion to the updated format and return // that represantation. - if let Some(mut candidate_entry) = backend + if let Some(candidate_entry) = backend .load_candidate_entry_v1(&candidate_hash) .map_err(|e| Error::InternalError(e))? { - // Here we patch the core bitfield for all assignments of the candidate. - for (_, approval_entry) in candidate_entry.block_assignments.iter_mut() { - if let Some(our_assignment) = approval_entry.our_assignment_mut() { - // Ensure we are actually patching a dummy bitfield produced by the `load_candidate_entry_v1` code. - // Cannot happen in practice, but better double check. - if our_assignment.assignment_bitfield() == &dummy_assignment_bitfield() { - *our_assignment.assignment_bitfield_mut() = (*core_index).into(); - } else { - gum::warn!( - target: crate::LOG_TARGET, - "Tried to convert an already valid bitfield." - ); - } - } - } // Write the updated representation. overlay.write_candidate_entry(candidate_entry); counter += 1; @@ -157,24 +127,13 @@ pub fn migrate_approval_db_v1_to_v2_sanity_check( // Iterate all blocks and approval entries. for block in all_blocks { - for (core_index, candidate_hash) in block.candidates() { + for (_core_index, candidate_hash) in block.candidates() { // Loading the candidate will also perform the conversion to the updated format and return // that represantation. - if let Some(mut candidate_entry) = backend + if let Some(candidate_entry) = backend .load_candidate_entry(&candidate_hash) .map_err(|e| Error::InternalError(e))? { - // We expect that all assignment bitfieds have only one bit set which corresponds to the core_index in the - // candidates block entry mapping. 
- for (_, approval_entry) in candidate_entry.block_assignments.iter_mut() { - if let Some(our_assignment) = approval_entry.our_assignment_mut() { - assert_eq!(our_assignment.assignment_bitfield().count_ones(), 1); - assert_eq!( - our_assignment.assignment_bitfield().first_one().unwrap(), - core_index.0 as usize - ); - } - } candidates.insert(candidate_entry.candidate.hash()); } } diff --git a/node/core/approval-voting/src/approval_db/v2/mod.rs b/node/core/approval-voting/src/approval_db/v2/mod.rs index 08a5b2bb9d9d..714ca8985fc2 100644 --- a/node/core/approval-voting/src/approval_db/v2/mod.rs +++ b/node/core/approval-voting/src/approval_db/v2/mod.rs @@ -17,10 +17,7 @@ //! Version 2 of the DB schema. use parity_scale_codec::{Decode, Encode}; -use polkadot_node_primitives::approval::{ - v1::DelayTranche, - v2::{AssignmentCertV2, CoreBitfield}, -}; +use polkadot_node_primitives::approval::{v1::DelayTranche, v2::AssignmentCertV2}; use polkadot_node_subsystem::{SubsystemError, SubsystemResult}; use polkadot_node_subsystem_util::database::{DBTransaction, Database}; use polkadot_primitives::{ @@ -46,8 +43,8 @@ pub mod tests; // DB migration support. pub use migration_helpers::{ - dummy_assignment_bitfield, migrate_approval_db_v1_to_v2, - migrate_approval_db_v1_to_v2_fill_test_data, migrate_approval_db_v1_to_v2_sanity_check, + migrate_approval_db_v1_to_v2, migrate_approval_db_v1_to_v2_fill_test_data, + migrate_approval_db_v1_to_v2_sanity_check, }; /// `DbBackend` is a concrete implementation of the higher-level Backend trait @@ -186,8 +183,6 @@ pub struct OurAssignment { pub validator_index: ValidatorIndex, /// Whether the assignment has been triggered already. pub triggered: bool, - /// A subset of the core indices obtained from the VRF output. - pub assignment_bitfield: CoreBitfield, } /// Metadata regarding a specific tranche of assignments for a specific candidate. diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index a2c38ae99b86..4243d4a12dce 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -32,7 +32,6 @@ use sp_application_crypto::ByteArray; use merlin::Transcript; use schnorrkel::vrf::VRFInOut; -use super::approval_db::v2::dummy_assignment_bitfield; use itertools::Itertools; use std::collections::{hash_map::Entry, HashMap}; @@ -46,8 +45,6 @@ pub struct OurAssignment { validator_index: ValidatorIndex, // Whether the assignment has been triggered already. triggered: bool, - // The core indices obtained from the VRF output. - assignment_bitfield: CoreBitfield, } impl OurAssignment { @@ -70,15 +67,6 @@ impl OurAssignment { pub(crate) fn mark_triggered(&mut self) { self.triggered = true; } - - pub(crate) fn assignment_bitfield(&self) -> &CoreBitfield { - &self.assignment_bitfield - } - - // Needed for v1 to v2 db migration. 
- pub(crate) fn assignment_bitfield_mut(&mut self) -> &mut CoreBitfield { - &mut self.assignment_bitfield - } } impl From for OurAssignment { @@ -88,7 +76,6 @@ impl From for OurAssignment { tranche: entry.tranche, validator_index: entry.validator_index, triggered: entry.triggered, - assignment_bitfield: entry.assignment_bitfield, } } } @@ -100,7 +87,6 @@ impl From for crate::approval_db::v2::OurAssignment { tranche: entry.tranche, validator_index: entry.validator_index, triggered: entry.triggered, - assignment_bitfield: entry.assignment_bitfield, } } } @@ -479,7 +465,6 @@ fn compute_relay_vrf_modulo_assignments_v1( tranche: 0, validator_index, triggered: false, - assignment_bitfield: core.into(), }); } } @@ -555,7 +540,7 @@ fn compute_relay_vrf_modulo_assignments_v2( }; // All assignments of type RelayVRFModulo have tranche 0. - OurAssignment { cert, tranche: 0, validator_index, triggered: false, assignment_bitfield } + OurAssignment { cert, tranche: 0, validator_index, triggered: false } }) { for core_index in assigned_cores { assignments.insert(core_index, assignment.clone()); @@ -589,13 +574,7 @@ fn compute_relay_vrf_delay_assignments( }, }; - let our_assignment = OurAssignment { - cert, - tranche, - validator_index, - triggered: false, - assignment_bitfield: core.into(), - }; + let our_assignment = OurAssignment { cert, tranche, validator_index, triggered: false }; let used = match assignments.entry(core) { Entry::Vacant(e) => { @@ -839,9 +818,6 @@ impl From for OurAssignment { validator_index: value.validator_index, // Whether the assignment has been triggered already. triggered: value.triggered, - // This is a dummy value, assignment bitfield will be set later. - // The migration sanity check will test for 1 single bit being set here. - assignment_bitfield: dummy_assignment_bitfield(), } } } diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index 62f9c8123422..0367c64dc034 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -26,7 +26,10 @@ use polkadot_node_jaeger as jaeger; use polkadot_node_primitives::{ approval::{ v1::{BlockApprovalMeta, DelayTranche, IndirectSignedApprovalVote}, - v2::{BitfieldError, CandidateBitfield, CoreBitfield, IndirectAssignmentCertV2}, + v2::{ + AssignmentCertKindV2, BitfieldError, CandidateBitfield, CoreBitfield, + IndirectAssignmentCertV2, + }, }, ValidationResult, DISPUTE_WINDOW, }; @@ -50,9 +53,9 @@ use polkadot_node_subsystem_util::{ TimeoutExt, }; use polkadot_primitives::{ - ApprovalVote, BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, DisputeStatement, - GroupIndex, Hash, PvfExecTimeoutKind, SessionIndex, SessionInfo, ValidDisputeStatementKind, - ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature, + ApprovalVote, BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, CoreIndex, + DisputeStatement, GroupIndex, Hash, PvfExecTimeoutKind, SessionIndex, SessionInfo, + ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature, }; use sc_keystore::LocalKeystore; use sp_application_crypto::Pair; @@ -730,7 +733,7 @@ enum Action { tick: Tick, }, LaunchApproval { - claimed_core_indices: CoreBitfield, + claimed_candidate_indices: CandidateBitfield, candidate_hash: CandidateHash, indirect_cert: IndirectAssignmentCertV2, assignment_tranche: DelayTranche, @@ -952,7 +955,7 @@ async fn handle_actions( actions_iter = next_actions.into_iter(); }, Action::LaunchApproval { - claimed_core_indices, + 
claimed_candidate_indices, candidate_hash, indirect_cert, assignment_tranche, @@ -980,37 +983,10 @@ async fn handle_actions( launch_approval_span.add_string_tag("block-hash", format!("{:?}", block_hash)); let validator_index = indirect_cert.validator; - // Find all candidates indices for the certificate claimed cores. - let block_entry = match overlayed_db.load_block_entry(&block_hash)? { - Some(b) => b, - None => { - gum::warn!(target: LOG_TARGET, ?block_hash, "Missing block entry"); - - continue - }, - }; - - // Get an assignment bitfield for the given claimed cores. - match cores_to_candidate_indices(&claimed_core_indices, &block_entry) { - Ok(bitfield) => { - ctx.send_unbounded_message( - ApprovalDistributionMessage::DistributeAssignment( - indirect_cert, - bitfield, - ), - ); - }, - Err(err) => { - // Never happens, it should only happen if no cores are claimed, which is a bug. - gum::warn!( - target: LOG_TARGET, - ?block_hash, - ?err, - "Failed to create assignment bitfield" - ); - continue - }, - }; + ctx.send_unbounded_message(ApprovalDistributionMessage::DistributeAssignment( + indirect_cert, + claimed_candidate_indices, + )); match approvals_cache.get(&candidate_hash) { Some(ApprovalOutcome::Approved) => { @@ -1097,6 +1073,30 @@ fn cores_to_candidate_indices( CandidateBitfield::try_from(candidate_indices) } +// Returns the claimed core bitfield from the assignment cert, the candidate hash and a `BlockEntry`. +// Can fail only for VRF Delay assignments for which we cannot find the candidate hash in the block entry which +// indicates a bug or corrupted storage. +fn get_assignment_core_indices( + assignment: &AssignmentCertKindV2, + candidate_hash: &CandidateHash, + block_entry: &BlockEntry, +) -> Option { + match &assignment { + AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield } => + Some(core_bitfield.clone()), + AssignmentCertKindV2::RelayVRFModulo { sample: _ } => block_entry + .candidates() + .iter() + .position(|(_, h)| candidate_hash == h) + .map(|index| { + CoreBitfield::try_from(vec![CoreIndex(index as u32)]) + .expect("Not an empty vec; qed") + }), + AssignmentCertKindV2::RelayVRFDelay { core_index } => + Some(CoreBitfield::try_from(vec![*core_index]).expect("Not an empty vec; qed")), + } +} + fn distribution_messages_for_activation( db: &OverlayedBackend<'_, impl Backend>, state: &State, @@ -1161,68 +1161,94 @@ fn distribution_messages_for_activation( match approval_entry.local_statements() { (None, None) | (None, Some(_)) => {}, // second is impossible case. (Some(assignment), None) => { - match cores_to_candidate_indices( - assignment.assignment_bitfield(), + if let Some(claimed_core_indices) = get_assignment_core_indices( + &assignment.cert().kind, + &candidate_hash, &block_entry, ) { - Ok(bitfield) => messages.push( - ApprovalDistributionMessage::DistributeAssignment( - IndirectAssignmentCertV2 { - block_hash, - validator: assignment.validator_index(), - cert: assignment.cert().clone(), - }, - bitfield, + match cores_to_candidate_indices( + &claimed_core_indices, + &block_entry, + ) { + Ok(bitfield) => messages.push( + ApprovalDistributionMessage::DistributeAssignment( + IndirectAssignmentCertV2 { + block_hash, + validator: assignment.validator_index(), + cert: assignment.cert().clone(), + }, + bitfield, + ), ), - ), - Err(err) => { - // Should never happen. If we fail here it means the assignment is null (no cores claimed). 
- gum::warn!( - target: LOG_TARGET, - ?block_hash, - ?candidate_hash, - ?err, - "Failed to create assignment bitfield", - ); - }, + Err(err) => { + // Should never happen. If we fail here it means the assignment is null (no cores claimed). + gum::warn!( + target: LOG_TARGET, + ?block_hash, + ?candidate_hash, + ?err, + "Failed to create assignment bitfield", + ); + }, + } + } else { + gum::warn!( + target: LOG_TARGET, + ?block_hash, + ?candidate_hash, + "Cannot get assignment claimed core indices", + ); } }, (Some(assignment), Some(approval_sig)) => { - match cores_to_candidate_indices( - assignment.assignment_bitfield(), + if let Some(claimed_core_indices) = get_assignment_core_indices( + &assignment.cert().kind, + &candidate_hash, &block_entry, ) { - Ok(bitfield) => messages.push( - ApprovalDistributionMessage::DistributeAssignment( - IndirectAssignmentCertV2 { - block_hash, - validator: assignment.validator_index(), - cert: assignment.cert().clone(), - }, - bitfield, + match cores_to_candidate_indices( + &claimed_core_indices, + &block_entry, + ) { + Ok(bitfield) => messages.push( + ApprovalDistributionMessage::DistributeAssignment( + IndirectAssignmentCertV2 { + block_hash, + validator: assignment.validator_index(), + cert: assignment.cert().clone(), + }, + bitfield, + ), ), - ), - Err(err) => { - gum::warn!( - target: LOG_TARGET, - ?block_hash, - ?candidate_hash, - ?err, - "Failed to create assignment bitfield", - ); - // If we didn't send assignment, we don't send approval. - continue - }, - } + Err(err) => { + gum::warn!( + target: LOG_TARGET, + ?block_hash, + ?candidate_hash, + ?err, + "Failed to create assignment bitfield", + ); + // If we didn't send assignment, we don't send approval. + continue + }, + } - messages.push(ApprovalDistributionMessage::DistributeApproval( - IndirectSignedApprovalVote { - block_hash, - candidate_index: i as _, - validator: assignment.validator_index(), - signature: approval_sig, - }, - )) + messages.push(ApprovalDistributionMessage::DistributeApproval( + IndirectSignedApprovalVote { + block_hash, + candidate_index: i as _, + validator: assignment.validator_index(), + signature: approval_sig, + }, + )); + } else { + gum::warn!( + target: LOG_TARGET, + ?block_hash, + ?candidate_hash, + "Cannot get assignment claimed core indices", + ); + } }, } }, @@ -2522,7 +2548,7 @@ async fn process_wakeup( None }; - if let Some((claimed_core_indices, cert, val_index, tranche)) = maybe_cert { + if let Some((cert, val_index, tranche)) = maybe_cert { let indirect_cert = IndirectAssignmentCertV2 { block_hash: relay_block, validator: val_index, cert }; @@ -2534,16 +2560,40 @@ async fn process_wakeup( "Launching approval work.", ); - actions.push(Action::LaunchApproval { - claimed_core_indices, - candidate_hash, - indirect_cert, - assignment_tranche: tranche, - relay_block_hash: relay_block, - session: block_entry.session(), - candidate: candidate_receipt, - backing_group, - }); + if let Some(claimed_core_indices) = + get_assignment_core_indices(&indirect_cert.cert.kind, &candidate_hash, &block_entry) + { + match cores_to_candidate_indices(&claimed_core_indices, &block_entry) { + Ok(claimed_candidate_indices) => { + actions.push(Action::LaunchApproval { + claimed_candidate_indices, + candidate_hash, + indirect_cert, + assignment_tranche: tranche, + relay_block_hash: relay_block, + session: block_entry.session(), + candidate: candidate_receipt, + backing_group, + }); + }, + Err(err) => { + // Never happens, it should only happen if no cores are claimed, which is a bug. 
+ gum::warn!( + target: LOG_TARGET, + block_hash = ?relay_block, + ?err, + "Failed to create assignment bitfield" + ); + }, + }; + } else { + gum::warn!( + target: LOG_TARGET, + block_hash = ?relay_block, + ?candidate_hash, + "Cannot get assignment claimed core indices", + ); + } } // Although we checked approval earlier in this function, // this wakeup might have advanced the state to approved via diff --git a/node/core/approval-voting/src/persisted_entries.rs b/node/core/approval-voting/src/persisted_entries.rs index bf9b0b44d26f..3f5c2766154d 100644 --- a/node/core/approval-voting/src/persisted_entries.rs +++ b/node/core/approval-voting/src/persisted_entries.rs @@ -22,7 +22,7 @@ use polkadot_node_primitives::approval::{ v1::{DelayTranche, RelayVRFStory}, - v2::{AssignmentCertV2, CoreBitfield}, + v2::AssignmentCertV2, }; use polkadot_primitives::{ BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, Hash, SessionIndex, @@ -124,7 +124,7 @@ impl ApprovalEntry { pub fn trigger_our_assignment( &mut self, tick_now: Tick, - ) -> Option<(CoreBitfield, AssignmentCertV2, ValidatorIndex, DelayTranche)> { + ) -> Option<(AssignmentCertV2, ValidatorIndex, DelayTranche)> { let our = self.our_assignment.as_mut().and_then(|a| { if a.triggered() { return None @@ -137,7 +137,7 @@ impl ApprovalEntry { our.map(|a| { self.import_assignment(a.tranche(), a.validator_index(), tick_now); - (a.assignment_bitfield().clone(), a.cert().clone(), a.validator_index(), a.tranche()) + (a.cert().clone(), a.validator_index(), a.tranche()) }) } diff --git a/node/core/approval-voting/src/tests.rs b/node/core/approval-voting/src/tests.rs index 117d69597fc4..8d9679915176 100644 --- a/node/core/approval-voting/src/tests.rs +++ b/node/core/approval-voting/src/tests.rs @@ -2349,7 +2349,6 @@ fn subsystem_validate_approvals_cache() { approval_db::v2::OurAssignment { cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }) .into(), - assignment_bitfield: CoreIndex(0u32).into(), tranche: 0, validator_index: ValidatorIndex(0), triggered: false, @@ -2365,9 +2364,6 @@ fn subsystem_validate_approvals_cache() { .try_into() .unwrap(), }), - assignment_bitfield: vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)] - .try_into() - .unwrap(), tranche: 0, validator_index: ValidatorIndex(0), triggered: false, @@ -2586,7 +2582,6 @@ where approval_db::v2::OurAssignment { cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }) .into(), - assignment_bitfield: CoreIndex(0).into(), tranche: our_assigned_tranche, validator_index: ValidatorIndex(0), triggered: false, From 21e53eeaedf30b7c91c9135b0e8e6bd3fbbea54c Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Mon, 24 Jul 2023 13:12:42 +0300 Subject: [PATCH 083/105] feedback Signed-off-by: Andrei Sandu --- node/network/approval-distribution/src/lib.rs | 30 ++++++++----------- node/network/bitfield-distribution/src/lib.rs | 4 +-- .../network/statement-distribution/src/lib.rs | 4 +-- 3 files changed, 17 insertions(+), 21 deletions(-) diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index adc243400cd2..82d404908d21 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -382,17 +382,13 @@ impl Knowledge { // entries for each assigned candidate. This fakes knowledge of individual assignments, but // we need to share the same `MessageSubject` with the followup approval candidate index. 
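// For example, assuming an incoming assignment whose candidate bitfield has bits 1 and 3 set: two extra entries, `MessageSubject(block_hash, [1], validator)` and `MessageSubject(block_hash, [3], validator)`, are recorded, so that a follow-up approval referencing a single candidate index finds a matching knowledge entry (the candidate indices here are purely illustrative).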
if kind == MessageKind::Assignment && success && message.1.count_ones() > 1 { - message - .1 - .iter_ones() - .map(|candidate_index| candidate_index as CandidateIndex) - .fold(success, |success, candidate_index| { - success & - self.insert( - MessageSubject(message.0, candidate_index.into(), message.2), - kind, - ) - }) + for candidate_index in message.1.iter_ones() { + success = success && + self.insert( + MessageSubject(message.0, candidate_index.into(), message.2), + kind, + ); + } } else { success } @@ -1530,7 +1526,7 @@ impl State { let v1_peers = filter_by_peer_version(&peers, ValidationVersion::V1.into()); let v2_peers = filter_by_peer_version(&peers, ValidationVersion::VStaging.into()); - if v1_peers.len() > 0 { + if !v1_peers.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( v1_peers, Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( @@ -1540,7 +1536,7 @@ impl State { .await; } - if v2_peers.len() > 0 { + if !v2_peers.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( v2_peers, Versioned::VStaging( @@ -2186,7 +2182,7 @@ pub(crate) async fn send_assignments_batched( let v1_peers = filter_by_peer_version(peers, ValidationVersion::V1.into()); let v2_peers = filter_by_peer_version(peers, ValidationVersion::VStaging.into()); - if v1_peers.len() > 0 { + if !v1_peers.is_empty() { // Older peers(v1) do not understand `AssignmentsV2` messages, so we have to filter these out. let v1_assignments = v2_assignments .clone() @@ -2201,7 +2197,7 @@ pub(crate) async fn send_assignments_batched( } } - if v2_peers.len() > 0 { + if !v2_peers.is_empty() { let mut v2_batches = v2_assignments.into_iter().peekable(); while v2_batches.peek().is_some() { @@ -2221,7 +2217,7 @@ pub(crate) async fn send_approvals_batched( let v1_peers = filter_by_peer_version(peers, ValidationVersion::V1.into()); let v2_peers = filter_by_peer_version(peers, ValidationVersion::VStaging.into()); - if v1_peers.len() > 0 { + if !v1_peers.is_empty() { let mut batches = approvals.clone().into_iter().peekable(); while batches.peek().is_some() { @@ -2238,7 +2234,7 @@ pub(crate) async fn send_approvals_batched( } } - if v2_peers.len() > 0 { + if !v2_peers.is_empty() { let mut batches = approvals.into_iter().peekable(); while batches.peek().is_some() { diff --git a/node/network/bitfield-distribution/src/lib.rs b/node/network/bitfield-distribution/src/lib.rs index 1dec38287ea3..e84db5b0cfdd 100644 --- a/node/network/bitfield-distribution/src/lib.rs +++ b/node/network/bitfield-distribution/src/lib.rs @@ -453,7 +453,7 @@ async fn relay_message( let v2_peers = filter_by_peer_version(&interested_peers, ValidationVersion::VStaging.into()); - if v1_peers.len() > 0 { + if !v1_peers.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( v1_peers, message.clone().into_validation_protocol(ValidationVersion::V1.into()), @@ -461,7 +461,7 @@ async fn relay_message( .await; } - if v2_peers.len() > 0 { + if !v2_peers.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( v2_peers, message.into_validation_protocol(ValidationVersion::VStaging.into()), diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 6abd7e42db67..630236ed235e 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -1119,7 +1119,7 @@ async fn circulate_statement<'a, Context>( let v1_peers = filter_by_peer_version(&peers_to_send, 
ValidationVersion::V1.into()); let v2_peers = filter_by_peer_version(&peers_to_send, ValidationVersion::VStaging.into()); - if v1_peers.len() > 0 { + if !v1_peers.is_empty() { let payload = statement_message( relay_parent, stored.statement.clone(), @@ -1130,7 +1130,7 @@ async fn circulate_statement<'a, Context>( .await; } - if v2_peers.len() > 0 { + if !v2_peers.is_empty() { let payload = statement_message( relay_parent, stored.statement.clone(), From fae7e5286e07e8bc68ec7ce7a47de5fc5856a102 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Mon, 24 Jul 2023 13:43:49 +0300 Subject: [PATCH 084/105] remove old comment Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/criteria.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index 4243d4a12dce..ce6859998f1b 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -265,7 +265,6 @@ pub(crate) trait AssignmentCriteria { assignment: &AssignmentCertV2, // Backing groups for each "leaving core". backing_groups: Vec, - // TODO: maybe define record or something else than tuple ) -> Result; } From b5cf4b84d7cbb20c48a03f54f08bf059b54b1312 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Thu, 27 Jul 2023 13:31:51 +0300 Subject: [PATCH 085/105] fix build Signed-off-by: Andrei Sandu --- node/network/approval-distribution/src/lib.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index 82d404908d21..7cd93e9c1c8b 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -360,7 +360,7 @@ impl Knowledge { } fn insert(&mut self, message: MessageSubject, kind: MessageKind) -> bool { - let success = match self.known_messages.entry(message.clone()) { + let mut success = match self.known_messages.entry(message.clone()) { hash_map::Entry::Vacant(vacant) => { vacant.insert(kind); // If there are multiple candidates assigned in the message, create @@ -385,13 +385,12 @@ impl Knowledge { for candidate_index in message.1.iter_ones() { success = success && self.insert( - MessageSubject(message.0, candidate_index.into(), message.2), + MessageSubject(message.0, vec![candidate_index as u32].try_into().expect("Non-empty vec; qed"), message.2), kind, ); } - } else { - success } + success } } From 6e25ae25e158e2e11867694fe39a9df4af07cf1d Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Thu, 27 Jul 2023 17:42:04 +0300 Subject: [PATCH 086/105] Fix get_assignment_core_indices() Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/lib.rs | 13 ++++++------- node/network/approval-distribution/src/lib.rs | 6 +++++- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index 0367c64dc034..4bbc86462e21 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -53,9 +53,9 @@ use polkadot_node_subsystem_util::{ TimeoutExt, }; use polkadot_primitives::{ - ApprovalVote, BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, CoreIndex, - DisputeStatement, GroupIndex, Hash, PvfExecTimeoutKind, SessionIndex, SessionInfo, - ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature, + ApprovalVote, BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, DisputeStatement, + GroupIndex, Hash, 
PvfExecTimeoutKind, SessionIndex, SessionInfo, ValidDisputeStatementKind, + ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature, }; use sc_keystore::LocalKeystore; use sp_application_crypto::Pair; @@ -1087,10 +1087,9 @@ fn get_assignment_core_indices( AssignmentCertKindV2::RelayVRFModulo { sample: _ } => block_entry .candidates() .iter() - .position(|(_, h)| candidate_hash == h) - .map(|index| { - CoreBitfield::try_from(vec![CoreIndex(index as u32)]) - .expect("Not an empty vec; qed") + .find(|(_core_index, h)| candidate_hash == h) + .map(|(core_index, _candidate_hash)| { + CoreBitfield::try_from(vec![*core_index]).expect("Not an empty vec; qed") }), AssignmentCertKindV2::RelayVRFDelay { core_index } => Some(CoreBitfield::try_from(vec![*core_index]).expect("Not an empty vec; qed")), diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index 7cd93e9c1c8b..5a5e1339d5a7 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -385,7 +385,11 @@ impl Knowledge { for candidate_index in message.1.iter_ones() { success = success && self.insert( - MessageSubject(message.0, vec![candidate_index as u32].try_into().expect("Non-empty vec; qed"), message.2), + MessageSubject( + message.0, + vec![candidate_index as u32].try_into().expect("Non-empty vec; qed"), + message.2, + ), kind, ); } From 5b565edd4b2a17d5f8d4bec4866ef6cdf2a13e09 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Thu, 27 Jul 2023 17:42:04 +0300 Subject: [PATCH 087/105] some TODOs done --- node/core/approval-voting/src/lib.rs | 13 ++++--- node/network/approval-distribution/src/lib.rs | 6 +++- node/primitives/src/approval.rs | 19 ++++++----- .../src/node/approval/approval-voting.md | 34 +++++++++++-------- .../src/protocol-approval.md | 10 +++--- .../implementers-guide/src/types/approval.md | 29 ++++++++++++++++ .../implementers-guide/src/types/runtime.md | 2 +- 7 files changed, 77 insertions(+), 36 deletions(-) diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index 0367c64dc034..4bbc86462e21 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -53,9 +53,9 @@ use polkadot_node_subsystem_util::{ TimeoutExt, }; use polkadot_primitives::{ - ApprovalVote, BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, CoreIndex, - DisputeStatement, GroupIndex, Hash, PvfExecTimeoutKind, SessionIndex, SessionInfo, - ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature, + ApprovalVote, BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, DisputeStatement, + GroupIndex, Hash, PvfExecTimeoutKind, SessionIndex, SessionInfo, ValidDisputeStatementKind, + ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature, }; use sc_keystore::LocalKeystore; use sp_application_crypto::Pair; @@ -1087,10 +1087,9 @@ fn get_assignment_core_indices( AssignmentCertKindV2::RelayVRFModulo { sample: _ } => block_entry .candidates() .iter() - .position(|(_, h)| candidate_hash == h) - .map(|index| { - CoreBitfield::try_from(vec![CoreIndex(index as u32)]) - .expect("Not an empty vec; qed") + .find(|(_core_index, h)| candidate_hash == h) + .map(|(core_index, _candidate_hash)| { + CoreBitfield::try_from(vec![*core_index]).expect("Not an empty vec; qed") }), AssignmentCertKindV2::RelayVRFDelay { core_index } => Some(CoreBitfield::try_from(vec![*core_index]).expect("Not an empty vec; qed")), diff --git 
a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index 7cd93e9c1c8b..5a5e1339d5a7 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -385,7 +385,11 @@ impl Knowledge { for candidate_index in message.1.iter_ones() { success = success && self.insert( - MessageSubject(message.0, vec![candidate_index as u32].try_into().expect("Non-empty vec; qed"), message.2), + MessageSubject( + message.0, + vec![candidate_index as u32].try_into().expect("Non-empty vec; qed"), + message.2, + ), kind, ); } diff --git a/node/primitives/src/approval.rs b/node/primitives/src/approval.rs index 696ca055fe6a..dda7d0e4a845 100644 --- a/node/primitives/src/approval.rs +++ b/node/primitives/src/approval.rs @@ -357,14 +357,6 @@ pub mod v2 { /// - introduced RelayVRFModuloCompact #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] pub enum AssignmentCertKindV2 { - /// An assignment story based on the VRF that authorized the relay-chain block where the - /// candidate was included combined with a sample number. - /// - /// The context used to produce bytes is [`v1::RELAY_VRF_MODULO_CONTEXT`] - RelayVRFModulo { - /// The sample number used in this cert. - sample: u32, - }, /// Multiple assignment stories based on the VRF that authorized the relay-chain block where the /// candidates were included. /// @@ -372,7 +364,7 @@ pub mod v2 { RelayVRFModuloCompact { /// A bitfield representing the core indices claimed by this assignment. core_bitfield: CoreBitfield, - }, + } = 0, /// An assignment story based on the VRF that authorized the relay-chain block where the /// candidate was included combined with the index of a particular core. /// @@ -381,6 +373,15 @@ pub mod v2 { /// The core index chosen in this cert. core_index: CoreIndex, }, + /// Deprectated assignment. Soon to be removed. + /// An assignment story based on the VRF that authorized the relay-chain block where the + /// candidate was included combined with a sample number. + /// + /// The context used to produce bytes is [`v1::RELAY_VRF_MODULO_CONTEXT`] + RelayVRFModulo { + /// The sample number used in this cert. + sample: u32, + }, } /// A certification of assignment. diff --git a/roadmap/implementers-guide/src/node/approval/approval-voting.md b/roadmap/implementers-guide/src/node/approval/approval-voting.md index d8d9826a0f01..8ccd76a4b983 100644 --- a/roadmap/implementers-guide/src/node/approval/approval-voting.md +++ b/roadmap/implementers-guide/src/node/approval/approval-voting.md @@ -49,24 +49,28 @@ struct TrancheEntry { assignments: Vec<(ValidatorIndex, Tick)>, } -struct OurAssignment { - cert: AssignmentCert, - tranche: DelayTranche, - validator_index: ValidatorIndex, - triggered: bool, - /// A subset of the core indices obtained from the VRF output. - assignment_bitfield: AssignmentBitfield, +pub struct OurAssignment { + /// Our assignment certificate. + cert: AssignmentCertV2, + /// The tranche for which the assignment refers to. + tranche: DelayTranche, + /// Our validator index for the session in which the candidates were included. + validator_index: ValidatorIndex, + /// Whether the assignment has been triggered already. + triggered: bool, } -struct ApprovalEntry { - tranches: Vec, // sorted ascending by tranche number. 
- backing_group: GroupIndex, - our_assignment: Option, - our_approval_sig: Option, - assignments: Bitfield, // n_validators bits - approved: bool, +pub struct ApprovalEntry { + tranches: Vec, + backing_group: GroupIndex, + our_assignment: Option, + our_approval_sig: Option, + // `n_validators` bits. + assigned_validators: Bitfield, + approved: bool, } + struct CandidateEntry { candidate: CandidateReceipt, session: SessionIndex, @@ -202,6 +206,8 @@ On receiving a `ApprovalVotingMessage::CheckAndImportAssignment` message, we che * Determine the claimed core index by looking up the candidate with given index in `block_entry.candidates`. Return `AssignmentCheckResult::Bad` if missing. * Check the assignment cert * If the cert kind is `RelayVRFModulo`, then the certificate is valid as long as `sample < session_info.relay_vrf_samples` and the VRF is valid for the validator's key with the input `block_entry.relay_vrf_story ++ sample.encode()` as described with [the approvals protocol section](../../protocol-approval.md#assignment-criteria). We set `core_index = vrf.make_bytes().to_u32() % session_info.n_cores`. If the `BlockEntry` causes inclusion of a candidate at `core_index`, then this is a valid assignment for the candidate at `core_index` and has delay tranche 0. Otherwise, it can be ignored. + * If the cert kind is `RelayVRFModuloCompact`, then the certificate is valid as long as the VRF is valid for the validator's key with the input `block_entry.relay_vrf_story ++ relay_vrf_samples.encode()` as described with [the approvals protocol section](../../protocol-approval.md#assignment-criteria). We enforce that all `core_bitfield` indices are included in the set of the core indices sampled from the VRF Output. The assignment is considered a valid tranche0 assignment for all claimed candidates if all `core_bitfield` indices match the core indices where the claimed candidates were included at. + * If the cert kind is `RelayVRFDelay`, then we check if the VRF is valid for the validator's key with the input `block_entry.relay_vrf_story ++ cert.core_index.encode()` as described in [the approvals protocol section](../../protocol-approval.md#assignment-criteria). The cert can be ignored if the block did not cause inclusion of a candidate on that core index. Otherwise, this is a valid assignment for the included candidate. The delay tranche for the assignment is determined by reducing `(vrf.make_bytes().to_u64() % (session_info.n_delay_tranches + session_info.zeroth_delay_tranche_width)).saturating_sub(session_info.zeroth_delay_tranche_width)`. * We also check that the core index derived by the output is covered by the `VRFProof` by means of an auxiliary signature. * If the delay tranche is too far in the future, return `AssignmentCheckResult::TooFarInFuture`. diff --git a/roadmap/implementers-guide/src/protocol-approval.md b/roadmap/implementers-guide/src/protocol-approval.md index 693822ce0797..70339863e525 100644 --- a/roadmap/implementers-guide/src/protocol-approval.md +++ b/roadmap/implementers-guide/src/protocol-approval.md @@ -98,12 +98,14 @@ We want checkers for candidate equivocations that lie outside our preferred rela Assignment criteria compute actual assignments using stories and the validators' secret approval assignment key. Assignment criteria output a `Position` consisting of both a `ParaId` to be checked, as well as a precedence `DelayTranche` for when the assignment becomes valid. -Assignment criteria come in three flavors, `RelayVRFModulo`, `RelayVRFDelay` and `RelayEquivocation`. 
Among these, both `RelayVRFModulo` and `RelayVRFDelay` run a VRF whose input is the output of a `RelayVRFStory`, while `RelayEquivocation` runs a VRF whose input is the output of a `RelayEquivocationStory`. +Assignment criteria come in four flavors, `RelayVRFModuloCompact`, `RelayVRFDelay`, `RelayEquivocation` and the deprecated `RelayVRFModulo`. Among these, `RelayVRFModulo`, `RelayVRFModuloCompact` and `RelayVRFDelay` run a VRF whose input is the output of a `RelayVRFStory`, while `RelayEquivocation` runs a VRF whose input is the output of a `RelayEquivocationStory`. Among these, we have two distinct VRF output computations: `RelayVRFModulo` runs several distinct samples whose VRF input is the `RelayVRFStory` and the sample number. It computes the VRF output with `schnorrkel::vrf::VRFInOut::make_bytes` using the context "A&V Core", reduces this number modulo the number of availability cores, and outputs the candidate just declared available by, and included by aka leaving, that availability core. We drop any samples that return no candidate because no candidate was leaving the sampled availability core in this relay chain block. We choose three samples initially, but we could make polkadot more secure and efficient by increasing this to four or five, and reducing the backing checks accordingly. All successful `RelayVRFModulo` samples are assigned delay tranche zero. +`RelayVRFModuloCompact` runs a single sample whose VRF input is the `RelayVRFStory` and the sample count. Similar to `RelayVRFModulo`, it introduces multiple core assignments for tranche zero. It computes the VRF output with `schnorrkel::vrf::VRFInOut::make_bytes` using the context "A&V Core v2" and samples up to 160 bytes of the output as an array of `u32`. It then reduces each `u32` modulo the number of availability cores, and outputs up to `relay_vrf_modulo_samples` availability core indices. + There is no sampling process for `RelayVRFDelay` and `RelayEquivocation`. We instead run them on specific candidates and they compute a delay from their VRF output. `RelayVRFDelay` runs for all candidates included under, aka declared available by, a relay chain block, and inputs the associated VRF output via `RelayVRFStory`. `RelayEquivocation` runs only on candidate block equivocations, and inputs their block hashes via the `RelayEquivocation` story. `RelayVRFDelay` and `RelayEquivocation` both compute their output with `schnorrkel::vrf::VRFInOut::make_bytes` using the context "A&V Tranche" and reduce the result modulo `num_delay_tranches + zeroth_delay_tranche_width`, and consolidate results 0 through `zeroth_delay_tranche_width` to be 0. In this way, they ensure the zeroth delay tranche has `zeroth_delay_tranche_width+1` times as many assignments as any other tranche. @@ -114,9 +116,9 @@ As future work (or TODO?), we should merge assignment notices with the same dela We track all validators' announced approval assignments for each candidate associated to each relay chain block, which tells us which validators were assigned to which candidates. -We permit at most one assignment per candidate per story per validator, so one validator could be assigned under both the `RelayVRFDelay` and `RelayEquivocation` criteria, but not under both `RelayVRFModulo` and `RelayVRFDelay` criteria, since those both use the same story. We permit only one approval vote per candidate per validator, which counts for any applicable criteria.
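The reduction described above can be sketched in a few lines of Rust. This is an illustrative, self-contained example only: the function names, the fixed byte buffer and the subset check are assumptions made for the sketch, not the actual `criteria.rs` implementation.

```rust
// Sketch only: derive tranche-0 core samples from (assumed) VRF output bytes by
// reducing consecutive little-endian u32 chunks modulo the number of availability
// cores, then check that every claimed core index is among the sampled ones.
fn sampled_cores(vrf_output_bytes: &[u8], relay_vrf_modulo_samples: usize, n_cores: u32) -> Vec<u32> {
    vrf_output_bytes
        .chunks_exact(4)
        .take(relay_vrf_modulo_samples)
        .map(|chunk| u32::from_le_bytes([chunk[0], chunk[1], chunk[2], chunk[3]]) % n_cores)
        .collect()
}

fn claimed_cores_covered(claimed_core_indices: &[u32], samples: &[u32]) -> bool {
    claimed_core_indices.iter().all(|core| samples.contains(core))
}

fn main() {
    // Stand-in for up to 160 bytes produced by `VRFInOut::make_bytes`.
    let vrf_output_bytes = [7u8; 160];
    let samples = sampled_cores(&vrf_output_bytes, 6, 100);
    // A claimed core set is valid only if all of its indices were sampled.
    assert!(claimed_cores_covered(&samples, &samples));
    println!("tranche-0 core samples: {:?}", samples);
}
```

In the real protocol the claimed cores travel as a `core_bitfield`, and an assignment is only accepted if every claimed index appears among the cores sampled from the VRF output, as described in the approval-voting section.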
+We permit at most one assignment per candidate per story per validator, so one validator could be assigned under both the `RelayVRFDelay` and `RelayEquivocation` criteria, but not under both `RelayVRFModulo/RelayVRFModuloCompact` and `RelayVRFDelay` criteria, since those both use the same story. We permit only one approval vote per candidate per validator, which counts for any applicable criteria. -We announce, and start checking for, our own assignments when the delay of their tranche is reached, but only if the tracker says the assignee candidate requires more approval checkers. We never announce an assignment we believe unnecessary because early announcements gives an adversary information. All delay tranche zero assignments always get announced, which includes all `RelayVRFModulo` assignments. +We announce, and start checking for, our own assignments when the delay of their tranche is reached, but only if the tracker says the assignee candidate requires more approval checkers. We never announce an assignment we believe unnecessary because early announcements gives an adversary information. All delay tranche zero assignments always get announced, which includes all `RelayVRFModulo` and `RelayVRFModuloCompact` assignments. In other words, if some candidate `C` needs more approval checkers by the time we reach round `t` then any validators with an assignment to `C` in delay tranche `t` gossip their send assignment notice for `C`, and begin reconstruction and validation for 'C. If however `C` reached enough assignments, then validators with later assignments skip announcing their assignments. @@ -164,7 +166,7 @@ We need the chain to win in this case, but doing this requires imposing an annoy ## Parameters -We prefer doing approval checkers assignments under `RelayVRFModulo` as opposed to `RelayVRFDelay` because `RelayVRFModulo` avoids giving individual checkers too many assignments and tranche zero assignments benefit security the most. We suggest assigning at least 16 checkers under `RelayVRFModulo` although assignment levels have never been properly analyzed. +We prefer doing approval checkers assignments under `RelayVRFModulo` or `RelayVRFModuloCompact` as opposed to `RelayVRFDelay` because `RelayVRFModulo` avoids giving individual checkers too many assignments and tranche zero assignments benefit security the most. We suggest assigning at least 16 checkers under `RelayVRFModulo` or `RelayVRFModuloCompact` although assignment levels have never been properly analyzed. Our delay criteria `RelayVRFDelay` and `RelayEquivocation` both have two primary paramaters, expected checkers per tranche and the zeroth delay tranche width. diff --git a/roadmap/implementers-guide/src/types/approval.md b/roadmap/implementers-guide/src/types/approval.md index b58e0a8187e1..2cc9b34cc700 100644 --- a/roadmap/implementers-guide/src/types/approval.md +++ b/roadmap/implementers-guide/src/types/approval.md @@ -20,6 +20,35 @@ enum AssignmentCertKind { } } +enum AssignmentCertKindV2 { + /// Multiple assignment stories based on the VRF that authorized the relay-chain block where the + /// candidates were included. + /// + /// The context is [`v2::RELAY_VRF_MODULO_CONTEXT`] + RelayVRFModuloCompact { + /// A bitfield representing the core indices claimed by this assignment. + core_bitfield: CoreBitfield, + }, + /// An assignment story based on the VRF that authorized the relay-chain block where the + /// candidate was included combined with the index of a particular core. 
+ /// + /// The context is [`v2::RELAY_VRF_DELAY_CONTEXT`] + RelayVRFDelay { + /// The core index chosen in this cert. + core_index: CoreIndex, + }, + /// Deprecated assignment. Soon to be removed. + /// + /// An assignment story based on the VRF that authorized the relay-chain block where the + /// candidate was included combined with a sample number. + /// + /// The context used to produce bytes is [`v1::RELAY_VRF_MODULO_CONTEXT`] + RelayVRFModulo { + /// The sample number used in this cert. + sample: u32, + }, +} + struct AssignmentCert { // The criterion which is claimed to be met by this cert. kind: AssignmentCertKind, diff --git a/roadmap/implementers-guide/src/types/runtime.md b/roadmap/implementers-guide/src/types/runtime.md index 55c0a571b6c8..81d4f6a7ce75 100644 --- a/roadmap/implementers-guide/src/types/runtime.md +++ b/roadmap/implementers-guide/src/types/runtime.md @@ -55,7 +55,7 @@ struct HostConfiguration { pub zeroth_delay_tranche_width: u32, /// The number of validators needed to approve a block. pub needed_approvals: u32, - /// The number of samples to do of the RelayVRFModulo approval assignment criterion. + /// The number of samples to use in `RelayVRFModulo` or `RelayVRFModuloCompact` approval assignment criteria. pub relay_vrf_modulo_samples: u32, /// Total number of individual messages allowed in the parachain -> relay-chain message queue. pub max_upward_queue_count: u32, From 910e1ec24396a75db39fa260534414bc9a24fc6f Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 28 Jul 2023 01:59:10 +0300 Subject: [PATCH 088/105] fix test build Signed-off-by: Andrei Sandu --- node/network/bridge/src/rx/tests.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/node/network/bridge/src/rx/tests.rs b/node/network/bridge/src/rx/tests.rs index 319d94bb04e2..3a970a220ae4 100644 --- a/node/network/bridge/src/rx/tests.rs +++ b/node/network/bridge/src/rx/tests.rs @@ -1343,7 +1343,7 @@ fn our_view_updates_decreasing_order_and_limited_to_max() { fn network_protocol_versioning_view_update() { let (oracle, handle) = make_sync_oracle(false); test_harness(Box::new(oracle), |test_harness| async move { - let TestHarness { mut network_handle, mut virtual_overseer } = test_harness; + let TestHarness { mut network_handle, mut virtual_overseer, ..
} = test_harness; let peer_ids: Vec<_> = (0..2).map(|_| PeerId::random()).collect(); let peers = [ @@ -1397,7 +1397,7 @@ fn network_protocol_versioning_view_update() { fn network_protocol_versioning_subsystem_msg() { let (oracle, _handle) = make_sync_oracle(false); test_harness(Box::new(oracle), |test_harness| async move { - let TestHarness { mut network_handle, mut virtual_overseer } = test_harness; + let TestHarness { mut network_handle, mut virtual_overseer, ..} = test_harness; let peer = PeerId::random(); From 58e028add8eb238c7f0b91a12653937ede8747cf Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Fri, 28 Jul 2023 12:45:35 +0300 Subject: [PATCH 089/105] review feedback Signed-off-by: Andrei Sandu --- .../src/approval_db/v2/migration_helpers.rs | 6 +- .../approval-voting/src/approval_db/v2/mod.rs | 28 +++---- node/core/approval-voting/src/backend.rs | 18 +++-- node/core/approval-voting/src/criteria.rs | 12 ++- node/core/approval-voting/src/tests.rs | 32 ++++---- node/network/approval-distribution/src/lib.rs | 76 +++++++++---------- node/network/bridge/src/rx/tests.rs | 2 +- node/primitives/src/approval.rs | 6 +- node/service/src/parachains_db/mod.rs | 5 +- node/service/src/parachains_db/upgrade.rs | 26 ++----- .../src/protocol-approval.md | 2 +- 11 files changed, 104 insertions(+), 109 deletions(-) diff --git a/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs b/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs index 33aecf5e1b0d..2e0775554ba3 100644 --- a/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs +++ b/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs @@ -61,7 +61,7 @@ fn make_bitvec(len: usize) -> BitVec { /// Migrates `OurAssignment`, `CandidateEntry` and `ApprovalEntry` to version 2. /// Returns on any error. /// Must only be used in parachains DB migration code - `polkadot-service` crate. -pub fn migrate_approval_db_v1_to_v2(db: Arc, config: Config) -> Result<()> { +pub fn v1_to_v2(db: Arc, config: Config) -> Result<()> { let mut backend = crate::DbBackend::new(db, config); let all_blocks = backend .load_all_blocks() @@ -107,7 +107,7 @@ pub fn migrate_approval_db_v1_to_v2(db: Arc, config: Config) -> Re // Checks if the migration doesn't leave the DB in an unsane state. // This function is to be used in tests. -pub fn migrate_approval_db_v1_to_v2_sanity_check( +pub fn v1_to_v2_sanity_check( db: Arc, config: Config, expected_candidates: HashSet, @@ -145,7 +145,7 @@ pub fn migrate_approval_db_v1_to_v2_sanity_check( } // Fills the db with dummy data in v1 scheme. -pub fn migrate_approval_db_v1_to_v2_fill_test_data( +pub fn v1_to_v2_fill_test_data( db: Arc, config: Config, ) -> Result> { diff --git a/node/core/approval-voting/src/approval_db/v2/mod.rs b/node/core/approval-voting/src/approval_db/v2/mod.rs index 714ca8985fc2..dac8d5d4b0f6 100644 --- a/node/core/approval-voting/src/approval_db/v2/mod.rs +++ b/node/core/approval-voting/src/approval_db/v2/mod.rs @@ -31,22 +31,16 @@ use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use std::{collections::BTreeMap, sync::Arc}; use crate::{ - backend::{Backend, BackendWriteOp}, + backend::{Backend, BackendWriteOp, V1ReadBackend}, persisted_entries, }; const STORED_BLOCKS_KEY: &[u8] = b"Approvals_StoredBlocks"; -mod migration_helpers; +pub mod migration_helpers; #[cfg(test)] pub mod tests; -// DB migration support. 
-pub use migration_helpers::{
-	migrate_approval_db_v1_to_v2, migrate_approval_db_v1_to_v2_fill_test_data,
-	migrate_approval_db_v1_to_v2_sanity_check,
-};
-
 /// `DbBackend` is a concrete implementation of the higher-level Backend trait
 pub struct DbBackend {
 	inner: Arc,
@@ -61,6 +55,16 @@ impl DbBackend {
 	}
 }
 
+impl V1ReadBackend for DbBackend {
+	fn load_candidate_entry_v1(
+		&self,
+		candidate_hash: &CandidateHash,
+	) -> SubsystemResult> {
+		load_candidate_entry_v1(&*self.inner, &self.config, candidate_hash)
+			.map(|e| e.map(Into::into))
+	}
+}
+
 impl Backend for DbBackend {
 	fn load_block_entry(
 		&self,
@@ -76,14 +80,6 @@ impl Backend for DbBackend {
 		load_candidate_entry(&*self.inner, &self.config, candidate_hash).map(|e| e.map(Into::into))
 	}
 
-	fn load_candidate_entry_v1(
-		&self,
-		candidate_hash: &CandidateHash,
-	) -> SubsystemResult> {
-		load_candidate_entry_v1(&*self.inner, &self.config, candidate_hash)
-			.map(|e| e.map(Into::into))
-	}
-
 	fn load_blocks_at_height(&self, block_height: &BlockNumber) -> SubsystemResult> {
 		load_blocks_at_height(&*self.inner, &self.config, block_height)
 	}
diff --git a/node/core/approval-voting/src/backend.rs b/node/core/approval-voting/src/backend.rs
index d3af78b3036c..0e3f04cc7e96 100644
--- a/node/core/approval-voting/src/backend.rs
+++ b/node/core/approval-voting/src/backend.rs
@@ -44,8 +44,7 @@ pub enum BackendWriteOp {
 }
 
 /// An abstraction over backend storage for the logic of this subsystem.
-/// Implementation must always target latest storage version, but we might introduce
-/// methods to enable db migration, like `load_candidate_entry_v1`.
+/// Implementation must always target latest storage version.
 pub trait Backend {
 	/// Load a block entry from the DB.
 	fn load_block_entry(&self, hash: &Hash) -> SubsystemResult>;
@@ -54,11 +53,7 @@ pub trait Backend {
 		&self,
 		candidate_hash: &CandidateHash,
 	) -> SubsystemResult>;
-	/// Load a candidate entry from the DB with scheme version 1.
-	fn load_candidate_entry_v1(
-		&self,
-		candidate_hash: &CandidateHash,
-	) -> SubsystemResult>;
+
 	/// Load all blocks at a specific height.
 	fn load_blocks_at_height(&self, height: &BlockNumber) -> SubsystemResult>;
 	/// Load all block from the DB.
@@ -71,6 +66,15 @@ pub trait Backend {
 		I: IntoIterator;
 }
 
+/// A read-only backend to enable DB migration from version 1 of the DB.
+pub trait V1ReadBackend: Backend {
+	/// Load a candidate entry from the DB with scheme version 1.
+	fn load_candidate_entry_v1(
+		&self,
+		candidate_hash: &CandidateHash,
+	) -> SubsystemResult>;
+}
+
 // Status of block range in the `OverlayedBackend`.
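As a rough sketch of how the new read-only trait is meant to be driven during migration (simplified from `migration_helpers::v1_to_v2` above; the `OverlayedBackend` write call is an assumption based on the existing backend API):

// Read a candidate entry through the v1 scheme, let `Into` convert it to the
// current in-memory representation, and write it back through the normal (v2) path.
fn migrate_one_candidate<B: V1ReadBackend>(
    backend: &B,
    overlay: &mut OverlayedBackend<'_, B>,
    candidate_hash: &CandidateHash,
) -> SubsystemResult<()> {
    if let Some(entry) = backend.load_candidate_entry_v1(candidate_hash)? {
        overlay.write_candidate_entry(entry);
    }
    Ok(())
}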
#[derive(PartialEq)] enum BlockRangeStatus { diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index ce6859998f1b..33731a5fe3fa 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -685,6 +685,8 @@ pub(crate) fn check_assignment_cert( let vrf_output = &assignment.vrf.output; let vrf_proof = &assignment.vrf.proof; + let first_claimed_core_index = + claimed_core_indices.first_one().expect("Checked above; qed") as u32; match &assignment.kind { AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield } => { @@ -746,15 +748,13 @@ pub(crate) fn check_assignment_cert( relay_vrf_modulo_transcript_v1(relay_vrf_story, *sample), &vrf_output.0, &vrf_proof.0, - assigned_core_transcript(CoreIndex( - claimed_core_indices.first_one().expect("Checked above; qed") as u32, - )), + assigned_core_transcript(CoreIndex(first_claimed_core_index)), ) .map_err(|_| InvalidAssignment(Reason::VRFModuloOutputMismatch))?; let core = relay_vrf_modulo_core(&vrf_in_out, config.n_cores); // ensure that the `vrf_in_out` actually gives us the claimed core. - if core.0 as usize == claimed_core_indices.first_one().expect("Checked above; qed") { + if core.0 == first_claimed_core_index { Ok(0) } else { gum::debug!( @@ -777,9 +777,7 @@ pub(crate) fn check_assignment_cert( return Err(InvalidAssignment(Reason::InvalidArguments)) } - if core_index.0 as usize != - claimed_core_indices.first_one().expect("Checked above; qed") - { + if core_index.0 != first_claimed_core_index { return Err(InvalidAssignment(Reason::VRFDelayCoreIndexMismatch)) } diff --git a/node/core/approval-voting/src/tests.rs b/node/core/approval-voting/src/tests.rs index 8d9679915176..d373bc244dfd 100644 --- a/node/core/approval-voting/src/tests.rs +++ b/node/core/approval-voting/src/tests.rs @@ -274,6 +274,15 @@ struct TestStoreInner { candidate_entries: HashMap, } +impl V1ReadBackend for TestStoreInner { + fn load_candidate_entry_v1( + &self, + candidate_hash: &CandidateHash, + ) -> SubsystemResult> { + self.load_candidate_entry(candidate_hash) + } +} + impl Backend for TestStoreInner { fn load_block_entry(&self, block_hash: &Hash) -> SubsystemResult> { Ok(self.block_entries.get(block_hash).cloned()) @@ -286,13 +295,6 @@ impl Backend for TestStoreInner { Ok(self.candidate_entries.get(candidate_hash).cloned()) } - fn load_candidate_entry_v1( - &self, - candidate_hash: &CandidateHash, - ) -> SubsystemResult> { - self.load_candidate_entry(candidate_hash) - } - fn load_blocks_at_height(&self, height: &BlockNumber) -> SubsystemResult> { Ok(self.blocks_at_height.get(height).cloned().unwrap_or_default()) } @@ -352,6 +354,15 @@ pub struct TestStore { store: Arc>, } +impl V1ReadBackend for TestStore { + fn load_candidate_entry_v1( + &self, + candidate_hash: &CandidateHash, + ) -> SubsystemResult> { + self.load_candidate_entry(candidate_hash) + } +} + impl Backend for TestStore { fn load_block_entry(&self, block_hash: &Hash) -> SubsystemResult> { let store = self.store.lock(); @@ -366,13 +377,6 @@ impl Backend for TestStore { store.load_candidate_entry(candidate_hash) } - fn load_candidate_entry_v1( - &self, - candidate_hash: &CandidateHash, - ) -> SubsystemResult> { - self.load_candidate_entry(candidate_hash) - } - fn load_blocks_at_height(&self, height: &BlockNumber) -> SubsystemResult> { let store = self.store.lock(); store.load_blocks_at_height(height) diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index 
c4838b4d866e..ef39c7beff75 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -101,9 +101,9 @@ impl RecentlyOutdated { // Contains topology routing information for assignments and approvals. struct ApprovalRouting { - pub required_routing: RequiredRouting, - pub local: bool, - pub random_routing: RandomRouting, + required_routing: RequiredRouting, + local: bool, + random_routing: RandomRouting, } // This struct is responsible for tracking the full state of an assignment and grid routing information. @@ -1529,48 +1529,46 @@ impl State { } } - // The entry is created when assignment is imported, so we assume this exists. - let approval_entry = entry.get_approval_entry(candidate_index, validator_index); - if approval_entry.is_none() { - let peer_id = source.peer_id(); - // This indicates a bug in approval-distribution, since we check the knowledge at the begining of the function. - gum::warn!( - target: LOG_TARGET, - ?peer_id, - ?message_subject, - "Unknown approval assignment", - ); - // No rep change as this is caused by an issue - return - } - - let approval_entry = approval_entry.expect("Just checked above; qed"); - - // Invariant: to our knowledge, none of the peers except for the `source` know about the approval. - metrics.on_approval_imported(); + let required_routing = match entry.get_approval_entry(candidate_index, validator_index) { + Some(approval_entry) => { + // Invariant: to our knowledge, none of the peers except for the `source` know about the approval. + metrics.on_approval_imported(); - if let Err(err) = approval_entry.note_approval(vote.clone()) { - // this would indicate a bug in approval-voting: - // - validator index mismatch - // - candidate index mismatch - // - duplicate approval - gum::warn!( - target: LOG_TARGET, - hash = ?block_hash, - ?candidate_index, - ?validator_index, - ?err, - "Possible bug: Vote import failed", - ); + if let Err(err) = approval_entry.note_approval(vote.clone()) { + // this would indicate a bug in approval-voting: + // - validator index mismatch + // - candidate index mismatch + // - duplicate approval + gum::warn!( + target: LOG_TARGET, + hash = ?block_hash, + ?candidate_index, + ?validator_index, + ?err, + "Possible bug: Vote import failed", + ); - return - } + return + } - let required_routing = approval_entry.routing_info().required_routing; + approval_entry.routing_info().required_routing + }, + None => { + let peer_id = source.peer_id(); + // This indicates a bug in approval-distribution, since we check the knowledge at the begining of the function. + gum::warn!( + target: LOG_TARGET, + ?peer_id, + ?message_subject, + "Unknown approval assignment", + ); + // No rep change as this is caused by an issue + return + }, + }; // Dispatch a ApprovalDistributionV1Message::Approval(vote) // to all peers required by the topology, with the exception of the source peer. 
- let topology = self.topologies.get_topology(entry.session); let source_peer = source.peer_id(); diff --git a/node/network/bridge/src/rx/tests.rs b/node/network/bridge/src/rx/tests.rs index 3a970a220ae4..1e22ebbba10a 100644 --- a/node/network/bridge/src/rx/tests.rs +++ b/node/network/bridge/src/rx/tests.rs @@ -1397,7 +1397,7 @@ fn network_protocol_versioning_view_update() { fn network_protocol_versioning_subsystem_msg() { let (oracle, _handle) = make_sync_oracle(false); test_harness(Box::new(oracle), |test_harness| async move { - let TestHarness { mut network_handle, mut virtual_overseer, ..} = test_harness; + let TestHarness { mut network_handle, mut virtual_overseer, .. } = test_harness; let peer = PeerId::random(); diff --git a/node/primitives/src/approval.rs b/node/primitives/src/approval.rs index c917acb1039f..00037b774c76 100644 --- a/node/primitives/src/approval.rs +++ b/node/primitives/src/approval.rs @@ -355,21 +355,22 @@ pub mod v2 { /// Certificate is changed compared to `AssignmentCertKind`: /// - introduced RelayVRFModuloCompact - #[repr(u8)] #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] pub enum AssignmentCertKindV2 { /// Multiple assignment stories based on the VRF that authorized the relay-chain block where the /// candidates were included. /// /// The context is [`v2::RELAY_VRF_MODULO_CONTEXT`] + #[codec(index = 0)] RelayVRFModuloCompact { /// A bitfield representing the core indices claimed by this assignment. core_bitfield: CoreBitfield, - } = 0, + }, /// An assignment story based on the VRF that authorized the relay-chain block where the /// candidate was included combined with the index of a particular core. /// /// The context is [`v2::RELAY_VRF_DELAY_CONTEXT`] + #[codec(index = 1)] RelayVRFDelay { /// The core index chosen in this cert. core_index: CoreIndex, @@ -379,6 +380,7 @@ pub mod v2 { /// candidate was included combined with a sample number. /// /// The context used to produce bytes is [`v1::RELAY_VRF_MODULO_CONTEXT`] + #[codec(index = 2)] RelayVRFModulo { /// The sample number used in this cert. sample: u32, diff --git a/node/service/src/parachains_db/mod.rs b/node/service/src/parachains_db/mod.rs index d4926a4cb00b..92f3f167f22f 100644 --- a/node/service/src/parachains_db/mod.rs +++ b/node/service/src/parachains_db/mod.rs @@ -43,7 +43,10 @@ pub(crate) mod columns { // Version 4 only changed structures in approval voting, so we can re-export the v4 definitions. 
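The move above from `#[repr(u8)]` discriminants to `#[codec(index = N)]` pins the SCALE discriminant at the attribute level, so variant order no longer dictates the wire format. A toy example of what the attribute guarantees (not the real certificate type; assumes `parity-scale-codec` with the `derive` feature):

use parity_scale_codec::Encode;

// The encoded discriminant comes from `#[codec(index = N)]`, not from declaration
// order, so variants can later be reordered or extended without breaking decoding.
#[derive(Encode)]
enum Demo {
    #[codec(index = 0)]
    Compact(u8),
    #[codec(index = 2)]
    Modulo(u8),
}

fn main() {
    assert_eq!(Demo::Compact(7).encode(), vec![0u8, 7]);
    assert_eq!(Demo::Modulo(7).encode(), vec![2u8, 7]);
}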
pub mod v3 { - pub use super::v4::*; + pub use super::v4::{ + COL_APPROVAL_DATA, COL_AVAILABILITY_DATA, COL_AVAILABILITY_META, + COL_CHAIN_SELECTION_DATA, COL_DISPUTE_COORDINATOR_DATA, NUM_COLUMNS, ORDERED_COL, + }; } pub mod v4 { diff --git a/node/service/src/parachains_db/upgrade.rs b/node/service/src/parachains_db/upgrade.rs index 020b058d7979..7d79d3b6a513 100644 --- a/node/service/src/parachains_db/upgrade.rs +++ b/node/service/src/parachains_db/upgrade.rs @@ -23,7 +23,7 @@ use std::{ }; use polkadot_node_core_approval_voting::approval_db::v2::{ - migrate_approval_db_v1_to_v2, Config as ApprovalDbConfig, + migration_helpers::v1_to_v2, Config as ApprovalDbConfig, }; type Version = u32; @@ -187,8 +187,7 @@ fn migrate_from_version_3_to_4(path: &Path, db_kind: DatabaseKind) -> Result { let db_path = path @@ -201,8 +200,7 @@ fn migrate_from_version_3_to_4(path: &Path, db_kind: DatabaseKind) -> Result Date: Fri, 28 Jul 2023 13:09:22 +0300 Subject: [PATCH 090/105] use V1ReadBackend Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/tests.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/node/core/approval-voting/src/tests.rs b/node/core/approval-voting/src/tests.rs index d373bc244dfd..da0a19e52b80 100644 --- a/node/core/approval-voting/src/tests.rs +++ b/node/core/approval-voting/src/tests.rs @@ -15,6 +15,7 @@ // along with Polkadot. If not, see . use super::*; +use crate::backend::V1ReadBackend; use polkadot_node_primitives::{ approval::{ v1::{ From a8eae59b71c9a9f5dc3efa9541d348b4ab0db0ad Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Mon, 31 Jul 2023 22:48:36 +0300 Subject: [PATCH 091/105] sanitize bitfields Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/lib.rs | 23 ++++ node/core/approval-voting/src/tests.rs | 59 +++++++++- node/network/approval-distribution/src/lib.rs | 104 ++++++++++++++++-- node/subsystem-types/src/messages.rs | 2 + 4 files changed, 180 insertions(+), 8 deletions(-) diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index 76e507504977..2c0509439ce8 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -1915,6 +1915,29 @@ where )), }; + let n_cores = session_info.n_cores as usize; + + // Early check the candidate bitfield and core bitfields lengths < `n_cores`. + // `approval-distribution` already checks for core and claimed candidate bitfields + // to be equal in size. A check for claimed candidate bitfields should be enough here. + if candidate_indices.len() >= n_cores { + gum::debug!( + target: LOG_TARGET, + validator = assignment.validator.0, + n_cores, + candidate_bitfield_len = ?candidate_indices.len(), + "Oversized bitfield", + ); + + println!("Oversized bitfield {:?}", n_cores); + return Ok(( + AssignmentCheckResult::Bad(AssignmentCheckError::InvalidBitfield( + candidate_indices.len(), + )), + Vec::new(), + )) + } + // The Compact VRF modulo assignment cert has multiple core assignments. 
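To make the early bound check above concrete: a claimed-candidates bitfield whose highest set bit is index `i` has `i + 1` bits, so with `n_cores = 10` a claim on candidate index 50 arrives as a 51-bit field and is rejected as `InvalidBitfield(51)`, exactly what the new test below asserts. A minimal sketch of the rule in isolation (hypothetical helper; uses the strict `>` form this check settles on later in the series):

/// Reject claimed-candidate bitfields that cannot fit the session's cores,
/// reporting the offending length as in `AssignmentCheckError::InvalidBitfield(len)`.
fn check_claimed_candidates_len(claimed_bits: usize, n_cores: usize) -> Result<(), usize> {
    if claimed_bits > n_cores {
        Err(claimed_bits)
    } else {
        Ok(())
    }
}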
let mut backing_groups = Vec::new(); let mut claimed_core_indices = Vec::new(); diff --git a/node/core/approval-voting/src/tests.rs b/node/core/approval-voting/src/tests.rs index da0a19e52b80..ad784bb504d4 100644 --- a/node/core/approval-voting/src/tests.rs +++ b/node/core/approval-voting/src/tests.rs @@ -808,7 +808,7 @@ fn session_info(keys: &[Sr25519Keyring]) -> SessionInfo { vec![ValidatorIndex(0)], vec![ValidatorIndex(1)], ]), - n_cores: keys.len() as _, + n_cores: 10, needed_approvals: 2, zeroth_delay_tranche_width: 5, relay_vrf_modulo_samples: 3, @@ -1508,6 +1508,63 @@ fn subsystem_rejects_assignment_with_unknown_candidate() { }); } +#[test] +fn subsystem_rejects_oversized_bitfields() { + test_harness(HarnessConfig::default(), |test_harness| async move { + let TestHarness { mut virtual_overseer, sync_oracle_handle: _sync_oracle_handle, .. } = + test_harness; + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + + let block_hash = Hash::repeat_byte(0x01); + let candidate_index = 10; + let validator = ValidatorIndex(0); + + ChainBuilder::new() + .add_block( + block_hash, + ChainBuilder::GENESIS_HASH, + 1, + BlockConfig { slot: Slot::from(1), candidates: None, session_info: None }, + ) + .build(&mut virtual_overseer) + .await; + + let rx = check_and_import_assignment( + &mut virtual_overseer, + block_hash, + candidate_index, + validator, + ) + .await; + + assert_eq!( + rx.await, + Ok(AssignmentCheckResult::Bad(AssignmentCheckError::InvalidBitfield( + candidate_index as usize + 1 + ))), + ); + + let rx = check_and_import_assignment_v2( + &mut virtual_overseer, + block_hash, + vec![1, 2, 10, 50], + validator, + ) + .await; + + assert_eq!( + rx.await, + Ok(AssignmentCheckResult::Bad(AssignmentCheckError::InvalidBitfield(51))), + ); + virtual_overseer + }); +} + #[test] fn subsystem_accepts_and_imports_approval_after_assignment() { test_harness(HarnessConfig::default(), |test_harness| async move { diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index ef39c7beff75..7837c4bac58a 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -33,8 +33,10 @@ use polkadot_node_network_protocol::{ Versioned, View, }; use polkadot_node_primitives::approval::{ - v1::{BlockApprovalMeta, IndirectSignedApprovalVote}, - v2::{AsBitIndex, CandidateBitfield, IndirectAssignmentCertV2}, + v1::{ + AssignmentCertKind, BlockApprovalMeta, IndirectAssignmentCert, IndirectSignedApprovalVote, + }, + v2::{AsBitIndex, AssignmentCertKindV2, CandidateBitfield, IndirectAssignmentCertV2}, }; use polkadot_node_subsystem::{ messages::{ @@ -66,11 +68,15 @@ const COST_DUPLICATE_MESSAGE: Rep = Rep::CostMinorRepeated("Peer sent identical const COST_ASSIGNMENT_TOO_FAR_IN_THE_FUTURE: Rep = Rep::CostMinor("The vote was valid but too far in the future"); const COST_INVALID_MESSAGE: Rep = Rep::CostMajor("The vote was bad"); +const COST_OVERSIZED_BITFIELD: Rep = Rep::CostMajor("Oversized certificate or candidate bitfield"); const BENEFIT_VALID_MESSAGE: Rep = Rep::BenefitMinor("Peer sent a valid message"); const BENEFIT_VALID_MESSAGE_FIRST: Rep = Rep::BenefitMinorFirst("Valid message with new information"); +// Maximum valid size for the `CandidateBitfield` in the assignment messages. +const MAX_BITFIELD_SIZE: usize = 500; + /// The Approval Distribution subsystem. 
pub struct ApprovalDistribution { metrics: Metrics, @@ -855,7 +861,17 @@ impl State { num = assignments.len(), "Processing assignments from a peer", ); - self.process_incoming_assignments(ctx, metrics, peer_id, assignments, rng).await; + let sanitized_assignments = + self.sanitize_v2_assignments(peer_id, ctx.sender(), assignments).await; + + self.process_incoming_assignments( + ctx, + metrics, + peer_id, + sanitized_assignments, + rng, + ) + .await; }, Versioned::V1(protocol_v1::ApprovalDistributionMessage::Assignments(assignments)) => { gum::trace!( @@ -865,14 +881,14 @@ impl State { "Processing assignments from a peer", ); + let sanitized_assignments = + self.sanitize_v1_assignments(peer_id, ctx.sender(), assignments).await; + self.process_incoming_assignments( ctx, metrics, peer_id, - assignments - .into_iter() - .map(|(cert, candidate)| (cert.into(), candidate.into())) - .collect::>(), + sanitized_assignments, rng, ) .await; @@ -1913,6 +1929,80 @@ impl State { ) .await; } + + // Filter out invalid candidate index and certificate core bitfields. + // For each invalid assignment we also punish the peer. + async fn sanitize_v1_assignments( + &mut self, + peer_id: PeerId, + sender: &mut impl overseer::ApprovalDistributionSenderTrait, + assignments: Vec<(IndirectAssignmentCert, CandidateIndex)>, + ) -> Vec<(IndirectAssignmentCertV2, CandidateBitfield)> { + let mut sanitized_assignments = Vec::new(); + for (cert, candidate_index) in assignments.into_iter() { + let cert_bitfield_bits = match cert.cert.kind { + AssignmentCertKind::RelayVRFDelay { core_index } => core_index.0 as usize + 1, + // We don't want to run the VRF yet, but the output is always bounded by `n_cores`. + // We assume `candidate_bitfield` length for the core bitfield and we just check against + // `MAX_BITFIELD_SIZE` later. + AssignmentCertKind::RelayVRFModulo { .. } => candidate_index as usize + 1, + }; + + // Ensure bitfields length under hard limit. + if cert_bitfield_bits > MAX_BITFIELD_SIZE || + cert_bitfield_bits != candidate_index as usize + 1 + { + // Punish the peer for the invalid message. + modify_reputation(&mut self.reputation, sender, peer_id, COST_OVERSIZED_BITFIELD) + .await; + } else { + sanitized_assignments.push((cert.into(), candidate_index.into())) + } + } + + sanitized_assignments + } + + // Filter out oversized candidate and certificate core bitfields. + // For each invalid assignment we also punish the peer. + async fn sanitize_v2_assignments( + &mut self, + peer_id: PeerId, + sender: &mut impl overseer::ApprovalDistributionSenderTrait, + assignments: Vec<(IndirectAssignmentCertV2, CandidateBitfield)>, + ) -> Vec<(IndirectAssignmentCertV2, CandidateBitfield)> { + let mut sanitized_assignments = Vec::new(); + for (cert, candidate_bitfield) in assignments.into_iter() { + let cert_bitfield_bits = match &cert.cert.kind { + AssignmentCertKindV2::RelayVRFDelay { core_index } => core_index.0 as usize + 1, + // We don't want to run the VRF yet, but the output is always bounded by `n_cores`. + // We assume `candidate_bitfield` length for the core bitfield and we just check against + // `MAX_BITFIELD_SIZE` later. + AssignmentCertKindV2::RelayVRFModulo { .. } => candidate_bitfield.len(), + AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield } => + core_bitfield.len(), + }; + + let candidate_bitfield_len = candidate_bitfield.len(); + // Our bitfield has `Lsb0`. + let msb = candidate_bitfield_len - 1; + + // Ensure bitfields length under hard limit. 
+ if cert_bitfield_bits > MAX_BITFIELD_SIZE + || cert_bitfield_bits != candidate_bitfield_len + // Ensure minimum bitfield size - MSB needs to be one. + || !candidate_bitfield.bit_at(msb.as_bit_index()) + { + // Punish the peer for the invalid message. + modify_reputation(&mut self.reputation, sender, peer_id, COST_OVERSIZED_BITFIELD) + .await; + } else { + sanitized_assignments.push((cert, candidate_bitfield)) + } + } + + sanitized_assignments + } } // This adjusts the required routing of messages in blocks that pass the block filter diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 9cd0c7f3ebfe..4675180c469c 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -765,6 +765,8 @@ pub enum AssignmentCheckError { InvalidCert(ValidatorIndex, String), #[error("Internal state mismatch: {0:?}, {1:?}")] Internal(Hash, CandidateHash), + #[error("Oversized candidate or core bitfield >= {0}")] + InvalidBitfield(usize), } /// The result type of [`ApprovalVotingMessage::CheckAndImportApproval`] request. From 8420b90198b382aac36509240e1c8644438471f2 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Mon, 31 Jul 2023 22:52:10 +0300 Subject: [PATCH 092/105] remove print Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/lib.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index 2c0509439ce8..262a7c25c48e 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -1929,7 +1929,6 @@ where "Oversized bitfield", ); - println!("Oversized bitfield {:?}", n_cores); return Ok(( AssignmentCheckResult::Bad(AssignmentCheckError::InvalidBitfield( candidate_indices.len(), From ecad9ec5e37d5ca5a876da97bf45a58d4baddf77 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 1 Aug 2023 12:20:35 +0300 Subject: [PATCH 093/105] Add network-staging feature Signed-off-by: Andrei Sandu --- Cargo.toml | 1 + cli/Cargo.toml | 1 + node/network/protocol/Cargo.toml | 3 + node/network/protocol/src/peer_set.rs | 180 +++++++++++++++----------- node/service/Cargo.toml | 1 + 5 files changed, 109 insertions(+), 77 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index c23837b9c5ed..8d6705fab8a5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -210,6 +210,7 @@ fast-runtime = [ "polkadot-cli/fast-runtime" ] runtime-metrics = [ "polkadot-cli/runtime-metrics" ] pyroscope = ["polkadot-cli/pyroscope"] jemalloc-allocator = ["polkadot-node-core-pvf-prepare-worker/jemalloc-allocator", "polkadot-overseer/jemalloc-allocator"] +network-protocol-staging = ["polkadot-cli/network-protocol-staging"] # Enables timeout-based tests supposed to be run only in CI environment as they may be flaky # when run locally depending on system load diff --git a/cli/Cargo.toml b/cli/Cargo.toml index e7aa562880cc..e74d40b9ccb2 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -78,3 +78,4 @@ rococo-native = ["service/rococo-native"] malus = ["full-node", "service/malus"] runtime-metrics = ["service/runtime-metrics", "polkadot-node-metrics/runtime-metrics"] +network-protocol-staging = ["service/network-protocol-staging"] \ No newline at end of file diff --git a/node/network/protocol/Cargo.toml b/node/network/protocol/Cargo.toml index f1a481081200..943b3b5fa010 100644 --- a/node/network/protocol/Cargo.toml +++ b/node/network/protocol/Cargo.toml @@ -25,3 +25,6 @@ gum = { package = "tracing-gum", path = "../../gum" } [dev-dependencies] rand_chacha = "0.3.1" + 
+[features] +network-protocol-staging = [] diff --git a/node/network/protocol/src/peer_set.rs b/node/network/protocol/src/peer_set.rs index 0fd68fdc02ab..ff6cb4d9ad1b 100644 --- a/node/network/protocol/src/peer_set.rs +++ b/node/network/protocol/src/peer_set.rs @@ -28,6 +28,13 @@ use std::{ }; use strum::{EnumIter, IntoEnumIterator}; +/// The legacy protocol names. Only supported on version = 1. +const LEGACY_VALIDATION_PROTOCOL_V1: &str = "/polkadot/validation/1"; +const LEGACY_COLLATION_PROTOCOL_V1: &str = "/polkadot/collation/1"; + +/// The legacy protocol version. Is always 1 for both validation & collation. +const LEGACY_PROTOCOL_VERSION_V1: u32 = 1; + /// Max notification size is currently constant. pub const MAX_NOTIFICATION_SIZE: u64 = 100 * 1024; @@ -65,7 +72,7 @@ impl PeerSet { // Networking layer relies on `get_main_name()` being the main name of the protocol // for peersets and connection management. let protocol = peerset_protocol_names.get_main_name(self); - let fallback_names = peerset_protocol_names.get_fallback_names(self); + let fallback_names = PeerSetProtocolNames::get_fallback_names(self); let max_notification_size = self.get_max_notification_size(is_authority); match self { @@ -110,6 +117,13 @@ impl PeerSet { /// Networking layer relies on `get_main_version()` being the version /// of the main protocol name reported by [`PeerSetProtocolNames::get_main_name()`]. pub fn get_main_version(self) -> ProtocolVersion { + #[cfg(not(feature = "network-protocol-staging"))] + match self { + PeerSet::Validation => ValidationVersion::V1.into(), + PeerSet::Collation => CollationVersion::V1.into(), + } + + #[cfg(feature = "network-protocol-staging")] match self { PeerSet::Validation => ValidationVersion::VStaging.into(), PeerSet::Collation => CollationVersion::V1.into(), @@ -134,11 +148,14 @@ impl PeerSet { // Unfortunately, labels must be static strings, so we must manually cover them // for all protocol versions here. match self { - PeerSet::Validation => match version { - _ if version == ValidationVersion::V1.into() => Some("validation/1"), - _ if version == ValidationVersion::VStaging.into() => Some("validation/2"), - _ => None, - }, + PeerSet::Validation => + if version == ValidationVersion::V1.into() { + Some("validation/1") + } else if version == ValidationVersion::VStaging.into() { + Some("validation/2") + } else { + None + }, PeerSet::Collation => if version == CollationVersion::V1.into() { Some("collation/1") @@ -203,7 +220,7 @@ impl From for u32 { pub enum ValidationVersion { /// The first version. V1 = 1, - /// The second version adds `AssignmentsV2` message to approval distribution. VStaging + /// The staging version adds `AssignmentsV2` message to approval distribution. VStaging = 2, } @@ -214,11 +231,6 @@ pub enum CollationVersion { V1 = 1, } -impl From for ProtocolVersion { - fn from(version: ValidationVersion) -> ProtocolVersion { - ProtocolVersion(version as u32) - } -} /// Marker indicating the version is unknown. 
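Putting the naming pieces together for the validation peer-set (written as if inside `peer_set.rs`; the surrounding networking wiring is omitted): the advertised main protocol name is derived from the genesis hash plus the feature-gated main version, while the hard-coded legacy name is only ever offered as a fallback.

// With `network-protocol-staging` disabled this advertises "/<genesis-hash>/validation/1";
// with the feature enabled it advertises "/<genesis-hash>/validation/2". In both cases the
// static "/polkadot/validation/1" legacy name stays available as a fallback for old peers.
fn validation_protocol_names(
    protocol_names: &PeerSetProtocolNames,
) -> (ProtocolName, Vec<ProtocolName>) {
    let main = protocol_names.get_main_name(PeerSet::Validation);
    let fallbacks = PeerSetProtocolNames::get_fallback_names(PeerSet::Validation);
    (main, fallbacks)
}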
#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct UnknownVersion; @@ -237,6 +249,26 @@ impl TryFrom for ValidationVersion { } } +impl TryFrom for CollationVersion { + type Error = UnknownVersion; + + fn try_from(p: ProtocolVersion) -> Result { + for v in Self::iter() { + if v as u32 == p.0 { + return Ok(v) + } + } + + Err(UnknownVersion) + } +} + +impl From for ProtocolVersion { + fn from(version: ValidationVersion) -> ProtocolVersion { + ProtocolVersion(version as u32) + } +} + impl From for ProtocolVersion { fn from(version: CollationVersion) -> ProtocolVersion { ProtocolVersion(version as u32) @@ -247,57 +279,46 @@ impl From for ProtocolVersion { #[derive(Clone)] pub struct PeerSetProtocolNames { protocols: HashMap, - legacy_protocols: HashMap, names: HashMap<(PeerSet, ProtocolVersion), ProtocolName>, - legacy_names: HashMap<(PeerSet, ProtocolVersion), ProtocolName>, } impl PeerSetProtocolNames { /// Construct [`PeerSetProtocols`] using `genesis_hash` and `fork_id`. pub fn new(genesis_hash: Hash, fork_id: Option<&str>) -> Self { let mut protocols = HashMap::new(); - let mut legacy_protocols = HashMap::new(); let mut names = HashMap::new(); - let mut legacy_names = HashMap::new(); - for protocol in PeerSet::iter() { match protocol { - PeerSet::Validation => { - // Main protocol v2 - Self::register_protocol( - &mut protocols, - &mut names, - protocol, - ValidationVersion::VStaging.into(), - &genesis_hash, - fork_id, - ); - - // Legacy protocol v1 - Self::register_protocol( - &mut legacy_protocols, - &mut legacy_names, - protocol, - ValidationVersion::V1.into(), - &genesis_hash, - fork_id, - ); - }, - PeerSet::Collation => Self::register_protocol( - &mut protocols, - &mut names, - protocol, - CollationVersion::V1.into(), - &genesis_hash, - fork_id, - ), + PeerSet::Validation => + for version in ValidationVersion::iter() { + Self::register_main_protocol( + &mut protocols, + &mut names, + protocol, + version.into(), + &genesis_hash, + fork_id, + ); + }, + PeerSet::Collation => + for version in CollationVersion::iter() { + Self::register_main_protocol( + &mut protocols, + &mut names, + protocol, + version.into(), + &genesis_hash, + fork_id, + ); + }, } + Self::register_legacy_protocol(&mut protocols, protocol); } - Self { protocols, legacy_protocols, names, legacy_names } + Self { protocols, names } } - /// Helper function to register a protocol. - fn register_protocol( + /// Helper function to register main protocol. + fn register_main_protocol( protocols: &mut HashMap, names: &mut HashMap<(PeerSet, ProtocolVersion), ProtocolName>, protocol: PeerSet, @@ -310,6 +331,19 @@ impl PeerSetProtocolNames { Self::insert_protocol_or_panic(protocols, protocol_name, protocol, version); } + /// Helper function to register legacy protocol. + fn register_legacy_protocol( + protocols: &mut HashMap, + protocol: PeerSet, + ) { + Self::insert_protocol_or_panic( + protocols, + Self::get_legacy_name(protocol), + protocol, + ProtocolVersion(LEGACY_PROTOCOL_VERSION_V1), + ) + } + /// Helper function to make sure no protocols have the same name. fn insert_protocol_or_panic( protocols: &mut HashMap, @@ -336,10 +370,7 @@ impl PeerSetProtocolNames { /// Lookup the protocol using its on the wire name. pub fn try_get_protocol(&self, name: &ProtocolName) -> Option<(PeerSet, ProtocolVersion)> { - self.protocols - .get(name) - .or_else(|| self.legacy_protocols.get(name)) - .map(ToOwned::to_owned) + self.protocols.get(name).map(ToOwned::to_owned) } /// Get the main protocol name. 
It's used by the networking for keeping track @@ -352,20 +383,10 @@ impl PeerSetProtocolNames { pub fn get_name(&self, protocol: PeerSet, version: ProtocolVersion) -> ProtocolName { self.names .get(&(protocol, version)) - .or_else(|| self.legacy_names.get(&(protocol, version))) .expect("Protocols & versions are specified via enums defined above, and they are all registered in `new()`; qed") .clone() } - /// Get the protocol name for legacy versions. - pub fn get_legacy_names(&self, protocol: PeerSet) -> Vec { - self.legacy_names - .iter() - .filter(|((legacy_protocol, _), _)| &protocol == legacy_protocol) - .map(|(_, protocol_name)| protocol_name.clone()) - .collect() - } - /// The protocol name of this protocol based on `genesis_hash` and `fork_id`. fn generate_name( genesis_hash: &Hash, @@ -387,12 +408,19 @@ impl PeerSetProtocolNames { format!("{}/{}/{}", prefix, short_name, version).into() } - /// Get the protocol fallback names. - fn get_fallback_names(&self, protocol: PeerSet) -> Vec { + /// Get the legacy protocol name, only `LEGACY_PROTOCOL_VERSION` = 1 is supported. + fn get_legacy_name(protocol: PeerSet) -> ProtocolName { match protocol { - PeerSet::Validation => self.get_legacy_names(protocol), - PeerSet::Collation => vec![], + PeerSet::Validation => LEGACY_VALIDATION_PROTOCOL_V1, + PeerSet::Collation => LEGACY_COLLATION_PROTOCOL_V1, } + .into() + } + + /// Get the protocol fallback names. Currently only holds the legacy name + /// for `LEGACY_PROTOCOL_VERSION` = 1. + fn get_fallback_names(protocol: PeerSet) -> Vec { + std::iter::once(Self::get_legacy_name(protocol)).collect() } } @@ -468,19 +496,13 @@ mod tests { let protocol_names = PeerSetProtocolNames::new(genesis_hash, None); let validation_main = - "/7ac8741de8b7146d8a5617fd462914557fe63c265a7f1c10e7dae32858eebb80/validation/2"; + "/7ac8741de8b7146d8a5617fd462914557fe63c265a7f1c10e7dae32858eebb80/validation/1"; assert_eq!( protocol_names.try_get_protocol(&validation_main.into()), - Some((PeerSet::Validation, TestVersion(2).into())), + Some((PeerSet::Validation, TestVersion(1).into())), ); - let validation_legacy = - "/7ac8741de8b7146d8a5617fd462914557fe63c265a7f1c10e7dae32858eebb80/validation/1"; - - assert_eq!( - protocol_names.get_fallback_names(PeerSet::Validation), - vec![validation_legacy.into()], - ); + let validation_legacy = "/polkadot/validation/1"; assert_eq!( protocol_names.try_get_protocol(&validation_legacy.into()), Some((PeerSet::Validation, TestVersion(1).into())), @@ -493,7 +515,11 @@ mod tests { Some((PeerSet::Collation, TestVersion(1).into())), ); - assert_eq!(protocol_names.get_fallback_names(PeerSet::Collation), vec![],); + let collation_legacy = "/polkadot/collation/1"; + assert_eq!( + protocol_names.try_get_protocol(&collation_legacy.into()), + Some((PeerSet::Collation, TestVersion(1).into())), + ); } #[test] diff --git a/node/service/Cargo.toml b/node/service/Cargo.toml index 0de3d0e8df77..712cb70ef059 100644 --- a/node/service/Cargo.toml +++ b/node/service/Cargo.toml @@ -216,3 +216,4 @@ runtime-metrics = [ "polkadot-runtime?/runtime-metrics", "polkadot-runtime-parachains/runtime-metrics" ] +network-protocol-staging = ["polkadot-node-network-protocol/network-protocol-staging"] \ No newline at end of file From f61d8ab0445c175272c9dad68c20a21ceb40dfbd Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Wed, 2 Aug 2023 18:33:07 +0300 Subject: [PATCH 094/105] fix so DistributeAssignment gets sent once for compact assignments Signed-off-by: Andrei Sandu --- Cargo.lock | 2 + 
node/core/approval-voting/Cargo.toml | 2 + .../src/approval_db/v2/migration_helpers.rs | 1 + .../approval-voting/src/approval_db/v2/mod.rs | 4 + .../src/approval_db/v2/tests.rs | 1 + node/core/approval-voting/src/import.rs | 2 + node/core/approval-voting/src/lib.rs | 24 ++- .../approval-voting/src/persisted_entries.rs | 29 +++- node/core/approval-voting/src/tests.rs | 139 +++++++++++++++++- node/primitives/src/approval.rs | 5 + 10 files changed, 197 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ba1f14ed8b0d..076e94b86996 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6804,11 +6804,13 @@ dependencies = [ "async-trait", "bitvec", "derive_more", + "env_logger 0.9.3", "futures", "futures-timer", "itertools", "kvdb", "kvdb-memorydb", + "log", "lru 0.11.0", "merlin", "parity-scale-codec", diff --git a/node/core/approval-voting/Cargo.toml b/node/core/approval-voting/Cargo.toml index ff6576684119..7abdedc283c0 100644 --- a/node/core/approval-voting/Cargo.toml +++ b/node/core/approval-voting/Cargo.toml @@ -46,3 +46,5 @@ polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } assert_matches = "1.4.0" kvdb-memorydb = "0.13.0" test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } +log = "0.4.17" +env_logger = "0.9.0" diff --git a/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs b/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs index 2e0775554ba3..02df34f6ab1a 100644 --- a/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs +++ b/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs @@ -51,6 +51,7 @@ fn make_block_entry( approved_bitfield: make_bitvec(candidates.len()), candidates, children: Vec::new(), + distributed_assignments: Default::default(), } } diff --git a/node/core/approval-voting/src/approval_db/v2/mod.rs b/node/core/approval-voting/src/approval_db/v2/mod.rs index dac8d5d4b0f6..44a012758c06 100644 --- a/node/core/approval-voting/src/approval_db/v2/mod.rs +++ b/node/core/approval-voting/src/approval_db/v2/mod.rs @@ -234,6 +234,10 @@ pub struct BlockEntry { // block. The block can be considered approved if the bitfield has all bits set to `true`. pub approved_bitfield: Bitfield, pub children: Vec, + // Assignments we already distributed. A 1 bit means the candidate index for which + // we already have sent out an assignment. We need this to avoid distributing + // multiple core assignments more than once. 
+ pub distributed_assignments: Bitfield, } impl From for Tick { diff --git a/node/core/approval-voting/src/approval_db/v2/tests.rs b/node/core/approval-voting/src/approval_db/v2/tests.rs index dfcf56ccccc2..50a5a924ca8d 100644 --- a/node/core/approval-voting/src/approval_db/v2/tests.rs +++ b/node/core/approval-voting/src/approval_db/v2/tests.rs @@ -56,6 +56,7 @@ fn make_block_entry( approved_bitfield: make_bitvec(candidates.len()), candidates, children: Vec::new(), + distributed_assignments: Default::default(), } } diff --git a/node/core/approval-voting/src/import.rs b/node/core/approval-voting/src/import.rs index cf2a90eac34b..4a145050924f 100644 --- a/node/core/approval-voting/src/import.rs +++ b/node/core/approval-voting/src/import.rs @@ -511,6 +511,7 @@ pub(crate) async fn handle_new_head( .collect(), approved_bitfield, children: Vec::new(), + distributed_assignments: Default::default(), }; gum::trace!( @@ -1264,6 +1265,7 @@ pub(crate) mod tests { candidates: Vec::new(), approved_bitfield: Default::default(), children: Vec::new(), + distributed_assignments: Default::default(), } .into(), ); diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index 262a7c25c48e..f7d7586347fa 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -741,6 +741,7 @@ enum Action { session: SessionIndex, candidate: CandidateReceipt, backing_group: GroupIndex, + distribute_assignment: bool, }, NoteApprovedInChainSelection(Hash), IssueApproval(CandidateHash, ApprovalVoteRequest), @@ -963,6 +964,7 @@ async fn handle_actions( session, candidate, backing_group, + distribute_assignment, } => { // Don't launch approval work if the node is syncing. if let Mode::Syncing(_) = *mode { @@ -983,10 +985,12 @@ async fn handle_actions( launch_approval_span.add_string_tag("block-hash", format!("{:?}", block_hash)); let validator_index = indirect_cert.validator; - ctx.send_unbounded_message(ApprovalDistributionMessage::DistributeAssignment( - indirect_cert, - claimed_candidate_indices, - )); + if distribute_assignment { + ctx.send_unbounded_message(ApprovalDistributionMessage::DistributeAssignment( + indirect_cert, + claimed_candidate_indices, + )); + } match approvals_cache.get(&candidate_hash) { Some(ApprovalOutcome::Approved) => { @@ -2104,6 +2108,7 @@ where validator = assignment.validator.0, candidate_hashes = ?assigned_candidate_hashes, assigned_cores = ?claimed_core_indices, + ?tranche, "Imported assignments for multiple cores.", ); @@ -2491,7 +2496,7 @@ async fn process_wakeup( let candidate_entry = db.load_candidate_entry(&candidate_hash)?; // If either is not present, we have nothing to wakeup. Might have lost a race with finality - let (block_entry, mut candidate_entry) = match (block_entry, candidate_entry) { + let (mut block_entry, mut candidate_entry) = match (block_entry, candidate_entry) { (Some(b), Some(c)) => (b, c), _ => return Ok(Vec::new()), }; @@ -2586,6 +2591,14 @@ async fn process_wakeup( { match cores_to_candidate_indices(&claimed_core_indices, &block_entry) { Ok(claimed_candidate_indices) => { + // Ensure we distribute multiple core assignments just once. 
+ let distribute_assignment = if claimed_candidate_indices.count_ones() > 1 { + !block_entry.mark_assignment_distributed(claimed_candidate_indices.clone()) + } else { + true + }; + db.write_block_entry(block_entry.clone()); + actions.push(Action::LaunchApproval { claimed_candidate_indices, candidate_hash, @@ -2595,6 +2608,7 @@ async fn process_wakeup( session: block_entry.session(), candidate: candidate_receipt, backing_group, + distribute_assignment, }); }, Err(err) => { diff --git a/node/core/approval-voting/src/persisted_entries.rs b/node/core/approval-voting/src/persisted_entries.rs index 3f5c2766154d..0838d6eb6f8f 100644 --- a/node/core/approval-voting/src/persisted_entries.rs +++ b/node/core/approval-voting/src/persisted_entries.rs @@ -22,7 +22,7 @@ use polkadot_node_primitives::approval::{ v1::{DelayTranche, RelayVRFStory}, - v2::AssignmentCertV2, + v2::{AssignmentCertV2, CandidateBitfield}, }; use polkadot_primitives::{ BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, Hash, SessionIndex, @@ -115,11 +115,6 @@ impl ApprovalEntry { self.our_assignment.as_ref() } - // Needed for v1 to v2 migration. - pub fn our_assignment_mut(&mut self) -> Option<&mut OurAssignment> { - self.our_assignment.as_mut() - } - // Note that our assignment is triggered. No-op if already triggered. pub fn trigger_our_assignment( &mut self, @@ -358,6 +353,10 @@ pub struct BlockEntry { // block. The block can be considered approved if the bitfield has all bits set to `true`. pub approved_bitfield: Bitfield, pub children: Vec, + // A list of assignments for which wea already distributed the assignment. + // We use this to ensure we don't distribute multiple core assignments twice as we track + // individual wakeups for each core. + pub distributed_assignments: Bitfield, } impl BlockEntry { @@ -432,6 +431,22 @@ impl BlockEntry { pub fn parent_hash(&self) -> Hash { self.parent_hash } + + /// Mark distributed assignment for many candidate indices. + /// Returns `true` if an assignment was already distributed for the `candidates`. + pub fn mark_assignment_distributed(&mut self, candidates: CandidateBitfield) -> bool { + let bitfield = candidates.into_inner(); + let total_one_bits = self.distributed_assignments.count_ones(); + + let new_len = std::cmp::max(self.distributed_assignments.len(), bitfield.len()); + self.distributed_assignments.resize(new_len, false); + self.distributed_assignments |= bitfield; + + // If the an operation did not change our current bitfied, we return true. + let distributed = total_one_bits == self.distributed_assignments.count_ones(); + + distributed + } } impl From for BlockEntry { @@ -446,6 +461,7 @@ impl From for BlockEntry { candidates: entry.candidates, approved_bitfield: entry.approved_bitfield, children: entry.children, + distributed_assignments: entry.distributed_assignments, } } } @@ -462,6 +478,7 @@ impl From for crate::approval_db::v2::BlockEntry { candidates: entry.candidates, approved_bitfield: entry.approved_bitfield, children: entry.children, + distributed_assignments: entry.distributed_assignments, } } } diff --git a/node/core/approval-voting/src/tests.rs b/node/core/approval-voting/src/tests.rs index ad784bb504d4..20b3bdd76204 100644 --- a/node/core/approval-voting/src/tests.rs +++ b/node/core/approval-voting/src/tests.rs @@ -165,7 +165,7 @@ impl Clock for MockClock { // This mock clock allows us to manipulate the time and // be notified when wakeups have been triggered. 
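Because the return convention of `mark_assignment_distributed` is easy to misread (`true` means the bits were already recorded), the call-site decision in `process_wakeup` boils down to the following sketch (hypothetical helper, same types as in the hunks above):

// A compact (multi-core) assignment is handed to approval-distribution only once:
// the first wakeup covering these candidate bits sends it, while later wakeups for
// the remaining cores find the bits already set and skip the send.
fn should_distribute(block_entry: &mut BlockEntry, claimed: CandidateBitfield) -> bool {
    if claimed.count_ones() > 1 {
        // Returns `true` if these bits were already distributed, so negate it.
        !block_entry.mark_assignment_distributed(claimed)
    } else {
        true
    }
}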
-#[derive(Default)] +#[derive(Default, Debug)] struct MockClockInner { tick: Tick, wakeups: Vec<(Tick, oneshot::Sender<()>)>, @@ -500,6 +500,12 @@ fn test_harness>( config: HarnessConfig, test: impl FnOnce(TestHarness) -> T, ) { + let _ = env_logger::builder() + .is_test(true) + .filter(Some("polkadot_node_core_approval_voting"), log::LevelFilter::Trace) + .filter(Some(LOG_TARGET), log::LevelFilter::Trace) + .try_init(); + let HarnessConfig { sync_oracle, sync_oracle_handle, clock, backend, assignment_criteria } = config; @@ -2522,6 +2528,137 @@ fn subsystem_validate_approvals_cache() { }); } +#[test] +fn subsystem_doesnt_distribute_duplicate_compact_assignments() { + let assignment_criteria = Box::new(MockAssignmentCriteria( + || { + let mut assignments = HashMap::new(); + let cert = garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFModuloCompact { + core_bitfield: vec![CoreIndex(0), CoreIndex(1)].try_into().unwrap(), + }); + + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: cert.clone(), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + + let _ = assignments.insert( + CoreIndex(1), + approval_db::v2::OurAssignment { + cert, + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + assignments + }, + |_| Ok(0), + )); + + let config = HarnessConfigBuilder::default().assignment_criteria(assignment_criteria).build(); + let store = config.backend(); + + test_harness(config, |test_harness| async move { + let TestHarness { + mut virtual_overseer, + sync_oracle_handle: _sync_oracle_handle, + clock, + .. + } = test_harness; + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + + let block_hash = Hash::repeat_byte(0x01); + + let candidate_receipt1 = { + let mut receipt = dummy_candidate_receipt(block_hash); + receipt.descriptor.para_id = ParaId::from(1_u32); + receipt + }; + let candidate_receipt2 = { + let mut receipt = dummy_candidate_receipt(block_hash); + receipt.descriptor.para_id = ParaId::from(2_u32); + receipt + }; + let candidate_index1 = 0; + let candidate_index2 = 1; + + // Add block hash 00. + ChainBuilder::new() + .add_block( + block_hash, + ChainBuilder::GENESIS_HASH, + 1, + BlockConfig { + slot: Slot::from(0), + candidates: Some(vec![ + (candidate_receipt1.clone(), CoreIndex(0), GroupIndex(1)), + (candidate_receipt2.clone(), CoreIndex(1), GroupIndex(1)), + ]), + session_info: None, + }, + ) + .build(&mut virtual_overseer) + .await; + + // Activate the wakeup present above, and sleep to allow process_wakeups to execute.. 
+ assert_eq!(Some(2), clock.inner.lock().next_wakeup()); + gum::trace!("clock \n{:?}\n", clock.inner.lock()); + + clock.inner.lock().wakeup_all(100); + + assert_eq!(clock.inner.lock().wakeups.len(), 0); + + futures_timer::Delay::new(Duration::from_millis(100)).await; + + // Assignment is distributed only once from `approval-voting` + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + c_indices, + )) => { + assert_eq!(c_indices, vec![candidate_index1, candidate_index2].try_into().unwrap()); + } + ); + + // Candidate 1 + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + // Candidate 2 + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + // Check if assignment was triggered for candidate 1. + let candidate_entry = + store.load_candidate_entry(&candidate_receipt1.hash()).unwrap().unwrap(); + let our_assignment = + candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap(); + assert!(our_assignment.triggered()); + + // Check if assignment was triggered for candidate 2. + let candidate_entry = + store.load_candidate_entry(&candidate_receipt2.hash()).unwrap().unwrap(); + let our_assignment = + candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap(); + assert!(our_assignment.triggered()); + virtual_overseer + }); +} + /// Ensure that when two assignments are imported, only one triggers the Approval Checking work async fn handle_double_assignment_import( virtual_overseer: &mut VirtualOverseer, diff --git a/node/primitives/src/approval.rs b/node/primitives/src/approval.rs index 00037b774c76..39fa97b2ca39 100644 --- a/node/primitives/src/approval.rs +++ b/node/primitives/src/approval.rs @@ -290,6 +290,11 @@ pub mod v2 { pub fn inner_mut(&mut self) -> &mut BitVec { &mut self.0 } + + /// Returns the inner bitfield and consumes `self`. 
+ pub fn into_inner(self) -> BitVec { + self.0 + } } impl AsBitIndex for CandidateIndex { From 6eefe3f5e75781566d437fdc88a60162f87349d8 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 8 Aug 2023 17:39:08 +0300 Subject: [PATCH 095/105] review feedback + fixes Signed-off-by: Andrei Sandu --- .../approval-voting/src/approval_db/v1/mod.rs | 27 ++++++++++- .../src/approval_db/v2/migration_helpers.rs | 45 ++++++++++++------- .../approval-voting/src/approval_db/v2/mod.rs | 18 ++++++++ node/core/approval-voting/src/backend.rs | 3 ++ node/core/approval-voting/src/criteria.rs | 26 +++++------ node/core/approval-voting/src/lib.rs | 5 +-- .../approval-voting/src/persisted_entries.rs | 19 +++++++- node/core/approval-voting/src/tests.rs | 6 +++ node/network/approval-distribution/src/lib.rs | 12 ++--- 9 files changed, 121 insertions(+), 40 deletions(-) diff --git a/node/core/approval-voting/src/approval_db/v1/mod.rs b/node/core/approval-voting/src/approval_db/v1/mod.rs index c324156d2836..011d0a559c02 100644 --- a/node/core/approval-voting/src/approval_db/v1/mod.rs +++ b/node/core/approval-voting/src/approval_db/v1/mod.rs @@ -25,9 +25,10 @@ use parity_scale_codec::{Decode, Encode}; use polkadot_node_primitives::approval::v1::{AssignmentCert, DelayTranche}; use polkadot_primitives::{ - CandidateReceipt, GroupIndex, Hash, SessionIndex, ValidatorIndex, ValidatorSignature, + BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, Hash, SessionIndex, + ValidatorIndex, ValidatorSignature, }; - +use sp_consensus_slots::Slot; use std::collections::BTreeMap; use super::v2::Bitfield; @@ -66,3 +67,25 @@ pub struct CandidateEntry { pub block_assignments: BTreeMap, pub approvals: Bitfield, } + +/// Metadata regarding approval of a particular block, by way of approval of the +/// candidates contained within it. +#[derive(Encode, Decode, Debug, Clone, PartialEq)] +pub struct BlockEntry { + pub block_hash: Hash, + pub block_number: BlockNumber, + pub parent_hash: Hash, + pub session: SessionIndex, + pub slot: Slot, + /// Random bytes derived from the VRF submitted within the block by the block + /// author as a credential and used as input to approval assignment criteria. + pub relay_vrf_story: [u8; 32], + // The candidates included as-of this block and the index of the core they are + // leaving. Sorted ascending by core index. + pub candidates: Vec<(CoreIndex, CandidateHash)>, + // A bitfield where the i'th bit corresponds to the i'th candidate in `candidates`. + // The i'th bit is `true` iff the candidate has been approved in the context of this + // block. The block can be considered approved if the bitfield has all bits set to `true`. 
+ pub approved_bitfield: Bitfield, + pub children: Vec, +} diff --git a/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs b/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs index 02df34f6ab1a..e0aea5d7367b 100644 --- a/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs +++ b/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs @@ -35,13 +35,14 @@ fn dummy_assignment_cert(kind: AssignmentCertKind) -> AssignmentCert { AssignmentCert { kind, vrf: VrfSignature { output: VrfOutput(out), proof: VrfProof(proof) } } } -fn make_block_entry( + +fn make_block_entry_v1( block_hash: Hash, parent_hash: Hash, block_number: BlockNumber, candidates: Vec<(CoreIndex, CandidateHash)>, -) -> BlockEntry { - BlockEntry { +) -> crate::approval_db::v1::BlockEntry { + crate::approval_db::v1::BlockEntry { block_hash, parent_hash, block_number, @@ -51,7 +52,6 @@ fn make_block_entry( approved_bitfield: make_bitvec(candidates.len()), candidates, children: Vec::new(), - distributed_assignments: Default::default(), } } @@ -69,7 +69,10 @@ pub fn v1_to_v2(db: Arc, config: Config) -> Result<()> { .map_err(|e| Error::InternalError(e))? .iter() .filter_map(|block_hash| { - backend.load_block_entry(block_hash).map_err(|e| Error::InternalError(e)).ok()? + backend + .load_block_entry_v1(block_hash) + .map_err(|e| Error::InternalError(e)) + .ok()? }) .collect::>(); @@ -95,6 +98,7 @@ pub fn v1_to_v2(db: Arc, config: Config) -> Result<()> { counter += 1; } } + overlay.write_block_entry(block); } gum::info!(target: crate::LOG_TARGET, "Migrated {} entries", counter); @@ -117,11 +121,9 @@ pub fn v1_to_v2_sanity_check( let all_blocks = backend .load_all_blocks() - .map_err(|e| Error::InternalError(e))? + .unwrap() .iter() - .filter_map(|block_hash| { - backend.load_block_entry(block_hash).map_err(|e| Error::InternalError(e)).ok()? - }) + .map(|block_hash| backend.load_block_entry(block_hash).unwrap().unwrap()) .collect::>(); let mut candidates = HashSet::new(); @@ -131,10 +133,7 @@ pub fn v1_to_v2_sanity_check( for (_core_index, candidate_hash) in block.candidates() { // Loading the candidate will also perform the conversion to the updated format and return // that represantation. - if let Some(candidate_entry) = backend - .load_candidate_entry(&candidate_hash) - .map_err(|e| Error::InternalError(e))? - { + if let Some(candidate_entry) = backend.load_candidate_entry(&candidate_hash).unwrap() { candidates.insert(candidate_entry.candidate.hash()); } } @@ -167,7 +166,7 @@ pub fn v1_to_v2_fill_test_data( let at_height = vec![relay_hash]; - let block_entry = make_block_entry( + let block_entry = make_block_entry_v1( relay_hash, Default::default(), relay_number, @@ -201,10 +200,10 @@ pub fn v1_to_v2_fill_test_data( }; overlay_db.write_blocks_at_height(relay_number, at_height.clone()); - overlay_db.write_block_entry(block_entry.clone().into()); - expected_candidates.insert(candidate_entry.candidate.hash()); + db.write(write_candidate_entry_v1(candidate_entry, config)).unwrap(); + db.write(write_block_entry_v1(block_entry, config)).unwrap(); } let write_ops = overlay_db.into_write_ops(); @@ -226,3 +225,17 @@ fn write_candidate_entry_v1( ); tx } + +// Low level DB helper to write a block entry in v1 scheme. 
+fn write_block_entry_v1( + block_entry: crate::approval_db::v1::BlockEntry, + config: Config, +) -> DBTransaction { + let mut tx = DBTransaction::new(); + tx.put_vec( + config.col_approval_data, + &block_entry_key(&block_entry.block_hash), + block_entry.encode(), + ); + tx +} diff --git a/node/core/approval-voting/src/approval_db/v2/mod.rs b/node/core/approval-voting/src/approval_db/v2/mod.rs index 44a012758c06..66df6ee8f653 100644 --- a/node/core/approval-voting/src/approval_db/v2/mod.rs +++ b/node/core/approval-voting/src/approval_db/v2/mod.rs @@ -63,6 +63,13 @@ impl V1ReadBackend for DbBackend { load_candidate_entry_v1(&*self.inner, &self.config, candidate_hash) .map(|e| e.map(Into::into)) } + + fn load_block_entry_v1( + &self, + block_hash: &Hash, + ) -> SubsystemResult> { + load_block_entry_v1(&*self.inner, &self.config, block_hash).map(|e| e.map(Into::into)) + } } impl Backend for DbBackend { @@ -374,3 +381,14 @@ pub fn load_candidate_entry_v1( .map(|u: Option| u.map(|v| v.into())) .map_err(|e| SubsystemError::with_origin("approval-voting", e)) } + +/// Load a block entry from the aux store in v1 format. +pub fn load_block_entry_v1( + store: &dyn Database, + config: &Config, + block_hash: &Hash, +) -> SubsystemResult> { + load_decode(store, config.col_approval_data, &block_entry_key(block_hash)) + .map(|u: Option| u.map(|v| v.into())) + .map_err(|e| SubsystemError::with_origin("approval-voting", e)) +} diff --git a/node/core/approval-voting/src/backend.rs b/node/core/approval-voting/src/backend.rs index 0e3f04cc7e96..374e7a826d19 100644 --- a/node/core/approval-voting/src/backend.rs +++ b/node/core/approval-voting/src/backend.rs @@ -73,6 +73,9 @@ pub trait V1ReadBackend: Backend { &self, candidate_hash: &CandidateHash, ) -> SubsystemResult>; + + /// Load a block entry from the DB with scheme version 1. + fn load_block_entry_v1(&self, block_hash: &Hash) -> SubsystemResult>; } // Status of block range in the `OverlayedBackend`. diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index 33731a5fe3fa..a629bd5c8bd9 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -712,21 +712,21 @@ pub(crate) fn check_assignment_cert( // Currently validators can opt out of checking specific cores. // This is the same issue to how validator can opt out and not send their assignments in the first place. - // Ensure that the `vrf_in_out` actually includes all of the claimed cores. 
- if claimed_core_indices.iter_ones().fold(true, |cores_match, core| {
- cores_match & resulting_cores.contains(&CoreIndex(core as u32))
- }) {
- Ok(0)
- } else {
- gum::debug!(
- target: LOG_TARGET,
- ?resulting_cores,
- ?claimed_core_indices,
- "Assignment claimed cores mismatch",
- );
- Err(InvalidAssignment(Reason::VRFModuloCoreIndexMismatch))
+ for claimed_core_index in claimed_core_indices.iter_ones() {
+ if !resulting_cores.contains(&CoreIndex(claimed_core_index as u32)) {
+ gum::debug!(
+ target: LOG_TARGET,
+ ?resulting_cores,
+ ?claimed_core_indices,
+ vrf_modulo_cores = ?resulting_cores,
+ "Assignment claimed cores mismatch",
+ );
+ return Err(InvalidAssignment(Reason::VRFModuloCoreIndexMismatch))
+ }
 }
+
+ Ok(0)
 },
 AssignmentCertKindV2::RelayVRFModulo { sample } => {
 if *sample >= config.relay_vrf_modulo_samples {
diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs
index f7d7586347fa..2132cee71554 100644
--- a/node/core/approval-voting/src/lib.rs
+++ b/node/core/approval-voting/src/lib.rs
@@ -1922,9 +1922,8 @@ where
 let n_cores = session_info.n_cores as usize;
 
 // Early check the candidate bitfield and core bitfields lengths < `n_cores`.
- // `approval-distribution` already checks for core and claimed candidate bitfields
- // to be equal in size. A check for claimed candidate bitfields should be enough here.
- if candidate_indices.len() >= n_cores {
+ // Core bitfield length is checked later in `check_assignment_cert`.
+ if candidate_indices.len() > n_cores {
 gum::debug!(
 target: LOG_TARGET,
 validator = assignment.validator.0,
diff --git a/node/core/approval-voting/src/persisted_entries.rs b/node/core/approval-voting/src/persisted_entries.rs
index 0838d6eb6f8f..155b2f9c4e02 100644
--- a/node/core/approval-voting/src/persisted_entries.rs
+++ b/node/core/approval-voting/src/persisted_entries.rs
@@ -356,7 +356,7 @@ pub struct BlockEntry {
 // A list of assignments for which we already distributed the assignment.
 // We use this to ensure we don't distribute multiple core assignments twice as we track
 // individual wakeups for each core.
- pub distributed_assignments: Bitfield, + distributed_assignments: Bitfield, } impl BlockEntry { @@ -466,6 +466,23 @@ impl From for BlockEntry { } } +impl From for BlockEntry { + fn from(entry: crate::approval_db::v1::BlockEntry) -> Self { + BlockEntry { + block_hash: entry.block_hash, + parent_hash: entry.parent_hash, + block_number: entry.block_number, + session: entry.session, + slot: entry.slot, + relay_vrf_story: RelayVRFStory(entry.relay_vrf_story), + candidates: entry.candidates, + approved_bitfield: entry.approved_bitfield, + children: entry.children, + distributed_assignments: Default::default(), + } + } +} + impl From for crate::approval_db::v2::BlockEntry { fn from(entry: BlockEntry) -> Self { Self { diff --git a/node/core/approval-voting/src/tests.rs b/node/core/approval-voting/src/tests.rs index 20b3bdd76204..41bef9c5f8f2 100644 --- a/node/core/approval-voting/src/tests.rs +++ b/node/core/approval-voting/src/tests.rs @@ -282,6 +282,9 @@ impl V1ReadBackend for TestStoreInner { ) -> SubsystemResult> { self.load_candidate_entry(candidate_hash) } + fn load_block_entry_v1(&self, block_hash: &Hash) -> SubsystemResult> { + self.load_block_entry(block_hash) + } } impl Backend for TestStoreInner { @@ -362,6 +365,9 @@ impl V1ReadBackend for TestStore { ) -> SubsystemResult> { self.load_candidate_entry(candidate_hash) } + fn load_block_entry_v1(&self, block_hash: &Hash) -> SubsystemResult> { + self.load_block_entry(block_hash) + } } impl Backend for TestStore { diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index 7837c4bac58a..9e4252ccfaee 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -1948,9 +1948,10 @@ impl State { AssignmentCertKind::RelayVRFModulo { .. } => candidate_index as usize + 1, }; + let candidate_bitfield_bits = candidate_index as usize + 1; + // Ensure bitfields length under hard limit. - if cert_bitfield_bits > MAX_BITFIELD_SIZE || - cert_bitfield_bits != candidate_index as usize + 1 + if cert_bitfield_bits > MAX_BITFIELD_SIZE || candidate_bitfield_bits > MAX_BITFIELD_SIZE { // Punish the peer for the invalid message. modify_reputation(&mut self.reputation, sender, peer_id, COST_OVERSIZED_BITFIELD) @@ -1983,13 +1984,14 @@ impl State { core_bitfield.len(), }; - let candidate_bitfield_len = candidate_bitfield.len(); + let candidate_bitfield_bits = candidate_bitfield.len(); + // Our bitfield has `Lsb0`. - let msb = candidate_bitfield_len - 1; + let msb = candidate_bitfield_bits - 1; // Ensure bitfields length under hard limit. if cert_bitfield_bits > MAX_BITFIELD_SIZE - || cert_bitfield_bits != candidate_bitfield_len + || candidate_bitfield_bits > MAX_BITFIELD_SIZE // Ensure minimum bitfield size - MSB needs to be one. 
|| !candidate_bitfield.bit_at(msb.as_bit_index()) {

From 509125f2076029f7af3df1b861e8e1ff8a831d7a Mon Sep 17 00:00:00 2001
From: Andrei Sandu
Date: Wed, 9 Aug 2023 14:02:10 +0300
Subject: [PATCH 096/105] fix test build

Signed-off-by: Andrei Sandu
---
 node/network/bitfield-distribution/src/tests.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/node/network/bitfield-distribution/src/tests.rs b/node/network/bitfield-distribution/src/tests.rs
index b19a9fe22480..f43438a7fc4f 100644
--- a/node/network/bitfield-distribution/src/tests.rs
+++ b/node/network/bitfield-distribution/src/tests.rs
@@ -513,7 +513,7 @@ fn delay_reputation_change() {
 msg: BitfieldDistributionMessage::NetworkBridgeUpdate(
 NetworkBridgeEvent::PeerMessage(
 peer.clone(),
- msg.clone().into_network_message(),
+ msg.clone().into_network_message(ValidationVersion::V1.into()),
 ),
 ),
 })
@@ -539,7 +539,7 @@ fn delay_reputation_change() {
 msg: BitfieldDistributionMessage::NetworkBridgeUpdate(
 NetworkBridgeEvent::PeerMessage(
 peer.clone(),
- msg.clone().into_network_message(),
+ msg.clone().into_network_message(ValidationVersion::V1.into()),
 ),
 ),
 })

From bfab907f918c4f63b1f2351aeeac8c10a8fb8a7e Mon Sep 17 00:00:00 2001
From: Andrei Sandu
Date: Mon, 21 Aug 2023 15:33:27 +0300
Subject: [PATCH 097/105] review feedback

Signed-off-by: Andrei Sandu
---
 node/network/approval-distribution/src/lib.rs | 36 ++++++++-----------
 .../src/node/approval/approval-voting.md | 5 ++-
 2 files changed, 17 insertions(+), 24 deletions(-)

diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs
index 9e4252ccfaee..98b0cfe9faf6 100644
--- a/node/network/approval-distribution/src/lib.rs
+++ b/node/network/approval-distribution/src/lib.rs
@@ -214,25 +214,22 @@ impl ApprovalEntry {
 }
 
 // Get the assignment certificate and claimed candidates.
- pub fn get_assignment(&self) -> (IndirectAssignmentCertV2, CandidateBitfield) {
+ pub fn assignment(&self) -> (IndirectAssignmentCertV2, CandidateBitfield) {
 (self.assignment.clone(), self.candidates.clone())
 }
 
 // Get all approvals for all candidates claimed by the assignment.
- pub fn get_approvals(&self) -> Vec {
+ pub fn approvals(&self) -> Vec {
 self.approvals.values().cloned().collect::>()
 }
 
 // Get the approval for a specific candidate index.
- pub fn get_approval(
- &self,
- candidate_index: CandidateIndex,
- ) -> Option {
+ pub fn approval(&self, candidate_index: CandidateIndex) -> Option {
 self.approvals.get(&candidate_index).cloned()
 }
 
 // Get validator index.
- pub fn get_validator_index(&self) -> ValidatorIndex {
+ pub fn validator_index(&self) -> ValidatorIndex {
 self.validator_index
 }
 }
@@ -461,7 +458,7 @@ impl BlockEntry {
 Some(candidate_entry) => {
 candidate_entry
 .messages
- .entry(entry.get_validator_index())
+ .entry(entry.validator_index())
 .or_insert(entry.candidates.clone());
 },
 None => {
@@ -499,7 +496,7 @@ impl BlockEntry {
 
 // Returns a mutable reference to `ApprovalEntry` for `candidate_index` from validator
 // `validator_index`.
- pub fn get_approval_entry(
+ pub fn approval_entry(
 &mut self,
 candidate_index: CandidateIndex,
 validator_index: ValidatorIndex,
@@ -513,7 +510,7 @@ impl BlockEntry {
 }
 
 // Get all approval entries for a given candidate.
- pub fn get_approval_entries(&self, candidate_index: CandidateIndex) -> Vec<&ApprovalEntry> { + pub fn approval_entries(&self, candidate_index: CandidateIndex) -> Vec<&ApprovalEntry> { // Get the keys for fetching `ApprovalEntry` from `self.approval_entries`, let approval_entry_keys = self .candidates @@ -1545,7 +1542,7 @@ impl State { } } - let required_routing = match entry.get_approval_entry(candidate_index, validator_index) { + let required_routing = match entry.approval_entry(candidate_index, validator_index) { Some(approval_entry) => { // Invariant: to our knowledge, none of the peers except for the `source` know about the approval. metrics.on_approval_imported(); @@ -1689,9 +1686,9 @@ impl State { }; let sigs = block_entry - .get_approval_entries(index) + .approval_entries(index) .into_iter() - .filter_map(|approval_entry| approval_entry.get_approval(index)) + .filter_map(|approval_entry| approval_entry.approval(index)) .map(|approval| (approval.validator, approval.signature)) .collect::>(); all_sigs.extend(sigs); @@ -1765,8 +1762,8 @@ impl State { } } - let assignment_message = approval_entry.get_assignment(); - let approval_messages = approval_entry.get_approvals(); + let assignment_message = approval_entry.assignment(); + let approval_messages = approval_entry.approvals(); let (assignment_knowledge, message_kind) = approval_entry.create_assignment_knowledge(block); @@ -2052,7 +2049,7 @@ async fn adjust_required_routing_and_propagate { - // TODO: Fix warning: `Importing locally an already known assignment` for multiple candidate assignments. - // This is due to the fact that we call this on wakeup, and we do have a wakeup for each candidate index, but - // a single assignment claiming the candidates. let _span = state .spans .get(&cert.block_hash) diff --git a/roadmap/implementers-guide/src/node/approval/approval-voting.md b/roadmap/implementers-guide/src/node/approval/approval-voting.md index 8ccd76a4b983..375b8f1f12b3 100644 --- a/roadmap/implementers-guide/src/node/approval/approval-voting.md +++ b/roadmap/implementers-guide/src/node/approval/approval-voting.md @@ -61,12 +61,11 @@ pub struct OurAssignment { } pub struct ApprovalEntry { - tranches: Vec, + tranches: Vec, // sorted ascending by tranche number. backing_group: GroupIndex, our_assignment: Option, our_approval_sig: Option, - // `n_validators` bits. - assigned_validators: Bitfield, + assigned_validators: Bitfield, // `n_validators` bits. 
approved: bool, } From 609097402c25af1c7c7f094d11abc7a8ca2d8b81 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 22 Aug 2023 00:19:37 +0300 Subject: [PATCH 098/105] fix Cargo.lock Signed-off-by: Andrei Sandu --- Cargo.lock | 457 ++++++++++++++++++++++++++++------------------------- 1 file changed, 239 insertions(+), 218 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e0b166fada41..f5c9b8d0694f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -778,7 +778,7 @@ dependencies = [ [[package]] name = "binary-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "hash-db", "log", @@ -1847,18 +1847,32 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.0.0-rc.1" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d4ba9852b42210c7538b75484f9daa0655e9a3ac04f693747bb0f02cf3cfe16" +checksum = "f711ade317dd348950a9910f81c5947e3d8907ebd2b83f76203ff1807e6a2bc2" dependencies = [ "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "digest 0.10.7", "fiat-crypto", - "packed_simd_2", "platforms", + "rustc_version", "subtle", "zeroize", ] +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.28", +] + [[package]] name = "cxx" version = "1.0.102" @@ -2339,6 +2353,16 @@ dependencies = [ "signature 1.6.4", ] +[[package]] +name = "ed25519" +version = "2.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60f6d271ca33075c88028be6f04d502853d63a5ece419d269c15315d4fc1cf1d" +dependencies = [ + "pkcs8 0.10.2", + "signature 2.1.0", +] + [[package]] name = "ed25519-dalek" version = "1.0.1" @@ -2346,13 +2370,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" dependencies = [ "curve25519-dalek 3.2.0", - "ed25519", + "ed25519 1.5.3", "rand 0.7.3", "serde", "sha2 0.9.9", "zeroize", ] +[[package]] +name = "ed25519-dalek" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" +dependencies = [ + "curve25519-dalek 4.0.0", + "ed25519 2.2.2", + "serde", + "sha2 0.10.7", + "zeroize", +] + [[package]] name = "ed25519-zebra" version = "3.1.0" @@ -2831,7 +2868,7 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "parity-scale-codec", ] @@ -2854,7 +2891,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-support", "frame-support-procedural", @@ -2879,7 
+2916,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "Inflector", "array-bytes", @@ -2927,7 +2964,7 @@ dependencies = [ [[package]] name = "frame-election-provider-solution-type" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2938,7 +2975,7 @@ dependencies = [ [[package]] name = "frame-election-provider-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-election-provider-solution-type", "frame-support", @@ -2955,7 +2992,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-support", "frame-system", @@ -2984,7 +3021,7 @@ dependencies = [ [[package]] name = "frame-remote-externalities" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "async-recursion", "futures", @@ -3006,7 +3043,7 @@ dependencies = [ [[package]] name = "frame-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "aquamarine", "bitflags 1.3.2", @@ -3044,7 +3081,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "Inflector", "cfg-expr", @@ -3062,7 +3099,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", @@ -3074,7 +3111,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "proc-macro2", "quote", @@ -3084,7 +3121,7 @@ dependencies = [ [[package]] name = "frame-support-test" version = "3.0.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-executive", @@ -3111,7 +3148,7 @@ dependencies = [ [[package]] name = "frame-support-test-pallet" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-support", "frame-system", @@ -3124,7 +3161,7 @@ dependencies = [ [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "cfg-if", "frame-support", @@ -3143,7 +3180,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -3158,7 +3195,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "parity-scale-codec", "sp-api", @@ -3167,7 +3204,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-support", "parity-scale-codec", @@ -3342,7 +3379,7 @@ dependencies = [ [[package]] name = "generate-bags" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "chrono", "frame-election-provider-support", @@ -4472,12 +4509,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "libm" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fc7aa29613bd6a620df431842069224d8bc9011086b1db4c0e0cd47fa03ec9a" - [[package]] name = "libp2p" version = "0.51.3" @@ -4607,7 +4638,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e2d584751cecb2aabaa56106be6be91338a60a0f4e420cf2af639204f596fc1" dependencies = [ "bs58", - "ed25519-dalek", + "ed25519-dalek 1.0.1", "log", "multiaddr", "multihash", @@ -5346,7 +5377,7 @@ dependencies = [ [[package]] name = "mmr-gadget" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "futures", "log", @@ -5365,7 +5396,7 @@ dependencies = [ [[package]] name = "mmr-rpc" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "anyhow", "jsonrpsee", @@ -5889,20 +5920,10 @@ dependencies = [ "sha2 0.10.7", ] -[[package]] -name = "packed_simd_2" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1914cd452d8fccd6f9db48147b29fd4ae05bea9dc5d9ad578509f72415de282" -dependencies = [ - "cfg-if", - "libm", -] - [[package]] name = "pallet-assets" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -5917,7 +5938,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-support", "frame-system", @@ -5933,7 +5954,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-support", "frame-system", @@ -5947,7 +5968,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -5971,7 +5992,7 @@ dependencies = [ [[package]] name = "pallet-bags-list" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "aquamarine", "docify", @@ -5993,7 +6014,7 @@ dependencies = [ [[package]] name = "pallet-bags-list-remote-tests" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-election-provider-support", "frame-remote-externalities", @@ -6012,7 +6033,7 @@ dependencies = [ [[package]] name = "pallet-balances" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6027,7 +6048,7 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-support", "frame-system", @@ -6046,7 
+6067,7 @@ dependencies = [ [[package]] name = "pallet-beefy-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "array-bytes", "binary-merkle-tree", @@ -6070,7 +6091,7 @@ dependencies = [ [[package]] name = "pallet-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6088,7 +6109,7 @@ dependencies = [ [[package]] name = "pallet-child-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6107,7 +6128,7 @@ dependencies = [ [[package]] name = "pallet-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6124,7 +6145,7 @@ dependencies = [ [[package]] name = "pallet-conviction-voting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "assert_matches", "frame-benchmarking", @@ -6141,7 +6162,7 @@ dependencies = [ [[package]] name = "pallet-democracy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6159,7 +6180,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-multi-phase" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6182,7 +6203,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-support-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6195,7 +6216,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6214,7 +6235,7 @@ dependencies = [ [[package]] name = 
"pallet-fast-unstake" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "docify", "frame-benchmarking", @@ -6233,7 +6254,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6256,7 +6277,7 @@ dependencies = [ [[package]] name = "pallet-identity" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "enumflags2", "frame-benchmarking", @@ -6272,7 +6293,7 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6292,7 +6313,7 @@ dependencies = [ [[package]] name = "pallet-indices" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6309,7 +6330,7 @@ dependencies = [ [[package]] name = "pallet-membership" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6326,7 +6347,7 @@ dependencies = [ [[package]] name = "pallet-message-queue" version = "7.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6345,7 +6366,7 @@ dependencies = [ [[package]] name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6362,7 +6383,7 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6378,7 +6399,7 @@ dependencies = [ [[package]] name = "pallet-nis" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6394,7 +6415,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-support", "frame-system", @@ -6413,7 +6434,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-benchmarking" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6433,7 +6454,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-runtime-api" version = "1.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "pallet-nomination-pools", "parity-scale-codec", @@ -6444,7 +6465,7 @@ dependencies = [ [[package]] name = "pallet-offences" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-support", "frame-system", @@ -6461,7 +6482,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6485,7 +6506,7 @@ dependencies = [ [[package]] name = "pallet-preimage" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6502,7 +6523,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6517,7 +6538,7 @@ dependencies = [ [[package]] name = "pallet-ranked-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6535,7 +6556,7 @@ dependencies = [ [[package]] name = "pallet-recovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6550,7 +6571,7 @@ dependencies = [ [[package]] name = "pallet-referenda" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "assert_matches", "frame-benchmarking", @@ -6569,7 +6590,7 @@ dependencies = [ [[package]] name = "pallet-salary" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6587,7 +6608,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "docify", "frame-benchmarking", @@ -6605,7 +6626,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-support", "frame-system", @@ -6626,7 +6647,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6642,7 +6663,7 @@ dependencies = [ [[package]] name = "pallet-society" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6660,7 +6681,7 @@ dependencies = [ [[package]] name = "pallet-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6683,7 +6704,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -6694,7 +6715,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-fn" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "log", 
"sp-arithmetic", @@ -6703,7 +6724,7 @@ dependencies = [ [[package]] name = "pallet-staking-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "parity-scale-codec", "sp-api", @@ -6712,7 +6733,7 @@ dependencies = [ [[package]] name = "pallet-state-trie-migration" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6729,7 +6750,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6744,7 +6765,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6762,7 +6783,7 @@ dependencies = [ [[package]] name = "pallet-tips" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6781,7 +6802,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-support", "frame-system", @@ -6797,7 +6818,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", @@ -6813,7 +6834,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -6825,7 +6846,7 @@ dependencies = [ [[package]] name = "pallet-treasury" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6842,7 +6863,7 @@ dependencies = [ [[package]] name = 
"pallet-uniques" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6857,7 +6878,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6873,7 +6894,7 @@ dependencies = [ [[package]] name = "pallet-vesting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -6888,7 +6909,7 @@ dependencies = [ [[package]] name = "pallet-whitelist" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-benchmarking", "frame-support", @@ -10091,7 +10112,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "log", "sp-core", @@ -10102,7 +10123,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "async-trait", "futures", @@ -10130,7 +10151,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "futures", "futures-timer", @@ -10153,7 +10174,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -10168,7 +10189,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "memmap2", "sc-chain-spec-derive", @@ -10187,7 +10208,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -10198,7 +10219,7 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "array-bytes", "chrono", @@ -10237,7 +10258,7 @@ dependencies = [ [[package]] name = "sc-client-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "fnv", "futures", @@ -10263,7 +10284,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "hash-db", "kvdb", @@ -10289,7 +10310,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "async-trait", "futures", @@ -10314,7 +10335,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "async-trait", "fork-tree", @@ -10350,7 +10371,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "futures", "jsonrpsee", @@ -10372,7 +10393,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "array-bytes", "async-channel", @@ -10406,7 +10427,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "futures", "jsonrpsee", @@ -10425,7 +10446,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "fork-tree", "parity-scale-codec", @@ -10438,7 +10459,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa" version = "0.10.0-dev" 
-source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "ahash 0.8.3", "array-bytes", @@ -10479,7 +10500,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "finality-grandpa", "futures", @@ -10499,7 +10520,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "async-trait", "futures", @@ -10522,7 +10543,7 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -10544,7 +10565,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "sc-allocator", "sp-maybe-compressed-blob", @@ -10556,7 +10577,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "anyhow", "cfg-if", @@ -10573,7 +10594,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "ansi_term", "futures", @@ -10589,7 +10610,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "array-bytes", "parking_lot 0.12.1", @@ -10603,7 +10624,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "array-bytes", "async-channel", @@ -10644,7 +10665,7 @@ dependencies = [ [[package]] name = "sc-network-bitswap" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "async-channel", "cid", @@ -10664,7 +10685,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "async-trait", "bitflags 1.3.2", @@ -10681,7 +10702,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "ahash 0.8.3", "futures", @@ -10699,7 +10720,7 @@ dependencies = [ [[package]] name = "sc-network-light" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "array-bytes", "async-channel", @@ -10720,7 +10741,7 @@ dependencies = [ [[package]] name = "sc-network-sync" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "array-bytes", "async-channel", @@ -10754,7 +10775,7 @@ dependencies = [ [[package]] name = "sc-network-transactions" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "array-bytes", "futures", @@ -10772,7 +10793,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "array-bytes", "bytes", @@ -10806,7 +10827,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -10815,7 +10836,7 @@ dependencies = [ [[package]] name = "sc-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "futures", "jsonrpsee", @@ -10846,7 +10867,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -10865,7 +10886,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version 
= "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "http", "jsonrpsee", @@ -10880,7 +10901,7 @@ dependencies = [ [[package]] name = "sc-rpc-spec-v2" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "array-bytes", "futures", @@ -10908,7 +10929,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "async-trait", "directories", @@ -10972,7 +10993,7 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "log", "parity-scale-codec", @@ -10983,7 +11004,7 @@ dependencies = [ [[package]] name = "sc-storage-monitor" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "clap 4.3.19", "fs4", @@ -10997,7 +11018,7 @@ dependencies = [ [[package]] name = "sc-sync-state-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -11016,7 +11037,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "6.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "futures", "libc", @@ -11035,7 +11056,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "chrono", "futures", @@ -11054,7 +11075,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "ansi_term", "atty", @@ -11083,7 +11104,7 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "proc-macro-crate", "proc-macro2", @@ 
-11094,7 +11115,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "async-trait", "futures", @@ -11120,7 +11141,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "async-trait", "futures", @@ -11136,7 +11157,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "async-channel", "futures", @@ -11665,14 +11686,14 @@ checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" [[package]] name = "snow" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ccba027ba85743e09d15c03296797cad56395089b832b48b5a5217880f57733" +checksum = "0c9d1425eb528a21de2755c75af4c9b5d57f50a0d4c3b7f1828a4cd03f8ba155" dependencies = [ "aes-gcm 0.9.4", "blake2", "chacha20poly1305", - "curve25519-dalek 4.0.0-rc.1", + "curve25519-dalek 4.0.0", "rand_core 0.6.4", "ring 0.16.20", "rustc_version", @@ -11720,7 +11741,7 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "hash-db", "log", @@ -11741,7 +11762,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "Inflector", "blake2", @@ -11755,7 +11776,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" version = "23.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "parity-scale-codec", "scale-info", @@ -11768,7 +11789,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "16.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "integer-sqrt", "num-traits", @@ -11782,7 +11803,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "parity-scale-codec", "scale-info", @@ -11795,7 +11816,7 @@ dependencies = [ 
[[package]] name = "sp-block-builder" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "sp-api", "sp-inherents", @@ -11806,7 +11827,7 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "futures", "log", @@ -11824,7 +11845,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "async-trait", "futures", @@ -11839,7 +11860,7 @@ dependencies = [ [[package]] name = "sp-consensus-aura" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "async-trait", "parity-scale-codec", @@ -11856,7 +11877,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "async-trait", "parity-scale-codec", @@ -11875,7 +11896,7 @@ dependencies = [ [[package]] name = "sp-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "lazy_static", "parity-scale-codec", @@ -11894,7 +11915,7 @@ dependencies = [ [[package]] name = "sp-consensus-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "finality-grandpa", "log", @@ -11912,7 +11933,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "parity-scale-codec", "scale-info", @@ -11924,7 +11945,7 @@ dependencies = [ [[package]] name = "sp-core" version = "21.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "array-bytes", "arrayvec 0.7.4", @@ -11971,7 +11992,7 @@ dependencies = [ [[package]] name = "sp-core-hashing" version = "9.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "blake2b_simd", "byteorder", @@ -11984,7 +12005,7 @@ dependencies = [ [[package]] name = "sp-core-hashing-proc-macro" version = "9.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "quote", "sp-core-hashing", @@ -11994,7 +12015,7 @@ dependencies = [ [[package]] name = "sp-database" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "kvdb", "parking_lot 0.12.1", @@ -12003,7 +12024,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "proc-macro2", "quote", @@ -12013,7 +12034,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.19.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "environmental", "parity-scale-codec", @@ -12024,7 +12045,7 @@ dependencies = [ [[package]] name = "sp-genesis-builder" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "serde_json", "sp-api", @@ -12035,7 +12056,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -12049,11 +12070,10 @@ dependencies = [ [[package]] name = "sp-io" version = "23.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "bytes", - "ed25519", - "ed25519-dalek", + "ed25519-dalek 2.0.0", "libsecp256k1", "log", "parity-scale-codec", @@ -12074,7 +12094,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "24.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "lazy_static", "sp-core", @@ -12085,7 +12105,7 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.27.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -12097,7 +12117,7 @@ 
dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "thiserror", "zstd 0.12.4", @@ -12106,7 +12126,7 @@ dependencies = [ [[package]] name = "sp-metadata-ir" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-metadata", "parity-scale-codec", @@ -12117,7 +12137,7 @@ dependencies = [ [[package]] name = "sp-mmr-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "ckb-merkle-mountain-range", "log", @@ -12135,7 +12155,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "parity-scale-codec", "scale-info", @@ -12149,7 +12169,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "sp-api", "sp-core", @@ -12159,7 +12179,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "backtrace", "lazy_static", @@ -12169,7 +12189,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "rustc-hash", "serde", @@ -12179,7 +12199,7 @@ dependencies = [ [[package]] name = "sp-runtime" version = "24.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "either", "hash256-std-hasher", @@ -12201,7 +12221,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "17.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -12219,7 +12239,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "11.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "Inflector", "proc-macro-crate", @@ -12231,7 +12251,7 @@ dependencies = [ [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "parity-scale-codec", "scale-info", @@ -12246,7 +12266,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -12260,7 +12280,7 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.28.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "hash-db", "log", @@ -12281,11 +12301,11 @@ dependencies = [ [[package]] name = "sp-statement-store" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "aes-gcm 0.10.2", - "curve25519-dalek 3.2.0", - "ed25519-dalek", + "curve25519-dalek 4.0.0", + "ed25519-dalek 2.0.0", "hkdf", "parity-scale-codec", "rand 0.8.5", @@ -12299,18 +12319,18 @@ dependencies = [ "sp-runtime-interface", "sp-std", "thiserror", - "x25519-dalek 2.0.0-pre.1", + "x25519-dalek 2.0.0", ] [[package]] name = "sp-std" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" [[package]] name = "sp-storage" version = "13.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "impl-serde", "parity-scale-codec", @@ -12323,7 +12343,7 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "async-trait", "parity-scale-codec", @@ -12336,7 +12356,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "10.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "parity-scale-codec", "sp-std", @@ -12348,7 +12368,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "sp-api", "sp-runtime", @@ -12357,7 +12377,7 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "async-trait", "parity-scale-codec", @@ -12372,7 +12392,7 @@ dependencies = [ [[package]] name = "sp-trie" version = "22.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "ahash 0.8.3", "hash-db", @@ -12395,7 +12415,7 @@ dependencies = [ [[package]] name = "sp-version" version = "22.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "impl-serde", "parity-scale-codec", @@ -12412,7 +12432,7 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "parity-scale-codec", "proc-macro2", @@ -12423,7 +12443,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "14.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -12436,7 +12456,7 @@ dependencies = [ [[package]] name = "sp-weights" version = "20.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "parity-scale-codec", "scale-info", @@ -12677,12 +12697,12 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" [[package]] name = "substrate-frame-rpc-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "frame-system-rpc-runtime-api", "futures", @@ -12701,7 +12721,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "hyper", "log", @@ -12713,7 +12733,7 @@ dependencies = [ [[package]] name = "substrate-rpc-client" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "async-trait", "jsonrpsee", @@ -12726,7 +12746,7 @@ dependencies = [ [[package]] name = "substrate-state-trie-migration-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -12743,7 +12763,7 @@ dependencies = [ [[package]] name = "substrate-test-client" version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "array-bytes", "async-trait", @@ -12769,7 +12789,7 @@ dependencies = [ [[package]] name = "substrate-test-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "futures", "substrate-test-utils-derive", @@ -12779,7 +12799,7 @@ dependencies = [ [[package]] name = "substrate-test-utils-derive" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -12790,7 +12810,7 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "ansi_term", "build-helper", @@ -13698,7 +13718,7 @@ checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "try-runtime-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#94be94be6d26becd2395b58ae09ca31f596afe7d" +source = "git+https://github.com/paritytech/substrate?branch=master#51695bb7009ea2e0996eb94ab4dfdc643a076702" dependencies = [ "async-trait", "clap 4.3.19", @@ -14520,7 +14540,7 @@ dependencies = [ "tokio", "webpki 0.21.4", "webrtc-util", - "x25519-dalek 2.0.0-pre.1", + "x25519-dalek 2.0.0", "x509-parser 0.13.2", ] @@ -15039,12 +15059,13 @@ dependencies = [ [[package]] name = "x25519-dalek" -version = "2.0.0-pre.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5da623d8af10a62342bcbbb230e33e58a63255a58012f8653c578e54bab48df" +checksum = "fb66477291e7e8d2b0ff1bcb900bf29489a9692816d79874bea351e7a8b6de96" dependencies = [ - "curve25519-dalek 3.2.0", + "curve25519-dalek 4.0.0", "rand_core 0.6.4", + "serde", "zeroize", ] From 4841044e9c9258ce641a4dd2537a4f5e13c30599 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 22 Aug 2023 00:47:32 +0300 Subject: [PATCH 099/105] remove unreachable Signed-off-by: Andrei Sandu --- node/network/collator-protocol/src/collator_side/mod.rs | 4 ---- node/network/collator-protocol/src/validator_side/mod.rs | 4 ---- 
2 files changed, 8 deletions(-) diff --git a/node/network/collator-protocol/src/collator_side/mod.rs b/node/network/collator-protocol/src/collator_side/mod.rs index d8da1b3a1e10..96978f39a532 100644 --- a/node/network/collator-protocol/src/collator_side/mod.rs +++ b/node/network/collator-protocol/src/collator_side/mod.rs @@ -1187,10 +1187,6 @@ async fn handle_network_msg( NewGossipTopology { .. } => { // impossible! }, - PeerMessage(_, Versioned::VStaging(_)) => gum::warn!( - target: LOG_TARGET, - "Received message on invalid collator protocol version. Only v1 supported", - ), } Ok(()) diff --git a/node/network/collator-protocol/src/validator_side/mod.rs b/node/network/collator-protocol/src/validator_side/mod.rs index a13a4d3937d7..e8cf769d2e5f 100644 --- a/node/network/collator-protocol/src/validator_side/mod.rs +++ b/node/network/collator-protocol/src/validator_side/mod.rs @@ -1429,10 +1429,6 @@ async fn handle_network_msg( PeerMessage(remote, msg) => { process_incoming_peer_message(ctx, state, remote, msg).await; }, - PeerMessage(_, Versioned::VStaging(_)) => gum::warn!( - target: LOG_TARGET, - "Received message on invalid collator protocol version. Only v1 supported", - ), UpdatedAuthorityIds { .. } => { // The validator side doesn't deal with `AuthorityDiscoveryId`s. }, From c9c7d3c5dd116b2db59650ae57ab9e9d4e483b41 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 22 Aug 2023 01:59:02 +0300 Subject: [PATCH 100/105] Fix zombienet test Signed-off-by: Andrei Sandu --- scripts/ci/gitlab/pipeline/zombienet.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/ci/gitlab/pipeline/zombienet.yml b/scripts/ci/gitlab/pipeline/zombienet.yml index 24c5de436f65..1c8df44cbaa7 100644 --- a/scripts/ci/gitlab/pipeline/zombienet.yml +++ b/scripts/ci/gitlab/pipeline/zombienet.yml @@ -169,8 +169,8 @@ zombienet-tests-parachains-max-tranche0-approvals: needs: - job: publish-polkadot-debug-image - job: publish-test-collators-image - - job: publish-malus-image variables: + RUN_IN_CONTAINER: "1" GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/functional" before_script: - echo "Zombie-net Tests Config" From a9228c03d3b0352e403fcfef2004005d6d1ec5e0 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 22 Aug 2023 12:15:05 +0300 Subject: [PATCH 101/105] happy clippy Signed-off-by: Andrei Sandu --- node/core/approval-voting/src/tests.rs | 2 +- .../approval-distribution/src/tests.rs | 48 +++++++++---------- node/network/bridge/src/tx/tests.rs | 14 +----- node/service/src/parachains_db/upgrade.rs | 5 +- 4 files changed, 27 insertions(+), 42 deletions(-) diff --git a/node/core/approval-voting/src/tests.rs b/node/core/approval-voting/src/tests.rs index a8393cd9aa5c..c2ef109ad4ca 100644 --- a/node/core/approval-voting/src/tests.rs +++ b/node/core/approval-voting/src/tests.rs @@ -1138,7 +1138,7 @@ fn blank_subsystem_act_on_bad_block() { FromOrchestra::Communication { msg: ApprovalVotingMessage::CheckAndImportAssignment( IndirectAssignmentCertV2 { - block_hash: bad_block_hash.clone(), + block_hash: bad_block_hash, validator: 0u32.into(), cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0, diff --git a/node/network/approval-distribution/src/tests.rs b/node/network/approval-distribution/src/tests.rs index 6ae6a953afcd..f0c3c4f8ba64 100644 --- a/node/network/approval-distribution/src/tests.rs +++ b/node/network/approval-distribution/src/tests.rs @@ -154,7 +154,7 @@ fn make_gossip_topology( assert!(all_peers.len() >= 
grid_size); let peer_info = |i: usize| TopologyPeerInfo { - peer_ids: vec![all_peers[i].0.clone()], + peer_ids: vec![all_peers[i].0], validator_index: ValidatorIndex::from(i as u32), discovery_id: all_peers[i].1.clone(), }; @@ -231,7 +231,7 @@ async fn setup_peer_with_view( overseer_send( virtual_overseer, ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerConnected( - peer_id.clone(), + *peer_id, ObservedRole::Full, version.into(), None, @@ -241,8 +241,7 @@ async fn setup_peer_with_view( overseer_send( virtual_overseer, ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerViewChange( - peer_id.clone(), - view, + *peer_id, view, )), ) .await; @@ -256,7 +255,7 @@ async fn send_message_from_peer( overseer_send( virtual_overseer, ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage( - peer_id.clone(), + *peer_id, Versioned::V1(msg), )), ) @@ -271,7 +270,7 @@ async fn send_message_from_peer_v2( overseer_send( virtual_overseer, ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage( - peer_id.clone(), + *peer_id, Versioned::VStaging(msg), )), ) @@ -618,7 +617,7 @@ fn spam_attack_results_in_negative_reputation_change() { // new block `hash_b` with 20 candidates let candidates_count = 20; let meta = BlockApprovalMeta { - hash: hash_b.clone(), + hash: hash_b, parent_hash, number: 2, candidates: vec![Default::default(); candidates_count], @@ -665,7 +664,7 @@ fn spam_attack_results_in_negative_reputation_change() { overseer_send( overseer, ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerViewChange( - peer.clone(), + *peer, View::with_finalized(2), )), ) @@ -728,7 +727,7 @@ fn peer_sending_us_the_same_we_just_sent_them_is_ok() { overseer_send( overseer, ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerViewChange( - peer.clone(), + *peer, view![hash], )), ) @@ -1123,7 +1122,7 @@ fn update_peer_view() { overseer_send( overseer, ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerViewChange( - peer.clone(), + *peer, View::new(vec![hash_b, hash_c, hash_d], 2), )), ) @@ -1176,7 +1175,7 @@ fn update_peer_view() { overseer_send( overseer, ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerViewChange( - peer.clone(), + *peer, View::with_finalized(finalized_number), )), ) @@ -1339,7 +1338,7 @@ fn sends_assignments_even_when_state_is_approved() { protocol_v1::ApprovalDistributionMessage::Assignments(sent_assignments) )) )) => { - assert_eq!(peers, vec![peer.clone()]); + assert_eq!(peers, vec![*peer]); assert_eq!(sent_assignments, assignments); } ); @@ -1352,7 +1351,7 @@ fn sends_assignments_even_when_state_is_approved() { protocol_v1::ApprovalDistributionMessage::Approvals(sent_approvals) )) )) => { - assert_eq!(peers, vec![peer.clone()]); + assert_eq!(peers, vec![*peer]); assert_eq!(sent_approvals, approvals); } ); @@ -1440,7 +1439,7 @@ fn sends_assignments_even_when_state_is_approved_v2() { protocol_vstaging::ApprovalDistributionMessage::Assignments(sent_assignments) )) )) => { - assert_eq!(peers, vec![peer.clone()]); + assert_eq!(peers, vec![*peer]); assert_eq!(sent_assignments, assignments); } ); @@ -1458,7 +1457,7 @@ fn sends_assignments_even_when_state_is_approved_v2() { let sent_approvals = sent_approvals.into_iter().map(|approval| (approval.candidate_index, approval)).collect::>(); let approvals = approvals.into_iter().map(|approval| (approval.candidate_index, approval)).collect::>(); - assert_eq!(peers, 
vec![peer.clone()]); + assert_eq!(peers, vec![*peer]); assert_eq!(sent_approvals, approvals); } ); @@ -1487,7 +1486,7 @@ fn race_condition_in_local_vs_remote_view_update() { // Test a small number of candidates let candidates_count = 1; let meta = BlockApprovalMeta { - hash: hash_b.clone(), + hash: hash_b, parent_hash, number: 2, candidates: vec![Default::default(); candidates_count], @@ -2099,7 +2098,7 @@ fn originator_aggression_l1() { let mut state = State::default(); state.aggression_config.resend_unfinalized_period = None; - let aggression_l1_threshold = state.aggression_config.l1_threshold.clone().unwrap(); + let aggression_l1_threshold = state.aggression_config.l1_threshold.unwrap(); let _ = test_harness(state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -2222,8 +2221,7 @@ fn originator_aggression_l1() { assert_eq!(sent_assignments, assignments); assert!(unsent_indices.iter() - .find(|i| &peers[**i].0 == &sent_peers[0]) - .is_some()); + .any(|i| &peers[*i].0 == &sent_peers[0])); } ); } @@ -2242,8 +2240,7 @@ fn originator_aggression_l1() { assert_eq!(sent_approvals, approvals); assert!(unsent_indices.iter() - .find(|i| &peers[**i].0 == &sent_peers[0]) - .is_some()); + .any(|i| &peers[*i].0 == &sent_peers[0])); } ); } @@ -2263,7 +2260,7 @@ fn non_originator_aggression_l1() { let mut state = state_without_reputation_delay(); state.aggression_config.resend_unfinalized_period = None; - let aggression_l1_threshold = state.aggression_config.l1_threshold.clone().unwrap(); + let aggression_l1_threshold = state.aggression_config.l1_threshold.unwrap(); let _ = test_harness(state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -2368,8 +2365,8 @@ fn non_originator_aggression_l2() { let mut state = state_without_reputation_delay(); state.aggression_config.resend_unfinalized_period = None; - let aggression_l1_threshold = state.aggression_config.l1_threshold.clone().unwrap(); - let aggression_l2_threshold = state.aggression_config.l2_threshold.clone().unwrap(); + let aggression_l1_threshold = state.aggression_config.l1_threshold.unwrap(); + let aggression_l2_threshold = state.aggression_config.l2_threshold.unwrap(); let _ = test_harness(state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -2513,8 +2510,7 @@ fn non_originator_aggression_l2() { assert_eq!(sent_assignments, assignments); assert!(unsent_indices.iter() - .find(|i| &peers[**i].0 == &sent_peers[0]) - .is_some()); + .any(|i| &peers[*i].0 == &sent_peers[0])); } ); } diff --git a/node/network/bridge/src/tx/tests.rs b/node/network/bridge/src/tx/tests.rs index dff2362b62a8..6e28a4032183 100644 --- a/node/network/bridge/src/tx/tests.rs +++ b/node/network/bridge/src/tx/tests.rs @@ -247,12 +247,7 @@ fn send_messages_to_peers() { let peer = PeerId::random(); network_handle - .connect_peer( - peer.clone(), - ValidationVersion::V1, - PeerSet::Validation, - ObservedRole::Full, - ) + .connect_peer(peer, ValidationVersion::V1, PeerSet::Validation, ObservedRole::Full) .timeout(TIMEOUT) .await .expect("Timeout does not occur"); @@ -261,12 +256,7 @@ fn send_messages_to_peers() { // so the single item sink has to be free explicitly network_handle - .connect_peer( - peer.clone(), - ValidationVersion::V1, - PeerSet::Collation, - ObservedRole::Full, - ) + .connect_peer(peer, ValidationVersion::V1, PeerSet::Collation, ObservedRole::Full) .timeout(TIMEOUT) .await .expect("Timeout does not occur"); diff --git a/node/service/src/parachains_db/upgrade.rs 
b/node/service/src/parachains_db/upgrade.rs index 7d79d3b6a513..f3fa1694801f 100644 --- a/node/service/src/parachains_db/upgrade.rs +++ b/node/service/src/parachains_db/upgrade.rs @@ -567,7 +567,7 @@ mod tests { assert_eq!(db.num_columns(), super::columns::v3::NUM_COLUMNS as u32); let db = DbAdapter::new(db, columns::v3::ORDERED_COL); // Fill the approval voting column with test data. - v1_to_v2_fill_test_data(std::sync::Arc::new(db), approval_cfg.clone()).unwrap() + v1_to_v2_fill_test_data(std::sync::Arc::new(db), approval_cfg).unwrap() }; try_upgrade_db(&db_dir.path(), DatabaseKind::RocksDB, 4).unwrap(); @@ -576,8 +576,7 @@ mod tests { let db = Database::open(&db_cfg, db_path).unwrap(); let db = DbAdapter::new(db, columns::v4::ORDERED_COL); - v1_to_v2_sanity_check(std::sync::Arc::new(db), approval_cfg.clone(), expected_candidates) - .unwrap(); + v1_to_v2_sanity_check(std::sync::Arc::new(db), approval_cfg, expected_candidates).unwrap(); } #[test] From d7dffc929c58832d6df9555a2ce0dc137e199218 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 22 Aug 2023 18:40:52 +0300 Subject: [PATCH 102/105] Fix flaky paritydb upgrade tests - wait lock file Signed-off-by: Andrei Sandu --- node/service/src/parachains_db/upgrade.rs | 32 +++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/node/service/src/parachains_db/upgrade.rs b/node/service/src/parachains_db/upgrade.rs index f3fa1694801f..f6fb12761a17 100644 --- a/node/service/src/parachains_db/upgrade.rs +++ b/node/service/src/parachains_db/upgrade.rs @@ -66,9 +66,16 @@ pub(crate) fn try_upgrade_db( // Ensure we don't loop forever below because of a bug. const MAX_MIGRATIONS: u32 = 30; + #[cfg(test)] + remove_file_lock(&db_path); + // Loop migrations until we reach the target version. for _ in 0..MAX_MIGRATIONS { let version = try_upgrade_db_to_next_version(db_path, db_kind)?; + + #[cfg(test)] + remove_file_lock(&db_path); + if version == target_version { return Ok(()) } @@ -403,6 +410,31 @@ fn paritydb_migrate_from_version_2_to_3(path: &Path) -> Result { Ok(3) } +/// Remove the lock file. If the file is locked, it will wait up to 1s.
+#[cfg(test)] +pub fn remove_file_lock(path: &std::path::Path) { + use std::{io::ErrorKind, thread::sleep, time::Duration}; + + let mut lock_path = std::path::PathBuf::from(path); + lock_path.push("lock"); + + for _ in 0..10 { + let result = std::fs::remove_file(lock_path.as_path()); + match result { + Err(error) => match error.kind() { + std::io::ErrorKind::WouldBlock => { + sleep(Duration::from_millis(100)); + continue + }, + _ => return, + }, + Ok(_) => {}, + } + } + + unreachable!("Database is locked, waited 1s for lock file: {:?}", lock_path); +} + #[cfg(test)] mod tests { use super::{ From e81cc9f38cc613f3fc9a38c9ba415dee1821b4cf Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 22 Aug 2023 18:43:04 +0300 Subject: [PATCH 103/105] remove unused Signed-off-by: Andrei Sandu --- node/service/src/parachains_db/upgrade.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/service/src/parachains_db/upgrade.rs b/node/service/src/parachains_db/upgrade.rs index f6fb12761a17..b99f885176b0 100644 --- a/node/service/src/parachains_db/upgrade.rs +++ b/node/service/src/parachains_db/upgrade.rs @@ -422,7 +422,7 @@ pub fn remove_file_lock(path: &std::path::Path) { let result = std::fs::remove_file(lock_path.as_path()); match result { Err(error) => match error.kind() { - std::io::ErrorKind::WouldBlock => { + ErrorKind::WouldBlock => { sleep(Duration::from_millis(100)); continue }, From 4619a2b948f71b5a7bd9b2d29bda5e2ccc613972 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Tue, 22 Aug 2023 21:38:09 +0300 Subject: [PATCH 104/105] Extra logging Signed-off-by: Andrei Sandu --- node/network/approval-distribution/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index 9c4c7645738d..1c75bf6ed6cc 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -2249,6 +2249,7 @@ impl ApprovalDistribution { target: LOG_TARGET, ?candidate_indices, block_hash = ?cert.block_hash, + assignment_kind = ?cert.cert.kind, "Distributing our assignment on candidates", ); From 8a4a2dbd0ed7fdf91c259f5502ca81733b231818 Mon Sep 17 00:00:00 2001 From: Andrei Sandu Date: Wed, 23 Aug 2023 15:33:38 +0300 Subject: [PATCH 105/105] fix approval import and add more rep logs Signed-off-by: Andrei Sandu --- node/network/approval-distribution/src/lib.rs | 26 +++++++------------ node/subsystem-util/src/reputation.rs | 5 ++++ 2 files changed, 14 insertions(+), 17 deletions(-) diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index 1c75bf6ed6cc..746a4b4dab5c 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -485,22 +485,6 @@ impl BlockEntry { .or_insert(entry) } - // Returns `true` if we have an approval for `candidate_index` from validator - // `validator_index`. - pub fn contains_approval_entry( - &self, - candidate_index: CandidateIndex, - validator_index: ValidatorIndex, - ) -> bool { - self.candidates - .get(candidate_index as usize) - .map_or(None, |candidate_entry| candidate_entry.messages.get(&validator_index)) - .map_or(false, |candidate_indices| { - self.approval_entries - .contains_key(&(validator_index, candidate_indices.clone())) - }) - } - // Returns a mutable reference of `ApprovalEntry` for `candidate_index` from validator // `validator_index`. 
pub fn approval_entry( @@ -1383,10 +1367,18 @@ impl State { let candidate_index = vote.candidate_index; let entry = match self.blocks.get_mut(&block_hash) { - Some(entry) if entry.contains_approval_entry(candidate_index, validator_index) => entry, + Some(entry) if entry.candidates.get(candidate_index as usize).is_some() => entry, _ => { if let Some(peer_id) = source.peer_id() { if !self.recent_outdated_blocks.is_recent_outdated(&block_hash) { + gum::debug!( + target: LOG_TARGET, + ?peer_id, + ?block_hash, + ?validator_index, + ?candidate_index, + "Approval from a peer is out of view", + ); modify_reputation( &mut self.reputation, ctx.sender(), diff --git a/node/subsystem-util/src/reputation.rs b/node/subsystem-util/src/reputation.rs index 89e3eb64df9b..35746dd5fef3 100644 --- a/node/subsystem-util/src/reputation.rs +++ b/node/subsystem-util/src/reputation.rs @@ -25,6 +25,7 @@ use std::{collections::HashMap, time::Duration}; /// Default delay for sending reputation changes pub const REPUTATION_CHANGE_INTERVAL: Duration = Duration::from_secs(30); +const LOG_TARGET: &'static str = "parachain::reputation-aggregator"; type BatchReputationChange = HashMap; @@ -75,6 +76,10 @@ impl ReputationAggregator { peer_id: PeerId, rep: UnifiedReputationChange, ) { + if rep.cost_or_benefit() < 0 { + gum::debug!(target: LOG_TARGET, peer = ?peer_id, ?rep, "Modify reputation"); + } + if (self.send_immediately_if)(rep) { self.single_send(sender, peer_id, rep).await; } else {