From dde09ab71c7c6c0c71a3b653e26c9df7ba377925 Mon Sep 17 00:00:00 2001 From: Caio Date: Mon, 9 Nov 2020 14:00:33 -0300 Subject: [PATCH 01/34] Remove development TODO from public doc comment (#7500) --- frame/system/src/offchain.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frame/system/src/offchain.rs b/frame/system/src/offchain.rs index edb4e5775722a..25d18ac6bf255 100644 --- a/frame/system/src/offchain.rs +++ b/frame/system/src/offchain.rs @@ -445,8 +445,8 @@ pub trait AppCrypto { /// This trait adds extra bounds to `Public` and `Signature` types of the runtime /// that are necessary to use these types for signing. /// -/// TODO [#5663] Could this be just `T::Signature as traits::Verify>::Signer`? -/// Seems that this may cause issues with bounds resolution. +// TODO [#5663] Could this be just `T::Signature as traits::Verify>::Signer`? +// Seems that this may cause issues with bounds resolution. pub trait SigningTypes: crate::Trait { /// A public key that is capable of identifing `AccountId`s. /// From bf78c1655b7e49503a11cb070235089ea1e2455e Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Mon, 9 Nov 2020 20:26:24 +0100 Subject: [PATCH 02/34] refactor subtrait/elevated trait as not needed (#7497) --- frame/balances/src/lib.rs | 74 +++++---------------------------------- 1 file changed, 9 insertions(+), 65 deletions(-) diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index 7ca6fd1e78091..141a360f7e180 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -704,7 +704,7 @@ impl, I: Instance> Module { // of the inner member. mod imbalances { use super::{ - result, Subtrait, DefaultInstance, Imbalance, Trait, Zero, Instance, Saturating, + result, DefaultInstance, Imbalance, Trait, Zero, Instance, Saturating, StorageValue, TryDrop, }; use sp_std::mem; @@ -712,9 +712,9 @@ mod imbalances { /// Opaque, move-only struct with private fields that serves as a token denoting that /// funds have been created without any equal and opposite accounting. #[must_use] - pub struct PositiveImbalance, I: Instance=DefaultInstance>(T::Balance); + pub struct PositiveImbalance, I: Instance=DefaultInstance>(T::Balance); - impl, I: Instance> PositiveImbalance { + impl, I: Instance> PositiveImbalance { /// Create a new positive imbalance from a balance. pub fn new(amount: T::Balance) -> Self { PositiveImbalance(amount) @@ -724,9 +724,9 @@ mod imbalances { /// Opaque, move-only struct with private fields that serves as a token denoting that /// funds have been destroyed without any equal and opposite accounting. #[must_use] - pub struct NegativeImbalance, I: Instance=DefaultInstance>(T::Balance); + pub struct NegativeImbalance, I: Instance=DefaultInstance>(T::Balance); - impl, I: Instance> NegativeImbalance { + impl, I: Instance> NegativeImbalance { /// Create a new negative imbalance from a balance. pub fn new(amount: T::Balance) -> Self { NegativeImbalance(amount) @@ -835,81 +835,25 @@ mod imbalances { } } - impl, I: Instance> Drop for PositiveImbalance { + impl, I: Instance> Drop for PositiveImbalance { /// Basic drop handler will just square up the total issuance. fn drop(&mut self) { - , I>>::mutate( + >::mutate( |v| *v = v.saturating_add(self.0) ); } } - impl, I: Instance> Drop for NegativeImbalance { + impl, I: Instance> Drop for NegativeImbalance { /// Basic drop handler will just square up the total issuance. 
fn drop(&mut self) { - , I>>::mutate( + >::mutate( |v| *v = v.saturating_sub(self.0) ); } } } -// TODO: #2052 -// Somewhat ugly hack in order to gain access to module's `increase_total_issuance_by` -// using only the Subtrait (which defines only the types that are not dependent -// on Positive/NegativeImbalance). Subtrait must be used otherwise we end up with a -// circular dependency with Trait having some types be dependent on PositiveImbalance -// and PositiveImbalance itself depending back on Trait for its Drop impl (and thus -// its type declaration). -// This works as long as `increase_total_issuance_by` doesn't use the Imbalance -// types (basically for charging fees). -// This should eventually be refactored so that the type item that -// depends on the Imbalance type (DustRemoval) is placed in its own pallet. -struct ElevatedTrait, I: Instance>(T, I); -impl, I: Instance> Clone for ElevatedTrait { - fn clone(&self) -> Self { unimplemented!() } -} -impl, I: Instance> PartialEq for ElevatedTrait { - fn eq(&self, _: &Self) -> bool { unimplemented!() } -} -impl, I: Instance> Eq for ElevatedTrait {} -impl, I: Instance> frame_system::Trait for ElevatedTrait { - type BaseCallFilter = T::BaseCallFilter; - type Origin = T::Origin; - type Call = T::Call; - type Index = T::Index; - type BlockNumber = T::BlockNumber; - type Hash = T::Hash; - type Hashing = T::Hashing; - type AccountId = T::AccountId; - type Lookup = T::Lookup; - type Header = T::Header; - type Event = (); - type BlockHashCount = T::BlockHashCount; - type MaximumBlockWeight = T::MaximumBlockWeight; - type DbWeight = T::DbWeight; - type BlockExecutionWeight = T::BlockExecutionWeight; - type ExtrinsicBaseWeight = T::ExtrinsicBaseWeight; - type MaximumExtrinsicWeight = T::MaximumBlockWeight; - type MaximumBlockLength = T::MaximumBlockLength; - type AvailableBlockRatio = T::AvailableBlockRatio; - type Version = T::Version; - type PalletInfo = T::PalletInfo; - type OnNewAccount = T::OnNewAccount; - type OnKilledAccount = T::OnKilledAccount; - type AccountData = T::AccountData; - type SystemWeightInfo = T::SystemWeightInfo; -} -impl, I: Instance> Trait for ElevatedTrait { - type Balance = T::Balance; - type Event = (); - type DustRemoval = (); - type ExistentialDeposit = T::ExistentialDeposit; - type AccountStore = T::AccountStore; - type WeightInfo = >::WeightInfo; - type MaxLocks = T::MaxLocks; -} - impl, I: Instance> Currency for Module where T::Balance: MaybeSerializeDeserialize + Debug { From c9bde3d908a1995c99d7c241c72223bf6fe56ad7 Mon Sep 17 00:00:00 2001 From: Antoine Le Calvez Date: Tue, 10 Nov 2020 14:17:30 +0100 Subject: [PATCH 03/34] Fix comments of indices pallet events (#7511) Arguments for IndexAssigned and IndexFrozen were inverted in comments. --- frame/indices/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frame/indices/src/lib.rs b/frame/indices/src/lib.rs index aa645d0cb9ebc..fd2eb956f9231 100644 --- a/frame/indices/src/lib.rs +++ b/frame/indices/src/lib.rs @@ -80,11 +80,11 @@ decl_event!( ::AccountId, ::AccountIndex { - /// A account index was assigned. \[who, index\] + /// A account index was assigned. \[index, who\] IndexAssigned(AccountId, AccountIndex), /// A account index has been freed up (unassigned). \[index\] IndexFreed(AccountIndex), - /// A account index has been frozen to its current account ID. \[who, index\] + /// A account index has been frozen to its current account ID. 
\[index, who\] IndexFrozen(AccountIndex, AccountId), } ); From f8bfe224b0a0143455f706bd96da3104141d0c43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 11 Nov 2020 09:57:14 +0100 Subject: [PATCH 04/34] Skip slot lenience on first block in BABE (#7515) The genesis header doesn't have the BABE pre-digest and we insert `0` as slot number. The slot lenience calculation will return the maximum in this situation. Besides returning the maximum which is not bad at all, it also prints some a debug message that can be confusing in the first moment. To prevent printing this debug message, we now just return early when we see that the parent block is the genesis block. --- client/consensus/babe/src/lib.rs | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 948959e96495c..dce2529201400 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -669,12 +669,17 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeSlot fn proposing_remaining_duration( &self, - head: &B::Header, + parent_head: &B::Header, slot_info: &SlotInfo, ) -> Option { let slot_remaining = self.slot_remaining_duration(slot_info); - let parent_slot = match find_pre_digest::(head) { + // If parent is genesis block, we don't require any lenience factor. + if parent_head.number().is_zero() { + return Some(slot_remaining) + } + + let parent_slot = match find_pre_digest::(parent_head) { Err(_) => return Some(slot_remaining), Ok(d) => d.slot_number(), }; @@ -682,7 +687,8 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeSlot if let Some(slot_lenience) = sc_consensus_slots::slot_lenience_exponential(parent_slot, slot_info) { - debug!(target: "babe", + debug!( + target: "babe", "No block for {} slots. Applying exponential lenience of {}s", slot_info.number.saturating_sub(parent_slot + 1), slot_lenience.as_secs(), @@ -697,8 +703,7 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeSlot /// Extract the BABE pre digest from the given header. Pre-runtime digests are /// mandatory, the function will return `Err` if none is found. 
-pub fn find_pre_digest(header: &B::Header) -> Result> -{ +pub fn find_pre_digest(header: &B::Header) -> Result> { // genesis block doesn't contain a pre digest so let's generate a // dummy one to not break any invariants in the rest of the code if header.number().is_zero() { From 095d0f06a42b66a92f28d097499a69e3f170d59d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jon=20H=C3=A4ggblad?= Date: Wed, 11 Nov 2020 14:29:19 +0100 Subject: [PATCH 05/34] slots: incrementally backoff claiming slots if finality lags behind (#7186) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * babe: backoff authoring blocks when finality lags * babe: move backoff authoring params to default constructor * babe: deduplicate the test a bit * babe: set backoff constants in service * babe: use better names for backoff authoring block parameters * babe: remove last unwrap * babe: slight style tweak * babe: fix comment * slots: move backoff block authorship logic to SimpleSlotWorker * aura: append SlotInfo in on_slot * slots: use the correct types for parameters * slots: fix review comments * aura: add missing backoff authoring blocks parameters * slots: add comments for default values * slots: add additional checks in test * slots: update implementation for new master * slots: revert the change to SlotInfo * Fix review comments * slots: rework unit tests for backing off claiming slots * slots: add test for asymptotic behaviour for slot claims * slots: address review comments * slots: add test for max_interval * slots: add assertion for intervals between between claimed slots * slots: remove rustfmt directive * slots: another attempt at explaining authoring_rate * slots: up unfinalized_slack to 50 by default * slots: add tests for time to reach max_interval * slots: fix typo in comments * Apply suggestions from code review Co-authored-by: Bastian Köcher * slots: additional tweaks to comments and info calls * slots: rename to BackoffAuthoringOnFinalizedHeadLagging * slots: make the backing off strategy generic * Apply suggestions from code review Co-authored-by: Bastian Köcher * slots: implement backoff trait for () for simplicity * slots: move logging inside backing off function to make it more specific * aura: add missing function parameter Co-authored-by: Bastian Köcher --- Cargo.lock | 2 + bin/node-template/node/src/service.rs | 4 +- bin/node/cli/Cargo.toml | 1 + bin/node/cli/src/service.rs | 3 + client/consensus/aura/src/lib.rs | 40 ++- client/consensus/babe/src/lib.rs | 38 ++- client/consensus/babe/src/tests.rs | 2 + client/consensus/slots/Cargo.toml | 1 + client/consensus/slots/src/lib.rs | 469 +++++++++++++++++++++++++- 9 files changed, 540 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index aff2c99917660..62cb03dedafa3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3827,6 +3827,7 @@ dependencies = [ "sc-consensus", "sc-consensus-babe", "sc-consensus-epochs", + "sc-consensus-slots", "sc-finality-grandpa", "sc-keystore", "sc-network", @@ -6881,6 +6882,7 @@ dependencies = [ "sc-telemetry", "sp-api", "sp-application-crypto", + "sp-arithmetic", "sp-blockchain", "sp-consensus", "sp-consensus-slots", diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 90187061c9cf7..d85de7c840dfd 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -111,6 +111,7 @@ pub fn new_full(config: Configuration) -> Result { let role = config.role.clone(); let force_authoring = 
config.force_authoring; + let backoff_authoring_blocks: Option<()> = None; let name = config.network.node_name.clone(); let enable_grandpa = !config.disable_grandpa; let prometheus_registry = config.prometheus_registry().cloned(); @@ -155,7 +156,7 @@ pub fn new_full(config: Configuration) -> Result { let can_author_with = sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); - let aura = sc_consensus_aura::start_aura::<_, _, _, _, _, AuraPair, _, _, _>( + let aura = sc_consensus_aura::start_aura::<_, _, _, _, _, AuraPair, _, _, _,_>( sc_consensus_aura::slot_duration(&*client)?, client.clone(), select_chain, @@ -164,6 +165,7 @@ pub fn new_full(config: Configuration) -> Result { network.clone(), inherent_data_providers.clone(), force_authoring, + backoff_authoring_blocks, keystore_container.sync_keystore(), can_author_with, )?; diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index e396b2dcefff5..26a23ce36ecc0 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -64,6 +64,7 @@ sc-chain-spec = { version = "2.0.0", path = "../../../client/chain-spec" } sc-consensus = { version = "0.8.0", path = "../../../client/consensus/common" } sc-transaction-pool = { version = "2.0.0", path = "../../../client/transaction-pool" } sc-network = { version = "0.8.0", path = "../../../client/network" } +sc-consensus-slots = { version = "0.8.0", path = "../../../client/consensus/slots" } sc-consensus-babe = { version = "0.8.0", path = "../../../client/consensus/babe" } grandpa = { version = "0.8.0", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } sc-client-db = { version = "0.8.0", default-features = false, path = "../../../client/db" } diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index ecf50dc14634b..3bc406b84fc67 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -204,6 +204,8 @@ pub fn new_full_base( let role = config.role.clone(); let force_authoring = config.force_authoring; + let backoff_authoring_blocks = + Some(sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging::default()); let name = config.network.node_name.clone(); let enable_grandpa = !config.disable_grandpa; let prometheus_registry = config.prometheus_registry().cloned(); @@ -249,6 +251,7 @@ pub fn new_full_base( sync_oracle: network.clone(), inherent_data_providers: inherent_data_providers.clone(), force_authoring, + backoff_authoring_blocks, babe_link, can_author_with, }; diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 5013c1813b685..97bfb217b9396 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -59,7 +59,7 @@ use sp_core::crypto::Public; use sp_application_crypto::{AppKey, AppPublic}; use sp_runtime::{ generic::{BlockId, OpaqueDigestItemId}, - Justification, + traits::NumberFor, Justification, }; use sp_runtime::traits::{Block as BlockT, Header, DigestItemFor, Zero, Member}; use sp_api::ProvideRuntimeApi; @@ -73,6 +73,7 @@ use sc_telemetry::{telemetry, CONSENSUS_TRACE, CONSENSUS_DEBUG, CONSENSUS_INFO}; use sc_consensus_slots::{ CheckedHeader, SlotInfo, SlotCompatible, StorageChanges, check_equivocation, + BackoffAuthoringBlocksStrategy, }; use sp_api::ApiExt; @@ -138,7 +139,7 @@ impl SlotCompatible for AuraSlotCompatible { } /// Start the aura worker. The returned future should be run in a futures executor. 
-pub fn start_aura( +pub fn start_aura( slot_duration: SlotDuration, client: Arc, select_chain: SC, @@ -147,11 +148,12 @@ pub fn start_aura( sync_oracle: SO, inherent_data_providers: InherentDataProviders, force_authoring: bool, + backoff_authoring_blocks: Option, keystore: SyncCryptoStorePtr, can_author_with: CAW, ) -> Result, sp_consensus::Error> where B: BlockT, - C: ProvideRuntimeApi + BlockOf + ProvideCache + AuxStore + Send + Sync, + C: ProvideRuntimeApi + BlockOf + ProvideCache + AuxStore + HeaderBackend + Send + Sync, C::Api: AuraApi>, SC: SelectChain, E: Environment + Send + Sync + 'static, @@ -163,6 +165,7 @@ pub fn start_aura( Error: std::error::Error + Send + From + 'static, SO: SyncOracle + Send + Sync + Clone, CAW: CanAuthorWith + Send, + BS: BackoffAuthoringBlocksStrategy> + Send + 'static, { let worker = AuraWorker { client, @@ -171,6 +174,7 @@ pub fn start_aura( keystore, sync_oracle: sync_oracle.clone(), force_authoring, + backoff_authoring_blocks, _key_type: PhantomData::
<P>
, }; register_aura_inherent_data_provider( @@ -188,20 +192,22 @@ pub fn start_aura( )) } -struct AuraWorker { +struct AuraWorker { client: Arc, block_import: Arc>, env: E, keystore: SyncCryptoStorePtr, sync_oracle: SO, force_authoring: bool, + backoff_authoring_blocks: Option, _key_type: PhantomData
<P>
, } -impl sc_consensus_slots::SimpleSlotWorker for AuraWorker +impl sc_consensus_slots::SimpleSlotWorker + for AuraWorker where B: BlockT, - C: ProvideRuntimeApi + BlockOf + ProvideCache + Sync, + C: ProvideRuntimeApi + BlockOf + ProvideCache + HeaderBackend + Sync, C::Api: AuraApi>, E: Environment, E::Proposer: Proposer>, @@ -210,6 +216,7 @@ where P::Public: AppPublic + Public + Member + Encode + Decode + Hash, P::Signature: TryFrom> + Member + Encode + Decode + Hash + Debug, SO: SyncOracle + Send + Clone, + BS: BackoffAuthoringBlocksStrategy> + Send + 'static, Error: std::error::Error + Send + From + 'static, { type BlockImport = I; @@ -316,6 +323,21 @@ where self.force_authoring } + fn should_backoff(&self, slot_number: u64, chain_head: &B::Header) -> bool { + if let Some(ref strategy) = self.backoff_authoring_blocks { + if let Ok(chain_head_slot) = find_pre_digest::(chain_head) { + return strategy.should_backoff( + *chain_head.number(), + chain_head_slot, + self.client.info().finalized_number, + slot_number, + self.logging_target(), + ); + } + } + false + } + fn sync_oracle(&mut self) -> &mut Self::SyncOracle { &mut self.sync_oracle } @@ -863,7 +885,7 @@ mod tests { use sp_keyring::sr25519::Keyring; use sc_client_api::BlockchainEvents; use sp_consensus_aura::sr25519::AuthorityPair; - use sc_consensus_slots::SimpleSlotWorker; + use sc_consensus_slots::{SimpleSlotWorker, BackoffAuthoringOnFinalizedHeadLagging}; use std::task::Poll; use sc_block_builder::BlockBuilderProvider; use sp_runtime::traits::Header as _; @@ -1012,7 +1034,7 @@ mod tests { &inherent_data_providers, slot_duration.get() ).expect("Registers aura inherent data provider"); - aura_futures.push(start_aura::<_, _, _, _, _, AuthorityPair, _, _, _>( + aura_futures.push(start_aura::<_, _, _, _, _, AuthorityPair, _, _, _, _>( slot_duration, client.clone(), select_chain, @@ -1021,6 +1043,7 @@ mod tests { DummyOracle, inherent_data_providers, false, + Some(BackoffAuthoringOnFinalizedHeadLagging::default()), keystore, sp_consensus::AlwaysCanAuthor, ).expect("Starts aura")); @@ -1081,6 +1104,7 @@ mod tests { keystore: keystore.into(), sync_oracle: DummyOracle.clone(), force_authoring: false, + backoff_authoring_blocks: Some(BackoffAuthoringOnFinalizedHeadLagging::default()), _key_type: PhantomData::, }; diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index dce2529201400..c672440d114b5 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -114,6 +114,7 @@ use log::{debug, info, log, trace, warn}; use prometheus_endpoint::Registry; use sc_consensus_slots::{ SlotInfo, SlotCompatible, StorageChanges, CheckedHeader, check_equivocation, + BackoffAuthoringBlocksStrategy, }; use sc_consensus_epochs::{ descendent_query, SharedEpochChanges, EpochChangesFor, Epoch as EpochT, ViableEpochDescriptor, @@ -354,7 +355,7 @@ impl std::ops::Deref for Config { } /// Parameters for BABE. -pub struct BabeParams { +pub struct BabeParams { /// The keystore that manages the keys of the node. pub keystore: SyncCryptoStorePtr, @@ -381,6 +382,9 @@ pub struct BabeParams { /// Force authoring of blocks even if we are offline pub force_authoring: bool, + /// Strategy and parameters for backing off block production. + pub backoff_authoring_blocks: Option, + /// The source of timestamps for relative slots pub babe_link: BabeLink, @@ -389,7 +393,7 @@ pub struct BabeParams { } /// Start the babe worker. 
-pub fn start_babe(BabeParams { +pub fn start_babe(BabeParams { keystore, client, select_chain, @@ -398,9 +402,10 @@ pub fn start_babe(BabeParams { sync_oracle, inherent_data_providers, force_authoring, + backoff_authoring_blocks, babe_link, can_author_with, -}: BabeParams) -> Result< +}: BabeParams) -> Result< BabeWorker, sp_consensus::Error, > where @@ -416,6 +421,7 @@ pub fn start_babe(BabeParams { Error: std::error::Error + Send + From + From + 'static, SO: SyncOracle + Send + Sync + Clone + 'static, CAW: CanAuthorWith + Send + 'static, + BS: BackoffAuthoringBlocksStrategy> + Send + 'static, { let config = babe_link.config; let slot_notification_sinks = Arc::new(Mutex::new(Vec::new())); @@ -426,6 +432,7 @@ pub fn start_babe(BabeParams { env, sync_oracle: sync_oracle.clone(), force_authoring, + backoff_authoring_blocks, keystore, epoch_changes: babe_link.epoch_changes.clone(), slot_notification_sinks: slot_notification_sinks.clone(), @@ -490,19 +497,22 @@ impl futures::Future for BabeWorker { /// Slot notification sinks. type SlotNotificationSinks = Arc::Hash, NumberFor, Epoch>)>>>>; -struct BabeSlotWorker { +struct BabeSlotWorker { client: Arc, block_import: Arc>, env: E, sync_oracle: SO, force_authoring: bool, + backoff_authoring_blocks: Option, keystore: SyncCryptoStorePtr, epoch_changes: SharedEpochChanges, slot_notification_sinks: SlotNotificationSinks, config: Config, } -impl sc_consensus_slots::SimpleSlotWorker for BabeSlotWorker where +impl sc_consensus_slots::SimpleSlotWorker + for BabeSlotWorker +where B: BlockT, C: ProvideRuntimeApi + ProvideCache + @@ -513,6 +523,7 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeSlot E::Proposer: Proposer>, I: BlockImport> + Send + Sync + 'static, SO: SyncOracle + Send + Clone, + BS: BackoffAuthoringBlocksStrategy>, Error: std::error::Error + Send + From + From + 'static, { type EpochData = ViableEpochDescriptor, Epoch>; @@ -657,6 +668,23 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeSlot self.force_authoring } + fn should_backoff(&self, slot_number: u64, chain_head: &B::Header) -> bool { + if let Some(ref strategy) = self.backoff_authoring_blocks { + if let Ok(chain_head_slot) = find_pre_digest::(chain_head) + .map(|digest| digest.slot_number()) + { + return strategy.should_backoff( + *chain_head.number(), + chain_head_slot, + self.client.info().finalized_number, + slot_number, + self.logging_target(), + ); + } + } + false + } + fn sync_oracle(&mut self) -> &mut Self::SyncOracle { &mut self.sync_oracle } diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 6b0f5870ba53d..b31699d13e0c8 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -33,6 +33,7 @@ use sp_consensus_babe::{ make_transcript, make_transcript_data, }; +use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging; use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sp_consensus::{ NoNetwork as DummyOracle, Proposal, RecordProof, AlwaysCanAuthor, @@ -434,6 +435,7 @@ fn run_one_test( sync_oracle: DummyOracle, inherent_data_providers: data.inherent_data_providers.clone(), force_authoring: false, + backoff_authoring_blocks: Some(BackoffAuthoringOnFinalizedHeadLagging::default()), babe_link: data.link.clone(), keystore, can_author_with: sp_consensus::AlwaysCanAuthor, diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml index a13a712fe76b7..d07ef49835b2b 100644 --- a/client/consensus/slots/Cargo.toml +++ b/client/consensus/slots/Cargo.toml @@ -19,6 
+19,7 @@ sc-client-api = { version = "2.0.0", path = "../../api" } sp-core = { version = "2.0.0", path = "../../../primitives/core" } sp-trie = { version = "2.0.0", path = "../../../primitives/trie" } sp-application-crypto = { version = "2.0.0", path = "../../../primitives/application-crypto" } +sp-arithmetic = { version = "2.0.0", path = "../../../primitives/arithmetic" } sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } sp-consensus-slots = { version = "0.8.0", path = "../../../primitives/consensus/slots" } sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index 681d4a6273ed9..d8601a7c12c68 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -29,18 +29,21 @@ pub use slots::SlotInfo; use slots::Slots; pub use aux_schema::{check_equivocation, MAX_SLOT_CAPACITY, PRUNING_BOUND}; +use std::{fmt::Debug, ops::Deref, pin::Pin, sync::Arc, time::{Instant, Duration}}; use codec::{Decode, Encode}; -use sp_consensus::{BlockImport, Proposer, SyncOracle, SelectChain, CanAuthorWith, SlotData, RecordProof}; use futures::{prelude::*, future::{self, Either}}; use futures_timer::Delay; -use sp_inherents::{InherentData, InherentDataProviders}; use log::{debug, error, info, warn}; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{Block as BlockT, Header, HashFor, NumberFor}; +use parking_lot::Mutex; use sp_api::{ProvideRuntimeApi, ApiRef}; -use std::{fmt::Debug, ops::Deref, pin::Pin, sync::Arc, time::{Instant, Duration}}; +use sp_arithmetic::traits::BaseArithmetic; +use sp_consensus::{BlockImport, Proposer, SyncOracle, SelectChain, CanAuthorWith, SlotData, RecordProof}; +use sp_inherents::{InherentData, InherentDataProviders}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header, HashFor, NumberFor} +}; use sc_telemetry::{telemetry, CONSENSUS_DEBUG, CONSENSUS_WARN, CONSENSUS_INFO}; -use parking_lot::Mutex; /// The changes that need to applied to the storage to create the state for a block. /// @@ -158,6 +161,16 @@ pub trait SimpleSlotWorker { /// Whether to force authoring if offline. fn force_authoring(&self) -> bool; + /// Returns whether the block production should back off. + /// + /// By default this function always returns `false`. + /// + /// An example strategy that back offs if the finalized head is lagging too much behind the tip + /// is implemented by [`BackoffAuthoringOnFinalizedHeadLagging`]. + fn should_backoff(&self, _slot_number: u64, _chain_head: &B::Header) -> bool { + false + } + /// Returns a handle to a `SyncOracle`. fn sync_oracle(&mut self) -> &mut Self::SyncOracle; @@ -249,6 +262,10 @@ pub trait SimpleSlotWorker { Some(claim) => claim, }; + if self.should_backoff(slot_number, &chain_head) { + return Box::pin(future::ready(None)); + } + debug!( target: self.logging_target(), "Starting authorship at slot {}; timestamp = {}", @@ -583,9 +600,110 @@ pub fn slot_lenience_linear(parent_slot: u64, slot_info: &SlotInfo) -> Option { + /// Returns true if we should backoff authoring new blocks. + fn should_backoff( + &self, + chain_head_number: N, + chain_head_slot: u64, + finalized_number: N, + slow_now: u64, + logging_target: &str, + ) -> bool; +} + +/// A simple default strategy for how to decide backing off authoring blocks if the number of +/// unfinalized blocks grows too large. 
+#[derive(Clone)] +pub struct BackoffAuthoringOnFinalizedHeadLagging { + /// The max interval to backoff when authoring blocks, regardless of delay in finality. + pub max_interval: N, + /// The number of unfinalized blocks allowed before starting to consider to backoff authoring + /// blocks. Note that depending on the value for `authoring_bias`, there might still be an + /// additional wait until block authorship starts getting declined. + pub unfinalized_slack: N, + /// Scales the backoff rate. A higher value effectively means we backoff slower, taking longer + /// time to reach the maximum backoff as the unfinalized head of chain grows. + pub authoring_bias: N, +} + +/// These parameters is supposed to be some form of sensible defaults. +impl Default for BackoffAuthoringOnFinalizedHeadLagging { + fn default() -> Self { + Self { + // Never wait more than 100 slots before authoring blocks, regardless of delay in + // finality. + max_interval: 100.into(), + // Start to consider backing off block authorship once we have 50 or more unfinalized + // blocks at the head of the chain. + unfinalized_slack: 50.into(), + // A reasonable default for the authoring bias, or reciprocal interval scaling, is 2. + // Effectively meaning that consider the unfinalized head suffix length to grow half as + // fast as in actuality. + authoring_bias: 2.into(), + } + } +} + +impl BackoffAuthoringBlocksStrategy for BackoffAuthoringOnFinalizedHeadLagging +where + N: BaseArithmetic + Copy +{ + fn should_backoff( + &self, + chain_head_number: N, + chain_head_slot: u64, + finalized_number: N, + slot_now: u64, + logging_target: &str, + ) -> bool { + // This should not happen, but we want to keep the previous behaviour if it does. + if slot_now <= chain_head_slot { + return false; + } + + let unfinalized_block_length = chain_head_number - finalized_number; + let interval = unfinalized_block_length.saturating_sub(self.unfinalized_slack) + / self.authoring_bias; + let interval = interval.min(self.max_interval); + + // We're doing arithmetic between block and slot numbers. + let interval: u64 = interval.unique_saturated_into(); + + // If interval is nonzero we backoff if the current slot isn't far enough ahead of the chain + // head. 
+ if slot_now <= chain_head_slot + interval { + info!( + target: logging_target, + "Backing off claiming new slot for block authorship: finality is lagging.", + ); + true + } else { + false + } + } +} + +impl BackoffAuthoringBlocksStrategy for () { + fn should_backoff( + &self, + _chain_head_number: N, + _chain_head_slot: u64, + _finalized_number: N, + _slot_now: u64, + _logging_target: &str, + ) -> bool { + false + } +} + #[cfg(test)] mod test { use std::time::{Duration, Instant}; + use crate::{BackoffAuthoringOnFinalizedHeadLagging, BackoffAuthoringBlocksStrategy}; + use substrate_test_runtime_client::runtime::Block; + use sp_api::NumberFor; const SLOT_DURATION: Duration = Duration::from_millis(6000); @@ -644,4 +762,343 @@ mod test { Some(SLOT_DURATION * 2u32.pow(7)), ); } + + #[derive(PartialEq, Debug)] + struct HeadState { + head_number: NumberFor, + head_slot: u64, + slot_now: NumberFor, + } + + impl HeadState { + fn author_block(&mut self) { + // Add a block to the head, and set latest slot to the current + self.head_number += 1; + self.head_slot = self.slot_now; + // Advance slot to next + self.slot_now += 1; + } + + fn dont_author_block(&mut self) { + self.slot_now += 1; + } + } + + #[test] + fn should_never_backoff_when_head_not_advancing() { + let strategy = BackoffAuthoringOnFinalizedHeadLagging::> { + max_interval: 100, + unfinalized_slack: 5, + authoring_bias: 2, + }; + + let head_number = 1; + let head_slot = 1; + let finalized_number = 1; + let slot_now = 2; + + let should_backoff: Vec = (slot_now..1000) + .map(|s| strategy.should_backoff(head_number, head_slot, finalized_number, s, "slots")) + .collect(); + + // Should always be false, since the head isn't advancing + let expected: Vec = (slot_now..1000).map(|_| false).collect(); + assert_eq!(should_backoff, expected); + } + + #[test] + fn should_stop_authoring_if_blocks_are_still_produced_when_finality_stalled() { + let strategy = BackoffAuthoringOnFinalizedHeadLagging::> { + max_interval: 100, + unfinalized_slack: 5, + authoring_bias: 2, + }; + + let mut head_number = 1; + let mut head_slot = 1; + let finalized_number = 1; + let slot_now = 2; + + let should_backoff: Vec = (slot_now..300) + .map(move |s| { + let b = strategy.should_backoff( + head_number, + head_slot, + finalized_number, + s, + "slots", + ); + // Chain is still advancing (by someone else) + head_number += 1; + head_slot = s; + b + }) + .collect(); + + // Should always be true after a short while, since the chain is advancing but finality is stalled + let expected: Vec = (slot_now..300).map(|s| s > 8).collect(); + assert_eq!(should_backoff, expected); + } + + #[test] + fn should_never_backoff_if_max_interval_is_reached() { + let strategy = BackoffAuthoringOnFinalizedHeadLagging::> { + max_interval: 100, + unfinalized_slack: 5, + authoring_bias: 2, + }; + + // The limit `max_interval` is used when the unfinalized chain grows to + // `max_interval * authoring_bias + unfinalized_slack`, + // which for the above parameters becomes + // 100 * 2 + 5 = 205. + // Hence we trigger this with head_number > finalized_number + 205. + let head_number = 207; + let finalized_number = 1; + + // The limit is then used once the current slot is `max_interval` ahead of slot of the head. 
+ let head_slot = 1; + let slot_now = 2; + let max_interval = strategy.max_interval; + + let should_backoff: Vec = (slot_now..200) + .map(|s| strategy.should_backoff(head_number, head_slot, finalized_number, s, "slots")) + .collect(); + + // Should backoff (true) until we are `max_interval` number of slots ahead of the chain + // head slot, then we never backoff (false). + let expected: Vec = (slot_now..200).map(|s| s <= max_interval + head_slot).collect(); + assert_eq!(should_backoff, expected); + } + + #[test] + fn should_backoff_authoring_when_finality_stalled() { + let param = BackoffAuthoringOnFinalizedHeadLagging { + max_interval: 100, + unfinalized_slack: 5, + authoring_bias: 2, + }; + + let finalized_number = 2; + let mut head_state = HeadState { + head_number: 4, + head_slot: 10, + slot_now: 11, + }; + + let should_backoff = |head_state: &HeadState| -> bool { + >>::should_backoff( + ¶m, + head_state.head_number, + head_state.head_slot, + finalized_number, + head_state.slot_now, + "slots", + ) + }; + + let backoff: Vec = (head_state.slot_now..200) + .map(|_| { + if should_backoff(&head_state) { + head_state.dont_author_block(); + true + } else { + head_state.author_block(); + false + } + }) + .collect(); + + // Gradually start to backoff more and more frequently + let expected = [ + false, false, false, false, false, // no effect + true, false, + true, false, // 1:1 + true, true, false, + true, true, false, // 2:1 + true, true, true, false, + true, true, true, false, // 3:1 + true, true, true, true, false, + true, true, true, true, false, // 4:1 + true, true, true, true, true, false, + true, true, true, true, true, false, // 5:1 + true, true, true, true, true, true, false, + true, true, true, true, true, true, false, // 6:1 + true, true, true, true, true, true, true, false, + true, true, true, true, true, true, true, false, // 7:1 + true, true, true, true, true, true, true, true, false, + true, true, true, true, true, true, true, true, false, // 8:1 + true, true, true, true, true, true, true, true, true, false, + true, true, true, true, true, true, true, true, true, false, // 9:1 + true, true, true, true, true, true, true, true, true, true, false, + true, true, true, true, true, true, true, true, true, true, false, // 10:1 + true, true, true, true, true, true, true, true, true, true, true, false, + true, true, true, true, true, true, true, true, true, true, true, false, // 11:1 + true, true, true, true, true, true, true, true, true, true, true, true, false, + true, true, true, true, true, true, true, true, true, true, true, true, false, // 12:1 + true, true, true, true, + ]; + + assert_eq!(backoff, expected); + } + + #[test] + fn should_never_wait_more_than_max_interval() { + let param = BackoffAuthoringOnFinalizedHeadLagging { + max_interval: 100, + unfinalized_slack: 5, + authoring_bias: 2, + }; + + let finalized_number = 2; + let starting_slot = 11; + let mut head_state = HeadState { + head_number: 4, + head_slot: 10, + slot_now: starting_slot, + }; + + let should_backoff = |head_state: &HeadState| -> bool { + >>::should_backoff( + ¶m, + head_state.head_number, + head_state.head_slot, + finalized_number, + head_state.slot_now, + "slots", + ) + }; + + let backoff: Vec = (head_state.slot_now..40000) + .map(|_| { + if should_backoff(&head_state) { + head_state.dont_author_block(); + true + } else { + head_state.author_block(); + false + } + }) + .collect(); + + let slots_claimed: Vec = backoff + .iter() + .enumerate() + .filter(|&(_i, x)| x == &false) + .map(|(i, _x)| i + 
starting_slot as usize) + .collect(); + + let last_slot = backoff.len() + starting_slot as usize; + let mut last_two_claimed = slots_claimed.iter().rev().take(2); + + // Check that we claimed all the way to the end. Check two slots for when we have an uneven + // number of slots_claimed. + let expected_distance = param.max_interval as usize + 1; + assert_eq!(last_slot - last_two_claimed.next().unwrap(), 92); + assert_eq!(last_slot - last_two_claimed.next().unwrap(), 92 + expected_distance); + + let intervals: Vec<_> = slots_claimed + .windows(2) + .map(|x| x[1] - x[0]) + .collect(); + + // The key thing is that the distance between claimed slots is capped to `max_interval + 1` + // assert_eq!(max_observed_interval, Some(&expected_distance)); + assert_eq!(intervals.iter().max(), Some(&expected_distance)); + + // But lets assert all distances, which we expect to grow linearly until `max_interval + 1` + let expected_intervals: Vec<_> = (0..497) + .map(|i| (i/2).max(1).min(expected_distance) ) + .collect(); + + assert_eq!(intervals, expected_intervals); + } + + fn run_until_max_interval(param: BackoffAuthoringOnFinalizedHeadLagging) -> (u64, u64) { + let finalized_number = 0; + let mut head_state = HeadState { + head_number: 0, + head_slot: 0, + slot_now: 1, + }; + + let should_backoff = |head_state: &HeadState| -> bool { + >>::should_backoff( + ¶m, + head_state.head_number, + head_state.head_slot, + finalized_number, + head_state.slot_now, + "slots", + ) + }; + + // Number of blocks until we reach the max interval + let block_for_max_interval + = param.max_interval * param.authoring_bias + param.unfinalized_slack; + + while head_state.head_number < block_for_max_interval { + if should_backoff(&head_state) { + head_state.dont_author_block(); + } else { + head_state.author_block(); + } + } + + let slot_time = 6; + let time_to_reach_limit = slot_time * head_state.slot_now; + (block_for_max_interval, time_to_reach_limit) + } + + // Denoting + // C: unfinalized_slack + // M: authoring_bias + // X: max_interval + // then the number of slots to reach the max interval can be computed from + // (start_slot + C) + M * sum(n, 1, X) + // or + // (start_slot + C) + M * X*(X+1)/2 + fn expected_time_to_reach_max_interval( + param: &BackoffAuthoringOnFinalizedHeadLagging + ) -> (u64, u64) { + let c = param.unfinalized_slack; + let m = param.authoring_bias; + let x = param.max_interval; + let slot_time = 6; + + let block_for_max_interval = x * m + c; + + // The 1 is because we start at slot_now = 1. 
+ let expected_number_of_slots = (1 + c) + m * x * (x + 1) / 2; + let time_to_reach = expected_number_of_slots * slot_time; + + (block_for_max_interval, time_to_reach) + } + + #[test] + fn time_to_reach_upper_bound_for_smaller_slack() { + let param = BackoffAuthoringOnFinalizedHeadLagging { + max_interval: 100, + unfinalized_slack: 5, + authoring_bias: 2, + }; + let expected = expected_time_to_reach_max_interval(¶m); + let (block_for_max_interval, time_to_reach_limit) = run_until_max_interval(param); + assert_eq!((block_for_max_interval, time_to_reach_limit), expected); + // Note: 16 hours is 57600 sec + assert_eq!((block_for_max_interval, time_to_reach_limit), (205, 60636)); + } + + #[test] + fn time_to_reach_upper_bound_for_larger_slack() { + let param = BackoffAuthoringOnFinalizedHeadLagging { + max_interval: 100, + unfinalized_slack: 50, + authoring_bias: 2, + }; + let expected = expected_time_to_reach_max_interval(¶m); + let (block_for_max_interval, time_to_reach_limit) = run_until_max_interval(param); + assert_eq!((block_for_max_interval, time_to_reach_limit), expected); + assert_eq!((block_for_max_interval, time_to_reach_limit), (250, 60906)); + } } From 71d027841a57a6bfa0549218923e78477cf96a0a Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Wed, 11 Nov 2020 18:19:27 +0100 Subject: [PATCH 06/34] Export app-crypto specific keystore functions (#7489) * Export app-crypto specific keystore functions * Also add back the insert function * Switch KeystoreContainer to an enum * Only export the bare minimal for LocalKeystore and fix service compile * fix: should return Arc * Add docs stating that functions only available in local keystore * Remove insert and generate functions * fix: generate function should be available in test * Add keypair function to trait * Revert "Add keypair function to trait" This reverts commit ad921b09ca73d3c09298e3a51b562ef8e0067781. * Add note for local_keystore function in service --- client/keystore/src/local.rs | 94 +++++++++++++++++------------------ client/service/src/builder.rs | 37 ++++++++++---- 2 files changed, 71 insertions(+), 60 deletions(-) diff --git a/client/keystore/src/local.rs b/client/keystore/src/local.rs index 856327d46f6ea..e0b95a08d5caf 100644 --- a/client/keystore/src/local.rs +++ b/client/keystore/src/local.rs @@ -38,7 +38,7 @@ use sp_keystore::{ SyncCryptoStore, vrf::{VRFTranscriptData, VRFSignature, make_transcript}, }; -use sp_application_crypto::{ed25519, sr25519, ecdsa}; +use sp_application_crypto::{ed25519, sr25519, ecdsa, AppPair, AppKey, IsWrappedBy}; use crate::{Result, Error}; @@ -57,6 +57,14 @@ impl LocalKeystore { let inner = KeystoreInner::new_in_memory(); Self(RwLock::new(inner)) } + + /// Get a key pair for the given public key. + /// + /// This function is only available for a local keystore. If your application plans to work with + /// remote keystores, you do not want to depend on it. + pub fn key_pair(&self, public: &::Public) -> Result { + self.0.read().key_pair::(public) + } } #[async_trait] @@ -470,6 +478,11 @@ impl KeystoreInner { Ok(public_keys) } + + /// Get a key pair for the given public key. 
+ pub fn key_pair(&self, public: &::Public) -> Result { + self.key_pair_by_type::(IsWrappedBy::from_ref(public), Pair::ID).map(Into::into) + } } @@ -479,47 +492,32 @@ mod tests { use tempfile::TempDir; use sp_core::{ Pair, - crypto::{IsWrappedBy, Ss58Codec}, + crypto::Ss58Codec, testing::SR25519, }; - use sp_application_crypto::{ed25519, sr25519, AppPublic, AppKey, AppPair}; + use sp_application_crypto::{ed25519, sr25519, AppPublic}; use std::{ fs, str::FromStr, }; - /// Generate a new key. - /// - /// Places it into the file system store. - fn generate(store: &KeystoreInner) -> Result { - store.generate_by_type::(Pair::ID).map(Into::into) - } - - /// Create a new key from seed. - /// - /// Does not place it into the file system store. - fn insert_ephemeral_from_seed(store: &mut KeystoreInner, seed: &str) -> Result { - store.insert_ephemeral_from_seed_by_type::(seed, Pair::ID).map(Into::into) - } + impl KeystoreInner { + fn insert_ephemeral_from_seed(&mut self, seed: &str) -> Result { + self.insert_ephemeral_from_seed_by_type::(seed, Pair::ID).map(Into::into) + } - /// Get public keys of all stored keys that match the key type. - /// - /// This will just use the type of the public key (a list of which to be returned) in order - /// to determine the key type. Unless you use a specialized application-type public key, then - /// this only give you keys registered under generic cryptography, and will not return keys - /// registered under the application type. - fn public_keys(store: &KeystoreInner) -> Result> { - store.raw_public_keys(Public::ID) - .map(|v| { - v.into_iter() - .map(|k| Public::from_slice(k.as_slice())) - .collect() - }) - } + fn public_keys(&self) -> Result> { + self.raw_public_keys(Public::ID) + .map(|v| { + v.into_iter() + .map(|k| Public::from_slice(k.as_slice())) + .collect() + }) + } - /// Get a key pair for the given public key. - fn key_pair(store: &KeystoreInner, public: &::Public) -> Result { - store.key_pair_by_type::(IsWrappedBy::from_ref(public), Pair::ID).map(Into::into) + fn generate(&self) -> Result { + self.generate_by_type::(Pair::ID).map(Into::into) + } } #[test] @@ -527,14 +525,14 @@ mod tests { let temp_dir = TempDir::new().unwrap(); let store = KeystoreInner::open(temp_dir.path(), None).unwrap(); - assert!(public_keys::(&store).unwrap().is_empty()); + assert!(store.public_keys::().unwrap().is_empty()); - let key: ed25519::AppPair = generate(&store).unwrap(); - let key2: ed25519::AppPair = key_pair(&store, &key.public()).unwrap(); + let key: ed25519::AppPair = store.generate().unwrap(); + let key2: ed25519::AppPair = store.key_pair(&key.public()).unwrap(); assert_eq!(key.public(), key2.public()); - assert_eq!(public_keys::(&store).unwrap()[0], key.public()); + assert_eq!(store.public_keys::().unwrap()[0], key.public()); } #[test] @@ -542,8 +540,7 @@ mod tests { let temp_dir = TempDir::new().unwrap(); let mut store = KeystoreInner::open(temp_dir.path(), None).unwrap(); - let pair: ed25519::AppPair = insert_ephemeral_from_seed( - &mut store, + let pair: ed25519::AppPair = store.insert_ephemeral_from_seed( "0x3d97c819d68f9bafa7d6e79cb991eebcd77d966c5334c0b94d9e1fa7ad0869dc" ).unwrap(); assert_eq!( @@ -554,7 +551,7 @@ mod tests { drop(store); let store = KeystoreInner::open(temp_dir.path(), None).unwrap(); // Keys generated from seed should not be persisted! 
- assert!(key_pair::(&store, &pair.public()).is_err()); + assert!(store.key_pair::(&pair.public()).is_err()); } #[test] @@ -566,15 +563,15 @@ mod tests { Some(FromStr::from_str(password.as_str()).unwrap()), ).unwrap(); - let pair: ed25519::AppPair = generate(&store).unwrap(); + let pair: ed25519::AppPair = store.generate().unwrap(); assert_eq!( pair.public(), - key_pair::(&store, &pair.public()).unwrap().public(), + store.key_pair::(&pair.public()).unwrap().public(), ); // Without the password the key should not be retrievable let store = KeystoreInner::open(temp_dir.path(), None).unwrap(); - assert!(key_pair::(&store, &pair.public()).is_err()); + assert!(store.key_pair::(&pair.public()).is_err()); let store = KeystoreInner::open( temp_dir.path(), @@ -582,7 +579,7 @@ mod tests { ).unwrap(); assert_eq!( pair.public(), - key_pair::(&store, &pair.public()).unwrap().public(), + store.key_pair::(&pair.public()).unwrap().public(), ); } @@ -593,18 +590,17 @@ mod tests { let mut keys = Vec::new(); for i in 0..10 { - keys.push(generate::(&store).unwrap().public()); - keys.push(insert_ephemeral_from_seed::( - &mut store, + keys.push(store.generate::().unwrap().public()); + keys.push(store.insert_ephemeral_from_seed::( &format!("0x3d97c819d68f9bafa7d6e79cb991eebcd7{}d966c5334c0b94d9e1fa7ad0869dc", i), ).unwrap().public()); } // Generate a key of a different type - generate::(&store).unwrap(); + store.generate::().unwrap(); keys.sort(); - let mut store_pubs = public_keys::(&store).unwrap(); + let mut store_pubs = store.public_keys::().unwrap(); store_pubs.sort(); assert_eq!(keys, store_pubs); diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 2a4dda477ab75..7d613f2bc6292 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -205,12 +205,13 @@ pub type TLightClientWithBackend = Client< TRtApi, >; -/// Construct and hold different layers of Keystore wrappers -pub struct KeystoreContainer { - keystore: Arc, - sync_keystore: SyncCryptoStorePtr, +enum KeystoreContainerInner { + Local(Arc) } +/// Construct and hold different layers of Keystore wrappers +pub struct KeystoreContainer(KeystoreContainerInner); + impl KeystoreContainer { /// Construct KeystoreContainer pub fn new(config: &KeystoreConfig) -> Result { @@ -221,22 +222,36 @@ impl KeystoreContainer { )?, KeystoreConfig::InMemory => LocalKeystore::in_memory(), }); - let sync_keystore = keystore.clone() as SyncCryptoStorePtr; - Ok(Self { - keystore, - sync_keystore, - }) + Ok(Self(KeystoreContainerInner::Local(keystore))) } /// Returns an adapter to the asynchronous keystore that implements `CryptoStore` pub fn keystore(&self) -> Arc { - self.keystore.clone() + match self.0 { + KeystoreContainerInner::Local(ref keystore) => keystore.clone(), + } } /// Returns the synchrnous keystore wrapper pub fn sync_keystore(&self) -> SyncCryptoStorePtr { - self.sync_keystore.clone() + match self.0 { + KeystoreContainerInner::Local(ref keystore) => keystore.clone() as SyncCryptoStorePtr, + } + } + + /// Returns the local keystore if available + /// + /// The function will return None if the available keystore is not a local keystore. + /// + /// # Note + /// + /// Using the [`LocalKeystore`] will result in loosing the ability to use any other keystore implementation, like + /// a remote keystore for example. Only use this if you a certain that you require it! 
+ pub fn local_keystore(&self) -> Option> { + match self.0 { + KeystoreContainerInner::Local(ref keystore) => Some(keystore.clone()), + } } } From 210300dc38713461a5537cb1752c87736670d3ee Mon Sep 17 00:00:00 2001 From: Sergei Shulepov Date: Thu, 12 Nov 2020 02:08:22 +0100 Subject: [PATCH 07/34] Update doc for the --chain flag (#7520) --- client/cli/src/params/shared_params.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/client/cli/src/params/shared_params.rs b/client/cli/src/params/shared_params.rs index ad9ab04070563..3276e5b7c4ba0 100644 --- a/client/cli/src/params/shared_params.rs +++ b/client/cli/src/params/shared_params.rs @@ -23,7 +23,10 @@ use structopt::StructOpt; /// Shared parameters used by all `CoreParams`. #[derive(Debug, StructOpt)] pub struct SharedParams { - /// Specify the chain specification (one of dev, local, or staging). + /// Specify the chain specification. + /// + /// It can be one of the predefined ones (dev, local, or staging) or it can be a path to a file with + /// the chainspec (such as one exported by the `build-spec` subcommand). #[structopt(long, value_name = "CHAIN_SPEC")] pub chain: Option, From e26a46a805d68ce55457d67552ba71b76421f708 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Thu, 12 Nov 2020 16:45:51 +0100 Subject: [PATCH 08/34] contracts: Add missing instruction to the `Schedule` (#7527) --- frame/contracts/src/schedule.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/frame/contracts/src/schedule.rs b/frame/contracts/src/schedule.rs index 24f8bb0a02c37..ff2cde2297118 100644 --- a/frame/contracts/src/schedule.rs +++ b/frame/contracts/src/schedule.rs @@ -607,6 +607,7 @@ impl<'a, T: Trait> rules::Rules for ScheduleRules<'a, T> { I32Clz | I64Clz => w.i64clz, I32Ctz | I64Ctz => w.i64ctz, I32Popcnt | I64Popcnt => w.i64popcnt, + I32Eqz | I64Eqz => w.i64eqz, I64ExtendSI32 => w.i64extendsi32, I64ExtendUI32 => w.i64extendui32, I32WrapI64 => w.i32wrapi64, From 02771e146f126a9759a62948ec8a2edd6ad47ee9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 12 Nov 2020 20:01:58 +0100 Subject: [PATCH 09/34] Don't log with colors when we are writing to a tty (#7525) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Don't log with colors when we are writing to a tty This fixes a regression that was introduced by the switch to tracing. Before we killed all colors before writing to a tty, this pr brings the behaviour back. 
* Remove accidentally added crate * Review feedback * More feedback * Update client/cli/src/logging.rs Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * Update client/cli/src/logging.rs Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> --- Cargo.lock | 9 +++--- client/cli/Cargo.toml | 3 +- client/cli/src/lib.rs | 46 ++++++++++++++++++++++++----- client/cli/src/logging.rs | 59 +++++++++++++++++++++++++++++++++---- client/informant/src/lib.rs | 4 ++- 5 files changed, 102 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 62cb03dedafa3..7c9bdc4305d1a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6204,9 +6204,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.3.9" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6" +checksum = "38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c" dependencies = [ "aho-corasick", "memchr", @@ -6226,9 +6226,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.18" +version = "0.6.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8" +checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189" [[package]] name = "region" @@ -6549,6 +6549,7 @@ dependencies = [ "fdlimit", "futures 0.3.5", "hex", + "lazy_static", "libp2p", "log", "names", diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index b7e798a3ba1c1..51c499828ac2a 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -15,7 +15,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.11" atty = "0.2.13" -regex = "1.3.4" +regex = "1.4.2" +lazy_static = "1.4.0" ansi_term = "0.12.1" tokio = { version = "0.2.21", features = [ "signal", "rt-core", "rt-threaded", "blocking" ] } futures = "0.3.4" diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index c25693dc418b1..b543f80a9d3b3 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -47,13 +47,13 @@ use structopt::{ clap::{self, AppSettings}, StructOpt, }; -#[doc(hidden)] -pub use tracing; use tracing_subscriber::{ filter::Directive, fmt::time::ChronoLocal, layer::SubscriberExt, FmtSubscriber, Layer, }; pub use logging::PREFIX_LOG_SPAN; +#[doc(hidden)] +pub use tracing; /// Substrate client CLI /// @@ -308,8 +308,7 @@ pub fn init_logger( } } - let isatty = atty::is(atty::Stream::Stderr); - let enable_color = isatty; + let enable_color = atty::is(atty::Stream::Stderr); let timer = ChronoLocal::with_format(if simple { "%Y-%m-%d %H:%M:%S".to_string() } else { @@ -321,12 +320,13 @@ pub fn init_logger( .with_writer(std::io::stderr) .event_format(logging::EventFormat { timer, - ansi: enable_color, display_target: !simple, display_level: !simple, display_thread_name: !simple, + enable_color, }) - .finish().with(logging::NodeNameLayer); + .finish() + .with(logging::NodeNameLayer); if let Some(profiling_targets) = profiling_targets { let profiling = sc_tracing::ProfilingLayer::new(tracing_receiver, &profiling_targets); @@ -450,8 +450,7 @@ mod tests { #[test] fn prefix_in_log_lines_entrypoint() { if env::var("ENABLE_LOGGING").is_ok() { - let test_pattern = "test-target=info"; - init_logger(&test_pattern, Default::default(), Default::default()).unwrap(); + init_logger("", Default::default(), Default::default()).unwrap(); 
prefix_in_log_lines_process(); } } @@ -460,4 +459,35 @@ mod tests { fn prefix_in_log_lines_process() { log::info!("{}", EXPECTED_LOG_MESSAGE); } + + /// This is no actual test, it will be used by the `do_not_write_with_colors_on_tty` test. + /// The given test will call the test executable to only execute this test that + /// will only print a log line with some colors in it. + #[test] + fn do_not_write_with_colors_on_tty_entrypoint() { + if env::var("ENABLE_LOGGING").is_ok() { + init_logger("", Default::default(), Default::default()).unwrap(); + log::info!("{}", ansi_term::Colour::Yellow.paint(EXPECTED_LOG_MESSAGE)); + } + } + + #[test] + fn do_not_write_with_colors_on_tty() { + let re = regex::Regex::new(&format!( + r"^\d{{4}}-\d{{2}}-\d{{2}} \d{{2}}:\d{{2}}:\d{{2}} {}$", + EXPECTED_LOG_MESSAGE, + )).unwrap(); + let executable = env::current_exe().unwrap(); + let output = Command::new(executable) + .env("ENABLE_LOGGING", "1") + .args(&["--nocapture", "do_not_write_with_colors_on_tty_entrypoint"]) + .output() + .unwrap(); + + let output = String::from_utf8(output.stderr).unwrap(); + assert!( + re.is_match(output.trim()), + format!("Expected:\n{}\nGot:\n{}", re, output), + ); + } } diff --git a/client/cli/src/logging.rs b/client/cli/src/logging.rs index e1fc90505b45f..ffb4c3dfaafa1 100644 --- a/client/cli/src/logging.rs +++ b/client/cli/src/logging.rs @@ -16,8 +16,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::fmt::{self, Write}; use ansi_term::Colour; -use std::fmt; use tracing::{span::Attributes, Event, Id, Level, Subscriber}; use tracing_log::NormalizeEvent; use tracing_subscriber::{ @@ -29,16 +29,62 @@ use tracing_subscriber::{ registry::LookupSpan, Layer, }; +use regex::Regex; /// Span name used for the logging prefix. See macro `sc_cli::prefix_logs_with!` pub const PREFIX_LOG_SPAN: &str = "substrate-log-prefix"; +/// A writer that may write to `inner_writer` with colors. +/// +/// This is used by [`EventFormat`] to kill colors when `enable_color` is `false`. +/// +/// It is required to call [`MaybeColorWriter::write`] after all writes are done, +/// because the content of these writes is buffered and will only be written to the +/// `inner_writer` at that point. +struct MaybeColorWriter<'a> { + enable_color: bool, + buffer: String, + inner_writer: &'a mut dyn fmt::Write, +} + +impl<'a> fmt::Write for MaybeColorWriter<'a> { + fn write_str(&mut self, buf: &str) -> fmt::Result { + self.buffer.push_str(buf); + Ok(()) + } +} + +impl<'a> MaybeColorWriter<'a> { + /// Creates a new instance. + fn new(enable_color: bool, inner_writer: &'a mut dyn fmt::Write) -> Self { + Self { + enable_color, + inner_writer, + buffer: String::new(), + } + } + + /// Write the buffered content to the `inner_writer`. + fn write(&mut self) -> fmt::Result { + lazy_static::lazy_static! 
{ + static ref RE: Regex = Regex::new("\x1b\\[[^m]+m").expect("Error initializing color regex"); + } + + if !self.enable_color { + let replaced = RE.replace_all(&self.buffer, ""); + self.inner_writer.write_str(&replaced) + } else { + self.inner_writer.write_str(&self.buffer) + } + } +} + pub(crate) struct EventFormat { pub(crate) timer: T, - pub(crate) ansi: bool, pub(crate) display_target: bool, pub(crate) display_level: bool, pub(crate) display_thread_name: bool, + pub(crate) enable_color: bool, } // NOTE: the following code took inspiration from tracing-subscriber @@ -56,12 +102,13 @@ where writer: &mut dyn fmt::Write, event: &Event, ) -> fmt::Result { + let writer = &mut MaybeColorWriter::new(self.enable_color, writer); let normalized_meta = event.normalized_metadata(); let meta = normalized_meta.as_ref().unwrap_or_else(|| event.metadata()); - time::write(&self.timer, writer, self.ansi)?; + time::write(&self.timer, writer, self.enable_color)?; if self.display_level { - let fmt_level = { FmtLevel::new(meta.level(), self.ansi) }; + let fmt_level = { FmtLevel::new(meta.level(), self.enable_color) }; write!(writer, "{} ", fmt_level)?; } @@ -94,7 +141,9 @@ where write!(writer, "{}:", meta.target())?; } ctx.format_fields(writer, event)?; - writeln!(writer) + writeln!(writer)?; + + writer.write() } } diff --git a/client/informant/src/lib.rs b/client/informant/src/lib.rs index c60eda76f63f6..d4f34cb488a97 100644 --- a/client/informant/src/lib.rs +++ b/client/informant/src/lib.rs @@ -35,7 +35,9 @@ mod display; /// The format to print telemetry output in. #[derive(Clone, Debug)] pub struct OutputFormat { - /// Enable color output in logs. True by default. + /// Enable color output in logs. + /// + /// Is enabled by default. pub enable_color: bool, } From 1c0f76b168345d1a42bb6a106d74d22d10ae2310 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 13 Nov 2020 14:48:23 +0100 Subject: [PATCH 10/34] MemoryId -> u32 (#7534) --- client/executor/wasmtime/src/host.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/executor/wasmtime/src/host.rs b/client/executor/wasmtime/src/host.rs index eeb7cb927167f..8d20c9a566dc8 100644 --- a/client/executor/wasmtime/src/host.rs +++ b/client/executor/wasmtime/src/host.rs @@ -232,7 +232,7 @@ impl<'a> Sandbox for HostContext<'a> { .map_err(|e| e.to_string()) } - fn memory_new(&mut self, initial: u32, maximum: MemoryId) -> sp_wasm_interface::Result { + fn memory_new(&mut self, initial: u32, maximum: u32) -> sp_wasm_interface::Result { self.sandbox_store .borrow_mut() .new_memory(initial, maximum) From 5222a8f4a71134e3a95f268d1bcd9c8c5715b048 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 13 Nov 2020 22:38:55 +0100 Subject: [PATCH 11/34] Enable local addresses in DHT when chain type == `Local` | `Development` (#7538) * Enable local addresses in DHT when chain type == `Local` | `Development` This pr changes when to add local addresses to DHT. Instead of only checking if `--discover-local` and `--dev` are present, we now also check if the chain type is `Local` or `Development`. * Update the docs! 
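To make the new behaviour concrete, here is a minimal, self-contained sketch of the decision this patch introduces. The `ChainType` enum below is a simplified stand-in for the real `sc_chain_spec::ChainType` (only the variants relevant to this check are modelled), and the free function mirrors the `allow_non_globals_in_dht` computation added to `NetworkParams::network_config` in the diff that follows; it is an illustration under those assumptions, not the actual implementation.

// Simplified stand-in for `sc_chain_spec::ChainType`; the real enum also has a
// `Custom(String)` variant, omitted here for brevity.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ChainType {
    Development,
    Local,
    Live,
}

/// Local (non-global) addresses go into the DHT if the user asked for it with
/// `--discover-local`, if `--dev` is in effect, or if the chain type is
/// `Local` or `Development`.
fn allow_non_globals_in_dht(discover_local: bool, is_dev: bool, chain_type: ChainType) -> bool {
    discover_local
        || is_dev
        || matches!(chain_type, ChainType::Local | ChainType::Development)
}

fn main() {
    // A live chain with no flags keeps local addresses out of the DHT.
    assert!(!allow_non_globals_in_dht(false, false, ChainType::Live));
    // A `Local` or `Development` chain now enables local discovery on its own.
    assert!(allow_non_globals_in_dht(false, false, ChainType::Local));
    assert!(allow_non_globals_in_dht(false, false, ChainType::Development));
    // `--discover-local` still forces it on for any chain type.
    assert!(allow_non_globals_in_dht(true, false, ChainType::Live));
}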
--- client/cli/src/params/network_params.rs | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/client/cli/src/params/network_params.rs b/client/cli/src/params/network_params.rs index 209742f54e9b8..a973d61272ced 100644 --- a/client/cli/src/params/network_params.rs +++ b/client/cli/src/params/network_params.rs @@ -21,7 +21,7 @@ use sc_network::{ config::{NetworkConfiguration, NodeKeyConfig, NonReservedPeerMode, TransportConfig}, multiaddr::Protocol, }; -use sc_service::{ChainSpec, config::{Multiaddr, MultiaddrWithPeerId}}; +use sc_service::{ChainSpec, ChainType, config::{Multiaddr, MultiaddrWithPeerId}}; use std::path::PathBuf; use structopt::StructOpt; @@ -94,7 +94,8 @@ pub struct NetworkParams { /// Enable peer discovery on local networks. /// - /// By default this option is true for `--dev` and false otherwise. + /// By default this option is `true` for `--dev` or when the chain type is `Local`/`Development` + /// and false otherwise. #[structopt(long)] pub discover_local: bool, @@ -139,6 +140,13 @@ impl NetworkParams { let mut boot_nodes = chain_spec.boot_nodes().to_vec(); boot_nodes.extend(self.bootnodes.clone()); + let chain_type = chain_spec.chain_type(); + // Activate if the user explicitly requested local discovery, `--dev` is given or the + // chain type is `Local`/`Development` + let allow_non_globals_in_dht = self.discover_local + || is_dev + || matches!(chain_type, ChainType::Local | ChainType::Development); + NetworkConfiguration { boot_nodes, net_config_path, @@ -163,7 +171,7 @@ impl NetworkParams { wasm_external_transport: None, }, max_parallel_downloads: self.max_parallel_downloads, - allow_non_globals_in_dht: self.discover_local || is_dev, + allow_non_globals_in_dht, kademlia_disjoint_query_paths: self.kademlia_disjoint_query_paths, } } From 77007ed87727a761169d4c57cbeae0151d9efb81 Mon Sep 17 00:00:00 2001 From: Kirill Pimenov Date: Sat, 14 Nov 2020 07:19:26 +0100 Subject: [PATCH 12/34] Update tiny-bip39 to v0.8 (#7539) It would improve secret zeroization due to https://github.com/maciejhirsz/tiny-bip39/pull/22, and would also remove one of the points where we depend on `failure` crate, which is deprecated (see https://github.com/rust-lang-nursery/failure/pull/347) --- Cargo.lock | 63 +++++++++++++++++++++++++------------- primitives/core/Cargo.toml | 2 +- 2 files changed, 43 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7c9bdc4305d1a..fca7a32c96d9c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -134,9 +134,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.31" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85bb70cc08ec97ca5450e6eba421deeea5f172c0fc61f78b5357b2a8e8be195f" +checksum = "bf8dcb5b4bbaa28653b647d8c77bd4ed40183b48882e130c1f1ffb73de069fd7" [[package]] name = "approx" @@ -462,9 +462,9 @@ checksum = "7059804e226b3ac116519a252d7f5fb985a5ccc0e93255e036a5f7e7283323f4" dependencies = [ "failure", "hashbrown 0.1.8", - "hmac", + "hmac 0.7.1", "once_cell 0.1.8", - "pbkdf2", + "pbkdf2 0.3.0", "rand 0.6.5", "sha2 0.8.2", ] @@ -2288,6 +2288,16 @@ dependencies = [ "digest 0.8.1", ] +[[package]] +name = "hmac" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" +dependencies = [ + "crypto-mac 0.8.0", + "digest 0.9.0", +] + [[package]] name = "hmac-drbg" version = "0.2.0" @@ -2296,7 +2306,7 @@ checksum = 
"c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b" dependencies = [ "digest 0.8.1", "generic-array 0.12.3", - "hmac", + "hmac 0.7.1", ] [[package]] @@ -5474,6 +5484,15 @@ dependencies = [ "rayon", ] +[[package]] +name = "pbkdf2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd" +dependencies = [ + "crypto-mac 0.8.0", +] + [[package]] name = "pdqselect" version = "0.1.0" @@ -8896,8 +8915,8 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bed6646a0159b9935b5d045611560eeef842b78d7adc3ba36f5ca325a13a0236" dependencies = [ - "hmac", - "pbkdf2", + "hmac 0.7.1", + "pbkdf2 0.3.0", "schnorrkel", "sha2 0.8.2", "zeroize", @@ -9168,9 +9187,9 @@ checksum = "502d53007c02d7605a05df1c1a73ee436952781653da5d0bf57ad608f66932c1" [[package]] name = "syn" -version = "1.0.44" +version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e03e57e4fcbfe7749842d53e24ccb9aa12b7252dbe5e91d2acad31834c8b8fdd" +checksum = "cc371affeffc477f42a221a1e4297aedcea33d47d19b61455588bd9d8f6b19ac" dependencies = [ "proc-macro2", "quote", @@ -9259,18 +9278,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "318234ffa22e0920fe9a40d7b8369b5f649d490980cf7aadcf1eb91594869b42" +checksum = "0e9ae34b84616eedaaf1e9dd6026dbe00dcafa92aa0c8077cb69df1fcfe5e53e" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cae2447b6282786c3493999f40a9be2a6ad20cb8bd268b0a0dbf5a065535c0ab" +checksum = "9ba20f23e85b10754cd195504aebf6a27e2e6cbe28c17778a0c930724628dd56" dependencies = [ "proc-macro2", "quote", @@ -9307,18 +9326,20 @@ dependencies = [ [[package]] name = "tiny-bip39" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0165e045cc2ae1660270ca65e1676dbaab60feb0f91b10f7d0665e9b47e31f2" +checksum = "d9e44c4759bae7f1032e286a7ef990bd9ed23fe831b7eeba0beb97484c2e59b8" dependencies = [ - "failure", - "hmac", + "anyhow", + "hmac 0.8.1", "once_cell 1.4.1", - "pbkdf2", + "pbkdf2 0.4.0", "rand 0.7.3", "rustc-hash", - "sha2 0.8.2", + "sha2 0.9.1", + "thiserror", "unicode-normalization", + "zeroize", ] [[package]] @@ -10514,9 +10535,9 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cbac2ed2ba24cc90f5e06485ac8c7c1e5449fe8911aef4d8877218af021a5b8" +checksum = "05f33972566adbd2d3588b0491eb94b98b43695c4ef897903470ede4f3f5a28a" dependencies = [ "zeroize_derive", ] diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 1757bb4e0d520..f6989a0df4f0f 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -26,7 +26,7 @@ hash256-std-hasher = { version = "0.15.2", default-features = false } base58 = { version = "0.1.0", optional = true } rand = { version = "0.7.3", optional = true, features = ["small_rng"] } substrate-bip39 = { version = "0.4.2", optional = true } -tiny-bip39 = { version = "0.7", optional = true } +tiny-bip39 = { version = "0.8", optional = true } regex = { version = "1.3.1", optional = true } num-traits = { version = "0.2.8", default-features = false } zeroize = { 
version = "1.0.0", default-features = false } From f31e7784e8b7c1c14535274c7b6c0be7b8310b4b Mon Sep 17 00:00:00 2001 From: Andrew Plaza Date: Mon, 16 Nov 2020 12:40:35 +0100 Subject: [PATCH 13/34] make LocalCallExecutor public (#7528) --- client/service/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index cb741c2920b06..8e6b0037bdf93 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -80,6 +80,7 @@ pub use sc_tracing::TracingReceiver; pub use task_manager::SpawnTaskHandle; pub use task_manager::TaskManager; pub use sp_consensus::import_queue::ImportQueue; +pub use self::client::LocalCallExecutor; use sc_client_api::{blockchain::HeaderBackend, BlockchainEvents}; const DEFAULT_PROTOCOL_ID: &str = "sup"; From 74e01c84ca73b22cb9053e6e641a61ed87c31f7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 16 Nov 2020 14:15:05 +0100 Subject: [PATCH 14/34] Fix some weirdness in `offchain_worker` (#7541) We call `offchain_worker` with the state of the imported block and pass the header of this block. However in the runtime we call all `offchain_worker` functions with the number of the parent block. Besides that we also pass all digests and not only the pre runtime digests. In the context where the offchain worker is executed we have all digests, so there is no real reason to only pass pre runtime digests. Another fix is that we also insert the hash of the current header into the block hash map. --- frame/executive/src/lib.rs | 44 +++++++++++++++++++++++++++++++------- 1 file changed, 36 insertions(+), 8 deletions(-) diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 961dbc4376a55..ccb5c2d262871 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -117,7 +117,7 @@ use sp_std::{prelude::*, marker::PhantomData}; use frame_support::{ - storage::StorageValue, weights::{GetDispatchInfo, DispatchInfo, DispatchClass}, + StorageValue, StorageMap, weights::{GetDispatchInfo, DispatchInfo, DispatchClass}, traits::{OnInitialize, OnFinalize, OnRuntimeUpgrade, OffchainWorker}, dispatch::PostDispatchInfo, }; @@ -453,7 +453,7 @@ where // We need to keep events available for offchain workers, // hence we initialize the block manually. // OffchainWorker RuntimeApi should skip initialization. - let digests = Self::extract_pre_digest(header); + let digests = header.digest().clone(); >::initialize( header.number(), @@ -463,15 +463,16 @@ where frame_system::InitKind::Inspection, ); + // Frame system only inserts the parent hash into the block hashes as normally we don't know + // the hash for the header before. However, here we are aware of the hash and we can add it + // as well. + frame_system::BlockHash::::insert(header.number(), header.hash()); + // Initialize logger, so the log messages are visible // also when running WASM. frame_support::debug::RuntimeLogger::init(); - >::offchain_worker( - // to maintain backward compatibility we call module offchain workers - // with parent block number. 
- header.number().saturating_sub(1u32.into()) - ) + >::offchain_worker(*header.number()) } } @@ -481,7 +482,7 @@ mod tests { use super::*; use sp_core::H256; use sp_runtime::{ - generic::Era, Perbill, DispatchError, testing::{Digest, Header, Block}, + generic::{Era, DigestItem}, Perbill, DispatchError, testing::{Digest, Header, Block}, traits::{Header as HeaderT, BlakeTwo256, IdentityLookup}, transaction_validity::{ InvalidTransaction, ValidTransaction, TransactionValidityError, UnknownTransaction @@ -547,6 +548,10 @@ mod tests { sp_io::storage::set(super::TEST_KEY, "module".as_bytes()); 200 } + + fn offchain_worker(n: T::BlockNumber) { + assert_eq!(T::BlockNumber::from(1u32), n); + } } } @@ -1115,4 +1120,27 @@ mod tests { ); }); } + + #[test] + fn offchain_worker_works_as_expected() { + new_test_ext(1).execute_with(|| { + let parent_hash = sp_core::H256::from([69u8; 32]); + let mut digest = Digest::default(); + digest.push(DigestItem::Seal([1, 2, 3, 4], vec![5, 6, 7, 8])); + + let header = Header::new( + 1, + H256::default(), + H256::default(), + parent_hash, + digest.clone(), + ); + + Executive::offchain_worker(&header); + + assert_eq!(digest, System::digest()); + assert_eq!(parent_hash, System::block_hash(0)); + assert_eq!(header.hash(), System::block_hash(1)); + }); + } } From 99602cda4e1c06d66a40aa933789308d4ca9a7ce Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Mon, 16 Nov 2020 16:46:36 +0100 Subject: [PATCH 15/34] Use inbound peerslot slots when a substream is received, rather than a connection (#7464) * Use inbound peerslot slots when a substream is received, rather than a connection * Refactor PeerState * Some bugfixes * Fix warnings so that CI runs, gmlrlblbl * Bugfixes * Update docs * Apply suggestions from code review Co-authored-by: Roman Borschel * Clean up Banned state * Refactor connections state * Fix possibility of Enabled with no Opening or Open connection * Line width * Add some debug_asserts! and fix TODO * Refactor legacy handler * Rewrite group.rs entirely [part 1] * Rewrite group.rs entirely [part 2] * Remove faulty assertion Because of the asynchronous nature of the behaviour <-> handler communications, it is possible to receive notifications while in the Closing state * Don't poll the legacy substream is not Open * Tolerate when not all substreams are accepted * Remove TODOs * Dummy commit to make CI log interesting things * Try race condition fix * Revert "Try race condition fix" This reverts commit 0675c659d06195c30f8c5bc13e2d88141d57a3ba. * Correctly rebuild pending_opening * Minor tweaks * Printlns for CI debugging * Revert "Printlns for CI debugging" This reverts commit e7852a231f4fc418898767aaa27c9a4358e12e8b. * Revert "Dummy commit to make CI log interesting things" This reverts commit 259ddd74088e53e7c6a9b0a62a8d1573a0063ce3. * mv group.rs ../handler.rs * Apply suggestions from code review Co-authored-by: Max Inden * Banned => Backoff * Mention the actual PeerStates * OpenDesired -> OpenDesiredByRemote * OpeningThenClosing * Add doc links to PeerState * Simplify increment logic * One more debug_assert * debug_assert! 
* OpenDesiredByRemote * Update client/network/src/protocol/generic_proto/behaviour.rs Co-authored-by: Max Inden Co-authored-by: Roman Borschel Co-authored-by: Max Inden --- client/network/src/protocol.rs | 4 +- client/network/src/protocol/generic_proto.rs | 2 +- .../src/protocol/generic_proto/behaviour.rs | 1672 +++++++++++------ .../src/protocol/generic_proto/handler.rs | 1055 ++++++++++- .../protocol/generic_proto/handler/group.rs | 737 -------- .../protocol/generic_proto/handler/legacy.rs | 559 ------ .../generic_proto/handler/notif_in.rs | 293 --- .../generic_proto/handler/notif_out.rs | 444 ----- .../protocol/generic_proto/upgrade/legacy.rs | 26 +- client/network/src/service.rs | 5 +- 10 files changed, 2197 insertions(+), 2600 deletions(-) delete mode 100644 client/network/src/protocol/generic_proto/handler/group.rs delete mode 100644 client/network/src/protocol/generic_proto/handler/legacy.rs delete mode 100644 client/network/src/protocol/generic_proto/handler/notif_in.rs delete mode 100644 client/network/src/protocol/generic_proto/handler/notif_out.rs diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index ac74af0f5ca94..9403e471b0f27 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -63,7 +63,7 @@ pub mod message; pub mod event; pub mod sync; -pub use generic_proto::{NotificationsSink, Ready, NotifsHandlerError, LegacyConnectionKillError}; +pub use generic_proto::{NotificationsSink, Ready, NotifsHandlerError}; const REQUEST_TIMEOUT_SEC: u64 = 40; /// Interval at which we perform time based maintenance @@ -1668,7 +1668,7 @@ impl NetworkBehaviour for Protocol { notifications_sink, } }, - GenericProtoOut::CustomProtocolClosed { peer_id, .. } => { + GenericProtoOut::CustomProtocolClosed { peer_id } => { self.on_peer_disconnected(peer_id) }, GenericProtoOut::LegacyMessage { peer_id, message } => diff --git a/client/network/src/protocol/generic_proto.rs b/client/network/src/protocol/generic_proto.rs index 3133471b0d249..4d6e607a146e7 100644 --- a/client/network/src/protocol/generic_proto.rs +++ b/client/network/src/protocol/generic_proto.rs @@ -21,7 +21,7 @@ //! network, then performs the Substrate protocol handling on top. pub use self::behaviour::{GenericProto, GenericProtoOut}; -pub use self::handler::{NotifsHandlerError, NotificationsSink, Ready, LegacyConnectionKillError}; +pub use self::handler::{NotifsHandlerError, NotificationsSink, Ready}; mod behaviour; mod handler; diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index 7b62b154016c3..f84aead47283a 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -42,45 +42,35 @@ use wasm_timer::Instant; /// Network behaviour that handles opening substreams for custom protocols with other peers. /// -/// ## Legacy vs new protocol -/// -/// The `GenericProto` behaves as following: -/// -/// - Whenever a connection is established, we open a single substream (called "legacy protocol" in -/// the source code) on that connection. This substream name depends on the `protocol_id` and -/// `versions` passed at initialization. If the remote refuses this substream, we close the -/// connection. -/// -/// - For each registered protocol, we also open an additional substream for this protocol. If the -/// remote refuses this substream, then it's fine. 
-/// -/// - Whenever we want to send a message, we can call either `send_packet` to force the legacy -/// substream, or `write_notification` to indicate a registered protocol. If the registered -/// protocol was refused or isn't supported by the remote, we always use the legacy instead. -/// -/// ## How it works +/// # How it works /// /// The role of the `GenericProto` is to synchronize the following components: /// /// - The libp2p swarm that opens new connections and reports disconnects. -/// - The connection handler (see `handler.rs`) that handles individual connections. +/// - The connection handler (see `group.rs`) that handles individual connections. /// - The peerset manager (PSM) that requests links to peers to be established or broken. /// - The external API, that requires knowledge of the links that have been established. /// -/// Each connection handler can be in four different states: Enabled+Open, Enabled+Closed, -/// Disabled+Open, or Disabled+Closed. The Enabled/Disabled component must be in sync with the -/// peerset manager. For example, if the peerset manager requires a disconnection, we disable the -/// connection handlers of that peer. The Open/Closed component must be in sync with the external -/// API. +/// In the state machine below, each `PeerId` is attributed one of these states: +/// +/// - [`PeerState::Requested`]: No open connection, but requested by the peerset. Currently dialing. +/// - [`PeerState::Disabled`]: Has open TCP connection(s) unbeknownst to the peerset. No substream +/// is open. +/// - [`PeerState::Enabled`]: Has open TCP connection(s), acknowledged by the peerset. +/// - Notifications substreams are open on at least one connection, and external +/// API has been notified. +/// - Notifications substreams aren't open. +/// - [`PeerState::Incoming`]: Has open TCP connection(s) and remote would like to open substreams. +/// Peerset has been asked to attribute an inbound slot. /// -/// However, a connection handler for a peer only exists if we are actually connected to that peer. -/// What this means is that there are six possible states for each peer: Disconnected, Dialing -/// (trying to connect), Enabled+Open, Enabled+Closed, Disabled+Open, Disabled+Closed. -/// Most notably, the Dialing state must correspond to a "link established" state in the peerset -/// manager. In other words, the peerset manager doesn't differentiate whether we are dialing a -/// peer or connected to it. +/// In addition to these states, there also exists a "banning" system. If we fail to dial a peer, +/// we back-off for a few seconds. If the PSM requests connecting to a peer that is currently +/// backed-off, the next dialing attempt is delayed until after the ban expires. However, the PSM +/// will still consider the peer to be connected. This "ban" is thus not a ban in a strict sense: +/// if a backed-off peer tries to connect, the connection is accepted. A ban only delays dialing +/// attempts. /// -/// There may be multiple connections to a peer. However, the status of a peer on +/// There may be multiple connections to a peer. The status of a peer on /// the API of this behaviour and towards the peerset manager is aggregated in /// the following way: /// @@ -94,9 +84,9 @@ use wasm_timer::Instant; /// in terms of potential reordering and dropped messages. Messages can /// be received on any connection. /// 3. The behaviour reports `GenericProtoOut::CustomProtocolOpen` when the -/// first connection reports `NotifsHandlerOut::Open`. 
+/// first connection reports `NotifsHandlerOut::OpenResultOk`. /// 4. The behaviour reports `GenericProtoOut::CustomProtocolClosed` when the -/// last connection reports `NotifsHandlerOut::Closed`. +/// last connection reports `NotifsHandlerOut::ClosedResult`. /// /// In this way, the number of actual established connections to the peer is /// an implementation detail of this behaviour. Note that, in practice and at @@ -104,12 +94,6 @@ use wasm_timer::Instant; /// and only as a result of simultaneous dialing. However, the implementation /// accommodates for any number of connections. /// -/// Additionally, there also exists a "banning" system. If we fail to dial a peer, we "ban" it for -/// a few seconds. If the PSM requests connecting to a peer that is currently "banned", the next -/// dialing attempt is delayed until after the ban expires. However, the PSM will still consider -/// the peer to be connected. This "ban" is thus not a ban in a strict sense: If a "banned" peer -/// tries to connect, the connection is accepted. A ban only delays dialing attempts. -/// pub struct GenericProto { /// `PeerId` of the local node. local_peer_id: PeerId, @@ -157,6 +141,8 @@ pub struct GenericProto { struct DelayId(u64); /// State of a peer we're connected to. +/// +/// The variants correspond to the state of the peer w.r.t. the peerset. #[derive(Debug)] enum PeerState { /// State is poisoned. This is a temporary state for a peer and we should always switch back @@ -166,9 +152,11 @@ enum PeerState { /// The peer misbehaved. If the PSM wants us to connect to this peer, we will add an artificial /// delay to the connection. - Banned { - /// Until when the peer is banned. - until: Instant, + Backoff { + /// When the ban expires. For clean-up purposes. References an entry in `delays`. + timer: DelayId, + /// Until when the peer is backed-off. + timer_deadline: Instant, }, /// The peerset requested that we connect to this peer. We are currently not connected. @@ -182,40 +170,54 @@ enum PeerState { /// The peerset requested that we connect to this peer. We are currently dialing this peer. Requested, - /// We are connected to this peer but the peerset refused it. + /// We are connected to this peer but the peerset hasn't requested it or has denied it. /// - /// We may still have ongoing traffic with that peer, but it should cease shortly. + /// The handler is either in the closed state, or a `Close` message has been sent to it and + /// hasn't been answered yet. Disabled { - /// The connections that are currently open for custom protocol traffic. - open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>, - /// If `Some`, any dial attempts to this peer are delayed until the given `Instant`. - banned_until: Option, + /// If `Some`, any connection request from the peerset to this peer is delayed until the + /// given `Instant`. + backoff_until: Option, + + /// List of connections with this peer, and their state. + connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>, }, - /// We are connected to this peer but we are not opening any Substrate substream. The handler - /// will be enabled when `timer` fires. This peer can still perform Kademlia queries and such, - /// but should get disconnected in a few seconds. + /// We are connected to this peer. The peerset has requested a connection to this peer, but + /// it is currently in a "backed-off" phase. The state will switch to `Enabled` once the timer + /// expires. 
+ /// + /// The handler is either in the closed state, or a `Close` message has been sent to it and + /// hasn't been answered yet. + /// + /// The handler will be opened when `timer` fires. DisabledPendingEnable { - /// The connections that are currently open for custom protocol traffic. - open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>, /// When to enable this remote. References an entry in `delays`. timer: DelayId, /// When the `timer` will trigger. timer_deadline: Instant, + + /// List of connections with this peer, and their state. + connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>, }, - /// We are connected to this peer and the peerset has accepted it. The handler is in the - /// enabled state. + /// We are connected to this peer and the peerset has accepted it. Enabled { - /// The connections that are currently open for custom protocol traffic. - open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>, + /// List of connections with this peer, and their state. + connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>, }, - /// We received an incoming connection from this peer and forwarded that - /// connection request to the peerset. The connection handlers are waiting - /// for initialisation, i.e. to be enabled or disabled based on whether - /// the peerset accepts or rejects the peer. - Incoming, + /// We are connected to this peer. We have received an `OpenDesiredByRemote` from one of the + /// handlers and forwarded that request to the peerset. The connection handlers are waiting for + /// a response, i.e. to be opened or closed based on whether the peerset accepts or rejects + /// the peer. + Incoming { + /// If `Some`, any dial attempts to this peer are delayed until the given `Instant`. + backoff_until: Option, + + /// List of connections with this peer, and their state. + connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>, + }, } impl PeerState { @@ -229,18 +231,19 @@ impl PeerState { /// that is open for custom protocol traffic. fn get_open(&self) -> Option<&NotificationsSink> { match self { - PeerState::Disabled { open, .. } | - PeerState::DisabledPendingEnable { open, .. } | - PeerState::Enabled { open, .. } => - if !open.is_empty() { - Some(&open[0].1) - } else { - None - } + PeerState::Enabled { connections, .. } => connections + .iter() + .filter_map(|(_, s)| match s { + ConnectionState::Open(s) => Some(s), + _ => None, + }) + .next(), PeerState::Poisoned => None, - PeerState::Banned { .. } => None, + PeerState::Backoff { .. } => None, PeerState::PendingRequest { .. } => None, PeerState::Requested => None, + PeerState::Disabled { .. } => None, + PeerState::DisabledPendingEnable { .. } => None, PeerState::Incoming { .. } => None, } } @@ -249,7 +252,7 @@ impl PeerState { fn is_requested(&self) -> bool { match self { PeerState::Poisoned => false, - PeerState::Banned { .. } => false, + PeerState::Backoff { .. } => false, PeerState::PendingRequest { .. } => true, PeerState::Requested => true, PeerState::Disabled { .. } => false, @@ -260,6 +263,37 @@ impl PeerState { } } +/// State of the handler of a single connection visible from this state machine. +#[derive(Debug)] +enum ConnectionState { + /// Connection is in the `Closed` state, meaning that the remote hasn't requested anything. 
+ Closed, + + /// Connection is either in the `Open` or the `Closed` state, but a + /// [`NotifsHandlerIn::Close`] message has been sent. Waiting for this message to be + /// acknowledged through a [`NotifsHandlerOut::CloseResult`]. + Closing, + + /// Connection is in the `Closed` state but a [`NotifsHandlerIn::Open`] message has been sent. + /// An `OpenResultOk`/`OpenResultErr` message is expected. + Opening, + + /// Connection is in the `Closed` state but a [`NotifsHandlerIn::Open`] message then a + /// [`NotifsHandlerIn::Close`] message has been sent. An `OpenResultOk`/`OpenResultErr` message + /// followed with a `CloseResult` message are expected. + OpeningThenClosing, + + /// Connection is in the `Closed` state, but a [`NotifsHandlerOut::OpenDesiredByRemote`] + /// message has been received, meaning that the remote wants to open a substream. + OpenDesiredByRemote, + + /// Connection is in the `Open` state. + /// + /// The external API is notified of a channel with this peer if any of its connection is in + /// this state. + Open(NotificationsSink), +} + /// State of an "incoming" message sent to the peer set manager. #[derive(Debug)] struct IncomingPeer { @@ -303,8 +337,6 @@ pub enum GenericProtoOut { CustomProtocolClosed { /// Id of the peer we were connected to. peer_id: PeerId, - /// Reason why the substream closed, for debugging purposes. - reason: Cow<'static, str>, }, /// Receives a message on the legacy substream. @@ -438,46 +470,79 @@ impl GenericProto { st @ PeerState::Disabled { .. } => *entry.into_mut() = st, st @ PeerState::Requested => *entry.into_mut() = st, st @ PeerState::PendingRequest { .. } => *entry.into_mut() = st, - st @ PeerState::Banned { .. } => *entry.into_mut() = st, + st @ PeerState::Backoff { .. } => *entry.into_mut() = st, // DisabledPendingEnable => Disabled. PeerState::DisabledPendingEnable { - open, + connections, timer_deadline, timer: _ } => { debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); self.peerset.dropped(peer_id.clone()); - let banned_until = Some(if let Some(ban) = ban { + let backoff_until = Some(if let Some(ban) = ban { cmp::max(timer_deadline, Instant::now() + ban) } else { timer_deadline }); *entry.into_mut() = PeerState::Disabled { - open, - banned_until + connections, + backoff_until } }, // Enabled => Disabled. - PeerState::Enabled { open } => { + // All open or opening connections are sent a `Close` message. + // If relevant, the external API is instantly notified. 
+ PeerState::Enabled { mut connections } => { debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); self.peerset.dropped(peer_id.clone()); - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Disable, - }); - let banned_until = ban.map(|dur| Instant::now() + dur); + + if connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))) { + debug!(target: "sub-libp2p", "External API <= Closed({})", peer_id); + let event = GenericProtoOut::CustomProtocolClosed { + peer_id: peer_id.clone(), + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + + for (connec_id, connec_state) in connections.iter_mut() + .filter(|(_, s)| matches!(s, ConnectionState::Open(_))) + { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", peer_id, *connec_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Close, + }); + *connec_state = ConnectionState::Closing; + } + + for (connec_id, connec_state) in connections.iter_mut() + .filter(|(_, s)| matches!(s, ConnectionState::Opening)) + { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", peer_id, *connec_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Close, + }); + *connec_state = ConnectionState::OpeningThenClosing; + } + + debug_assert!(!connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_)))); + debug_assert!(!connections.iter().any(|(_, s)| matches!(s, ConnectionState::Opening))); + + let backoff_until = ban.map(|dur| Instant::now() + dur); *entry.into_mut() = PeerState::Disabled { - open, - banned_until + connections, + backoff_until } }, // Incoming => Disabled. - PeerState::Incoming => { + // Ongoing opening requests from the remote are rejected. 
+ PeerState::Incoming { mut connections, backoff_until } => { let inc = if let Some(inc) = self.incoming.iter_mut() .find(|i| i.peer_id == *entry.key() && i.alive) { inc @@ -488,16 +553,30 @@ impl GenericProto { }; inc.alive = false; - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Disable, - }); - let banned_until = ban.map(|dur| Instant::now() + dur); + + for (connec_id, connec_state) in connections.iter_mut() + .filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)) + { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", peer_id, *connec_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Close, + }); + *connec_state = ConnectionState::Closing; + } + + let backoff_until = match (backoff_until, ban) { + (Some(a), Some(b)) => Some(cmp::max(a, Instant::now() + b)), + (Some(a), None) => Some(a), + (None, Some(b)) => Some(Instant::now() + b), + (None, None) => None, + }; + + debug_assert!(!connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); *entry.into_mut() = PeerState::Disabled { - open: SmallVec::new(), - banned_until + connections, + backoff_until } }, @@ -521,7 +600,7 @@ impl GenericProto { Some(PeerState::Incoming { .. }) => false, Some(PeerState::Requested) => false, Some(PeerState::PendingRequest { .. }) => false, - Some(PeerState::Banned { .. }) => false, + Some(PeerState::Backoff { .. }) => false, Some(PeerState::Poisoned) => false, } } @@ -591,7 +670,8 @@ impl GenericProto { /// Function that is called when the peerset wants us to connect to a peer. fn peerset_report_connect(&mut self, peer_id: PeerId) { - let mut occ_entry = match self.peers.entry(peer_id) { + // If `PeerId` is unknown to us, insert an entry, start dialing, and return early. + let mut occ_entry = match self.peers.entry(peer_id.clone()) { Entry::Occupied(entry) => entry, Entry::Vacant(entry) => { // If there's no entry in `self.peers`, start dialing. @@ -609,26 +689,19 @@ impl GenericProto { let now = Instant::now(); match mem::replace(occ_entry.get_mut(), PeerState::Poisoned) { - PeerState::Banned { ref until } if *until > now => { + // Backoff (not expired) => PendingRequest + PeerState::Backoff { ref timer, ref timer_deadline } if *timer_deadline > now => { let peer_id = occ_entry.key().clone(); debug!(target: "sub-libp2p", "PSM => Connect({:?}): Will start to connect at \ - until {:?}", peer_id, until); - - let delay_id = self.next_delay_id; - self.next_delay_id.0 += 1; - let delay = futures_timer::Delay::new(*until - now); - self.delays.push(async move { - delay.await; - (delay_id, peer_id) - }.boxed()); - + until {:?}", peer_id, timer_deadline); *occ_entry.into_mut() = PeerState::PendingRequest { - timer: delay_id, - timer_deadline: *until, + timer: *timer, + timer_deadline: *timer_deadline, }; }, - PeerState::Banned { .. } => { + // Backoff (expired) => Requested + PeerState::Backoff { .. 
} => { debug!(target: "sub-libp2p", "PSM => Connect({:?}): Starting to connect", occ_entry.key()); debug!(target: "sub-libp2p", "Libp2p <= Dial {:?}", occ_entry.key()); self.events.push_back(NetworkBehaviourAction::DialPeer { @@ -638,42 +711,90 @@ impl GenericProto { *occ_entry.into_mut() = PeerState::Requested; }, + // Disabled (with non-expired ban) => DisabledPendingEnable PeerState::Disabled { - open, - banned_until: Some(ref banned) - } if *banned > now => { + connections, + backoff_until: Some(ref backoff) + } if *backoff > now => { let peer_id = occ_entry.key().clone(); - debug!(target: "sub-libp2p", "PSM => Connect({:?}): But peer is banned until {:?}", - peer_id, banned); + debug!(target: "sub-libp2p", "PSM => Connect({:?}): But peer is backed-off until {:?}", + peer_id, backoff); let delay_id = self.next_delay_id; self.next_delay_id.0 += 1; - let delay = futures_timer::Delay::new(*banned - now); + let delay = futures_timer::Delay::new(*backoff - now); self.delays.push(async move { delay.await; (delay_id, peer_id) }.boxed()); *occ_entry.into_mut() = PeerState::DisabledPendingEnable { - open, + connections, timer: delay_id, - timer_deadline: *banned, + timer_deadline: *backoff, }; }, - PeerState::Disabled { open, banned_until: _ } => { - debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling connections.", - occ_entry.key()); - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", occ_entry.key()); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: occ_entry.key().clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Enable, - }); - *occ_entry.into_mut() = PeerState::Enabled { open }; + // Disabled => Enabled + PeerState::Disabled { mut connections, backoff_until } => { + debug_assert!(!connections.iter().any(|(_, s)| { + matches!(s, ConnectionState::Open(_)) + })); + + // The first element of `closed` is chosen to open the notifications substream. + if let Some((connec_id, connec_state)) = connections.iter_mut() + .find(|(_, s)| matches!(s, ConnectionState::Closed)) + { + debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling connections.", + occ_entry.key()); + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", peer_id, *connec_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Open, + }); + *connec_state = ConnectionState::Opening; + *occ_entry.into_mut() = PeerState::Enabled { connections }; + } else { + // If no connection is available, switch to `DisabledPendingEnable` in order + // to try again later. + debug_assert!(connections.iter().any(|(_, s)| { + matches!(s, ConnectionState::OpeningThenClosing | ConnectionState::Closing) + })); + debug!( + target: "sub-libp2p", + "PSM => Connect({:?}): No connection in proper state. Delaying.", + occ_entry.key() + ); + + let timer_deadline = { + let base = now + Duration::from_secs(5); + if let Some(backoff_until) = backoff_until { + cmp::max(base, backoff_until) + } else { + base + } + }; + + let delay_id = self.next_delay_id; + self.next_delay_id.0 += 1; + debug_assert!(timer_deadline > now); + let delay = futures_timer::Delay::new(timer_deadline - now); + self.delays.push(async move { + delay.await; + (delay_id, peer_id) + }.boxed()); + + *occ_entry.into_mut() = PeerState::DisabledPendingEnable { + connections, + timer: delay_id, + timer_deadline, + }; + } }, - PeerState::Incoming => { + // Incoming => Enabled + PeerState::Incoming { mut connections, .. 
} => { debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling connections.", occ_entry.key()); if let Some(inc) = self.incoming.iter_mut() @@ -683,36 +804,50 @@ impl GenericProto { error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \ incoming for incoming peer") } - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", occ_entry.key()); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: occ_entry.key().clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Enable, - }); - *occ_entry.into_mut() = PeerState::Enabled { open: SmallVec::new() }; + + debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); + for (connec_id, connec_state) in connections.iter_mut() + .filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)) + { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", occ_entry.key(), *connec_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: occ_entry.key().clone(), + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Open, + }); + *connec_state = ConnectionState::Opening; + } + + *occ_entry.into_mut() = PeerState::Enabled { connections }; }, + // Other states are kept as-is. st @ PeerState::Enabled { .. } => { warn!(target: "sub-libp2p", "PSM => Connect({:?}): Already connected.", occ_entry.key()); *occ_entry.into_mut() = st; + debug_assert!(false); }, st @ PeerState::DisabledPendingEnable { .. } => { warn!(target: "sub-libp2p", "PSM => Connect({:?}): Already pending enabling.", occ_entry.key()); *occ_entry.into_mut() = st; + debug_assert!(false); }, st @ PeerState::Requested { .. } | st @ PeerState::PendingRequest { .. } => { warn!(target: "sub-libp2p", "PSM => Connect({:?}): Duplicate request.", occ_entry.key()); *occ_entry.into_mut() = st; + debug_assert!(false); }, - PeerState::Poisoned => - error!(target: "sub-libp2p", "State of {:?} is poisoned", occ_entry.key()), + PeerState::Poisoned => { + error!(target: "sub-libp2p", "State of {:?} is poisoned", occ_entry.key()); + debug_assert!(false); + }, } } @@ -727,43 +862,66 @@ impl GenericProto { }; match mem::replace(entry.get_mut(), PeerState::Poisoned) { - st @ PeerState::Disabled { .. } | st @ PeerState::Banned { .. } => { + st @ PeerState::Disabled { .. } | st @ PeerState::Backoff { .. 
} => { debug!(target: "sub-libp2p", "PSM => Drop({:?}): Already disabled.", entry.key()); *entry.into_mut() = st; }, - PeerState::DisabledPendingEnable { - open, - timer_deadline, - timer: _ - } => { + // DisabledPendingEnable => Disabled + PeerState::DisabledPendingEnable { connections, timer_deadline, timer: _ } => { + debug_assert!(!connections.is_empty()); debug!(target: "sub-libp2p", "PSM => Drop({:?}): Interrupting pending enabling.", entry.key()); *entry.into_mut() = PeerState::Disabled { - open, - banned_until: Some(timer_deadline), + connections, + backoff_until: Some(timer_deadline), }; }, - PeerState::Enabled { open } => { + // Enabled => Disabled + PeerState::Enabled { mut connections } => { debug!(target: "sub-libp2p", "PSM => Drop({:?}): Disabling connections.", entry.key()); - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", entry.key()); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: entry.key().clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Disable, - }); - *entry.into_mut() = PeerState::Disabled { - open, - banned_until: None + + debug_assert!(connections.iter().any(|(_, s)| + matches!(s, ConnectionState::Opening | ConnectionState::Open(_)))); + + if connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))) { + debug!(target: "sub-libp2p", "External API <= Closed({})", entry.key()); + let event = GenericProtoOut::CustomProtocolClosed { + peer_id: entry.key().clone(), + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); } + + for (connec_id, connec_state) in connections.iter_mut() + .filter(|(_, s)| matches!(s, ConnectionState::Opening)) + { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", entry.key(), *connec_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: entry.key().clone(), + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Close, + }); + *connec_state = ConnectionState::OpeningThenClosing; + } + + for (connec_id, connec_state) in connections.iter_mut() + .filter(|(_, s)| matches!(s, ConnectionState::Open(_))) + { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", entry.key(), *connec_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: entry.key().clone(), + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Close, + }); + *connec_state = ConnectionState::Closing; + } + + *entry.into_mut() = PeerState::Disabled { connections, backoff_until: None } }, - st @ PeerState::Incoming => { - error!(target: "sub-libp2p", "PSM => Drop({:?}): Not enabled (Incoming).", - entry.key()); - *entry.into_mut() = st; - }, + + // Requested => Ø PeerState::Requested => { // We don't cancel dialing. Libp2p doesn't expose that on purpose, as other // sub-systems (such as the discovery mechanism) may require dialing this peer as @@ -771,13 +929,24 @@ impl GenericProto { debug!(target: "sub-libp2p", "PSM => Drop({:?}): Not yet connected.", entry.key()); entry.remove(); }, - PeerState::PendingRequest { timer_deadline, .. } => { + + // PendingRequest => Backoff + PeerState::PendingRequest { timer, timer_deadline } => { debug!(target: "sub-libp2p", "PSM => Drop({:?}): Not yet connected", entry.key()); - *entry.into_mut() = PeerState::Banned { until: timer_deadline } + *entry.into_mut() = PeerState::Backoff { timer, timer_deadline } }, - PeerState::Poisoned => - error!(target: "sub-libp2p", "State of {:?} is poisoned", entry.key()), + // Invalid state transitions. 
+ st @ PeerState::Incoming { .. } => { + error!(target: "sub-libp2p", "PSM => Drop({:?}): Not enabled (Incoming).", + entry.key()); + *entry.into_mut() = st; + debug_assert!(!false); + }, + PeerState::Poisoned => { + error!(target: "sub-libp2p", "State of {:?} is poisoned", entry.key()); + debug_assert!(!false); + }, } } @@ -792,28 +961,56 @@ impl GenericProto { }; if !incoming.alive { - debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Obsolete incoming, - sending back dropped", index, incoming.peer_id); - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", incoming.peer_id); - self.peerset.dropped(incoming.peer_id); + debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Obsolete incoming", + index, incoming.peer_id); + match self.peers.get_mut(&incoming.peer_id) { + Some(PeerState::DisabledPendingEnable { .. }) | + Some(PeerState::Enabled { .. }) => {} + _ => { + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", incoming.peer_id); + self.peerset.dropped(incoming.peer_id); + }, + } return } - match self.peers.get_mut(&incoming.peer_id) { - Some(state @ PeerState::Incoming) => { + let state = match self.peers.get_mut(&incoming.peer_id) { + Some(s) => s, + None => { + debug_assert!(false); + return; + } + }; + + match mem::replace(state, PeerState::Poisoned) { + // Incoming => Enabled + PeerState::Incoming { mut connections, .. } => { debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Enabling connections.", index, incoming.peer_id); - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", incoming.peer_id); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: incoming.peer_id, - handler: NotifyHandler::All, - event: NotifsHandlerIn::Enable, - }); - *state = PeerState::Enabled { open: SmallVec::new() }; + + debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); + for (connec_id, connec_state) in connections.iter_mut() + .filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)) + { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", incoming.peer_id, *connec_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: incoming.peer_id.clone(), + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Open, + }); + *connec_state = ConnectionState::Opening; + } + + *state = PeerState::Enabled { connections }; + } + + // Any state other than `Incoming` is invalid. + peer => { + error!(target: "sub-libp2p", + "State mismatch in libp2p: Expected alive incoming. Got {:?}.", + peer); + debug_assert!(false); } - peer => error!(target: "sub-libp2p", - "State mismatch in libp2p: Expected alive incoming. 
Got {:?}.", - peer) } } @@ -832,20 +1029,34 @@ impl GenericProto { return } - match self.peers.get_mut(&incoming.peer_id) { - Some(state @ PeerState::Incoming) => { + let state = match self.peers.get_mut(&incoming.peer_id) { + Some(s) => s, + None => { + debug_assert!(false); + return; + } + }; + + match mem::replace(state, PeerState::Poisoned) { + // Incoming => Disabled + PeerState::Incoming { mut connections, backoff_until } => { debug!(target: "sub-libp2p", "PSM => Reject({:?}, {:?}): Rejecting connections.", index, incoming.peer_id); - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", incoming.peer_id); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: incoming.peer_id, - handler: NotifyHandler::All, - event: NotifsHandlerIn::Disable, - }); - *state = PeerState::Disabled { - open: SmallVec::new(), - banned_until: None - }; + + debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); + for (connec_id, connec_state) in connections.iter_mut() + .filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)) + { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", incoming.peer_id, connec_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: incoming.peer_id.clone(), + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Close, + }); + *connec_state = ConnectionState::Closing; + } + + *state = PeerState::Disabled { connections, backoff_until }; } peer => error!(target: "sub-libp2p", "State mismatch in libp2p: Expected alive incoming. Got {:?}.", @@ -873,212 +1084,309 @@ impl NetworkBehaviour for GenericProto { } fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { - debug!(target: "sub-libp2p", "Libp2p => Connection ({:?},{:?}) to {} established.", - conn, endpoint, peer_id); - match (self.peers.entry(peer_id.clone()).or_insert(PeerState::Poisoned), endpoint) { - (st @ &mut PeerState::Requested, endpoint) | - (st @ &mut PeerState::PendingRequest { .. }, endpoint) => { + match self.peers.entry(peer_id.clone()).or_insert(PeerState::Poisoned) { + // Requested | PendingRequest => Enabled + st @ &mut PeerState::Requested | + st @ &mut PeerState::PendingRequest { .. } => { debug!(target: "sub-libp2p", "Libp2p => Connected({}, {:?}): Connection was requested by PSM.", peer_id, endpoint ); - *st = PeerState::Enabled { open: SmallVec::new() }; + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", peer_id, *conn); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: peer_id.clone(), handler: NotifyHandler::One(*conn), - event: NotifsHandlerIn::Enable + event: NotifsHandlerIn::Open }); - } - // Note: it may seem weird that "Banned" peers get treated as if they were absent. - // This is because the word "Banned" means "temporarily prevent outgoing connections to - // this peer", and not "banned" in the sense that we would refuse the peer altogether. - (st @ &mut PeerState::Poisoned, endpoint @ ConnectedPoint::Listener { .. }) | - (st @ &mut PeerState::Banned { .. }, endpoint @ ConnectedPoint::Listener { .. 
}) => { - let incoming_id = self.next_incoming_index; - self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) { - Some(v) => v, - None => { - error!(target: "sub-libp2p", "Overflow in next_incoming_index"); - return - } - }; - debug!(target: "sub-libp2p", "Libp2p => Connected({}, {:?}): Incoming connection", - peer_id, endpoint); - debug!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).", - peer_id, incoming_id); - self.peerset.incoming(peer_id.clone(), incoming_id); - self.incoming.push(IncomingPeer { - peer_id: peer_id.clone(), - alive: true, - incoming_id, - }); - *st = PeerState::Incoming { }; + let mut connections = SmallVec::new(); + connections.push((*conn, ConnectionState::Opening)); + *st = PeerState::Enabled { connections }; } - (st @ &mut PeerState::Poisoned, endpoint) | - (st @ &mut PeerState::Banned { .. }, endpoint) => { - let banned_until = if let PeerState::Banned { until } = st { - Some(*until) + // Poisoned gets inserted above if the entry was missing. + // Ø | Backoff => Disabled + st @ &mut PeerState::Poisoned | + st @ &mut PeerState::Backoff { .. } => { + let backoff_until = if let PeerState::Backoff { timer_deadline, .. } = st { + Some(*timer_deadline) } else { None }; debug!(target: "sub-libp2p", - "Libp2p => Connected({},{:?}): Not requested by PSM, disabling.", - peer_id, endpoint); - *st = PeerState::Disabled { open: SmallVec::new(), banned_until }; - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::One(*conn), - event: NotifsHandlerIn::Disable - }); - } + "Libp2p => Connected({}, {:?}, {:?}): Not requested by PSM, disabling.", + peer_id, endpoint, *conn); - (PeerState::Incoming { .. }, _) => { - debug!(target: "sub-libp2p", - "Secondary connection {:?} to {} waiting for PSM decision.", - conn, peer_id); - }, - - (PeerState::Enabled { .. }, _) => { - debug!(target: "sub-libp2p", "Handler({},{:?}) <= Enable secondary connection", - peer_id, conn); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::One(*conn), - event: NotifsHandlerIn::Enable - }); + let mut connections = SmallVec::new(); + connections.push((*conn, ConnectionState::Closed)); + *st = PeerState::Disabled { connections, backoff_until }; } - (PeerState::Disabled { .. }, _) | (PeerState::DisabledPendingEnable { .. }, _) => { - debug!(target: "sub-libp2p", "Handler({},{:?}) <= Disable secondary connection", - peer_id, conn); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::One(*conn), - event: NotifsHandlerIn::Disable - }); + // In all other states, add this new connection to the list of closed inactive + // connections. + PeerState::Incoming { connections, .. } | + PeerState::Disabled { connections, .. } | + PeerState::DisabledPendingEnable { connections, .. } | + PeerState::Enabled { connections, .. } => { + debug!(target: "sub-libp2p", + "Libp2p => Connected({}, {:?}, {:?}): Secondary connection. Leaving closed.", + peer_id, endpoint, *conn); + connections.push((*conn, ConnectionState::Closed)); } } } - fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { - debug!(target: "sub-libp2p", "Libp2p => Connection ({:?},{:?}) to {} closed.", - conn, endpoint, peer_id); - match self.peers.get_mut(peer_id) { - Some(PeerState::Disabled { open, .. }) | - Some(PeerState::DisabledPendingEnable { open, .. 
}) | - Some(PeerState::Enabled { open, .. }) => { - // Check if the "link" to the peer is already considered closed, - // i.e. there is no connection that is open for custom protocols, - // in which case `CustomProtocolClosed` was already emitted. - let closed = open.is_empty(); - let sink_closed = open.get(0).map_or(false, |(c, _)| c == conn); - open.retain(|(c, _)| c != conn); - if !closed { - if let Some((_, sink)) = open.get(0) { - if sink_closed { - let event = GenericProtoOut::CustomProtocolReplaced { - peer_id: peer_id.clone(), - notifications_sink: sink.clone(), + fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, _endpoint: &ConnectedPoint) { + let mut entry = if let Entry::Occupied(entry) = self.peers.entry(peer_id.clone()) { + entry + } else { + error!(target: "sub-libp2p", "inject_connection_closed: State mismatch in the custom protos handler"); + debug_assert!(false); + return + }; + + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + // Disabled => Disabled | Backoff | Ø + PeerState::Disabled { mut connections, backoff_until } => { + debug!(target: "sub-libp2p", "Libp2p => Disconnected({}, {:?}): Disabled.", peer_id, *conn); + + if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) { + connections.remove(pos); + } else { + debug_assert!(false); + error!(target: "sub-libp2p", + "inject_connection_closed: State mismatch in the custom protos handler"); + } + + if connections.is_empty() { + if let Some(until) = backoff_until { + let now = Instant::now(); + if until > now { + let delay_id = self.next_delay_id; + self.next_delay_id.0 += 1; + let delay = futures_timer::Delay::new(until - now); + let peer_id = peer_id.clone(); + self.delays.push(async move { + delay.await; + (delay_id, peer_id) + }.boxed()); + + *entry.get_mut() = PeerState::Backoff { + timer: delay_id, + timer_deadline: until, }; - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } else { + entry.remove(); } } else { - debug!(target: "sub-libp2p", "External API <= Closed({})", peer_id); - let event = GenericProtoOut::CustomProtocolClosed { - peer_id: peer_id.clone(), - reason: "Disconnected by libp2p".into(), - }; - - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + entry.remove(); } + } else { + *entry.get_mut() = PeerState::Disabled { connections, backoff_until }; } - } - _ => {} - } - } + }, - fn inject_disconnected(&mut self, peer_id: &PeerId) { - match self.peers.remove(peer_id) { - None | Some(PeerState::Requested) | Some(PeerState::PendingRequest { .. }) | - Some(PeerState::Banned { .. }) => - // This is a serious bug either in this state machine or in libp2p. - error!(target: "sub-libp2p", - "`inject_disconnected` called for unknown peer {}", - peer_id), + // DisabledPendingEnable => DisabledPendingEnable | Backoff + PeerState::DisabledPendingEnable { mut connections, timer_deadline, timer } => { + debug!( + target: "sub-libp2p", + "Libp2p => Disconnected({}, {:?}): Disabled but pending enable.", + peer_id, *conn + ); - Some(PeerState::Disabled { open, banned_until, .. 
}) => { - if !open.is_empty() { + if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) { + connections.remove(pos); + } else { debug_assert!(false); - error!( - target: "sub-libp2p", - "State mismatch: disconnected from {} with non-empty list of connections", - peer_id - ); + error!(target: "sub-libp2p", + "inject_connection_closed: State mismatch in the custom protos handler"); } - debug!(target: "sub-libp2p", "Libp2p => Disconnected({}): Was disabled.", peer_id); - if let Some(until) = banned_until { - self.peers.insert(peer_id.clone(), PeerState::Banned { until }); + + if connections.is_empty() { + debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id); + self.peerset.dropped(peer_id.clone()); + *entry.get_mut() = PeerState::Backoff { timer, timer_deadline }; + + } else { + *entry.get_mut() = PeerState::DisabledPendingEnable { + connections, timer_deadline, timer + }; } - } + }, - Some(PeerState::DisabledPendingEnable { open, timer_deadline, .. }) => { - if !open.is_empty() { + // Incoming => Incoming | Disabled | Backoff | Ø + PeerState::Incoming { mut connections, backoff_until } => { + debug!( + target: "sub-libp2p", + "Libp2p => Disconnected({}, {:?}): OpenDesiredByRemote.", + peer_id, *conn + ); + + debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); + + if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) { + connections.remove(pos); + } else { debug_assert!(false); - error!( - target: "sub-libp2p", - "State mismatch: disconnected from {} with non-empty list of connections", - peer_id - ); + error!(target: "sub-libp2p", + "inject_connection_closed: State mismatch in the custom protos handler"); + } + + let no_desired_left = !connections.iter().any(|(_, s)| { + matches!(s, ConnectionState::OpenDesiredByRemote) + }); + + // If no connection is `OpenDesiredByRemote` anymore, clean up the peerset incoming + // request. + if no_desired_left { + // In the incoming state, we don't report "Dropped". Instead we will just + // ignore the corresponding Accept/Reject. + if let Some(state) = self.incoming.iter_mut() + .find(|i| i.alive && i.peer_id == *peer_id) + { + state.alive = false; + } else { + error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \ + incoming corresponding to an incoming state in peers"); + debug_assert!(false); + } + } + + if connections.is_empty() { + if let Some(until) = backoff_until { + let now = Instant::now(); + if until > now { + let delay_id = self.next_delay_id; + self.next_delay_id.0 += 1; + let delay = futures_timer::Delay::new(until - now); + let peer_id = peer_id.clone(); + self.delays.push(async move { + delay.await; + (delay_id, peer_id) + }.boxed()); + + *entry.get_mut() = PeerState::Backoff { + timer: delay_id, + timer_deadline: until, + }; + } else { + entry.remove(); + } + } else { + entry.remove(); + } + + } else if no_desired_left { + // If no connection is `OpenDesiredByRemote` anymore, switch to `Disabled`. + *entry.get_mut() = PeerState::Disabled { connections, backoff_until }; + } else { + *entry.get_mut() = PeerState::Incoming { connections, backoff_until }; } - debug!(target: "sub-libp2p", - "Libp2p => Disconnected({}): Was disabled but pending enable.", - peer_id); - debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id); - self.peerset.dropped(peer_id.clone()); - self.peers.insert(peer_id.clone(), PeerState::Banned { until: timer_deadline }); } - Some(PeerState::Enabled { open, .. 
}) => { - if !open.is_empty() { + // Enabled => Enabled | Backoff + // Peers are always backed-off when disconnecting while Enabled. + PeerState::Enabled { mut connections } => { + debug!( + target: "sub-libp2p", + "Libp2p => Disconnected({}, {:?}): Enabled.", + peer_id, *conn + ); + + debug_assert!(connections.iter().any(|(_, s)| + matches!(s, ConnectionState::Opening | ConnectionState::Open(_)))); + + if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) { + let (_, state) = connections.remove(pos); + if let ConnectionState::Open(_) = state { + if let Some((replacement_pos, replacement_sink)) = connections + .iter() + .enumerate() + .filter_map(|(num, (_, s))| { + match s { + ConnectionState::Open(s) => Some((num, s.clone())), + _ => None + } + }) + .next() + { + if pos <= replacement_pos { + debug!(target: "sub-libp2p", "External API <= Sink replaced({})", peer_id); + let event = GenericProtoOut::CustomProtocolReplaced { + peer_id: peer_id.clone(), + notifications_sink: replacement_sink, + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + } else { + debug!(target: "sub-libp2p", "External API <= Closed({})", peer_id); + let event = GenericProtoOut::CustomProtocolClosed { + peer_id: peer_id.clone(), + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + } + + } else { + error!(target: "sub-libp2p", + "inject_connection_closed: State mismatch in the custom protos handler"); debug_assert!(false); - error!( - target: "sub-libp2p", - "State mismatch: disconnected from {} with non-empty list of connections", - peer_id - ); } - debug!(target: "sub-libp2p", "Libp2p => Disconnected({}): Was enabled.", peer_id); - debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id); - self.peerset.dropped(peer_id.clone()); - let ban_dur = Uniform::new(5, 10).sample(&mut rand::thread_rng()); - self.peers.insert(peer_id.clone(), PeerState::Banned { - until: Instant::now() + Duration::from_secs(ban_dur) - }); - } - // In the incoming state, we don't report "Dropped". Instead we will just ignore the - // corresponding Accept/Reject. 
- Some(PeerState::Incoming { }) => { - if let Some(state) = self.incoming.iter_mut() - .find(|i| i.alive && i.peer_id == *peer_id) + if connections.is_empty() { + debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id); + self.peerset.dropped(peer_id.clone()); + let ban_dur = Uniform::new(5, 10).sample(&mut rand::thread_rng()); + + let delay_id = self.next_delay_id; + self.next_delay_id.0 += 1; + let delay = futures_timer::Delay::new(Duration::from_secs(ban_dur)); + let peer_id = peer_id.clone(); + self.delays.push(async move { + delay.await; + (delay_id, peer_id) + }.boxed()); + + *entry.get_mut() = PeerState::Backoff { + timer: delay_id, + timer_deadline: Instant::now() + Duration::from_secs(ban_dur), + }; + + } else if !connections.iter().any(|(_, s)| + matches!(s, ConnectionState::Opening | ConnectionState::Open(_))) { - debug!(target: "sub-libp2p", - "Libp2p => Disconnected({}): Was in incoming mode with id {:?}.", - peer_id, state.incoming_id); - state.alive = false; + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); + self.peerset.dropped(peer_id.clone()); + + *entry.get_mut() = PeerState::Disabled { + connections, + backoff_until: None + }; + } else { - error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in incoming \ - corresponding to an incoming state in peers") + *entry.get_mut() = PeerState::Enabled { connections }; } } - Some(PeerState::Poisoned) => - error!(target: "sub-libp2p", "State of peer {} is poisoned", peer_id), + PeerState::Requested | + PeerState::PendingRequest { .. } | + PeerState::Backoff { .. } => { + // This is a serious bug either in this state machine or in libp2p. + error!(target: "sub-libp2p", + "`inject_connection_closed` called for unknown peer {}", + peer_id); + debug_assert!(false); + }, + PeerState::Poisoned => { + error!(target: "sub-libp2p", "State of peer {} is poisoned", peer_id); + debug_assert!(false); + }, } } + fn inject_disconnected(&mut self, _peer_id: &PeerId) { + } + fn inject_addr_reach_failure(&mut self, peer_id: Option<&PeerId>, addr: &Multiaddr, error: &dyn error::Error) { trace!(target: "sub-libp2p", "Libp2p => Reach failure for {:?} through {:?}: {:?}", peer_id, addr, error); } @@ -1087,19 +1395,39 @@ impl NetworkBehaviour for GenericProto { if let Entry::Occupied(mut entry) = self.peers.entry(peer_id.clone()) { match mem::replace(entry.get_mut(), PeerState::Poisoned) { // The peer is not in our list. - st @ PeerState::Banned { .. } => { + st @ PeerState::Backoff { .. } => { trace!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); *entry.into_mut() = st; }, // "Basic" situation: we failed to reach a peer that the peerset requested. - PeerState::Requested | PeerState::PendingRequest { .. } => { + st @ PeerState::Requested | + st @ PeerState::PendingRequest { .. } => { debug!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); - *entry.into_mut() = PeerState::Banned { - until: Instant::now() + Duration::from_secs(5) - }; + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); - self.peerset.dropped(peer_id.clone()) + self.peerset.dropped(peer_id.clone()); + + let now = Instant::now(); + let ban_duration = match st { + PeerState::PendingRequest { timer_deadline, .. 
} if timer_deadline > now => + cmp::max(timer_deadline - now, Duration::from_secs(5)), + _ => Duration::from_secs(5) + }; + + let delay_id = self.next_delay_id; + self.next_delay_id.0 += 1; + let delay = futures_timer::Delay::new(ban_duration); + let peer_id = peer_id.clone(); + self.delays.push(async move { + delay.await; + (delay_id, peer_id) + }.boxed()); + + *entry.into_mut() = PeerState::Backoff { + timer: delay_id, + timer_deadline: now + ban_duration, + }; }, // We can still get dial failures even if we are already connected to the peer, @@ -1110,8 +1438,10 @@ impl NetworkBehaviour for GenericProto { *entry.into_mut() = st; }, - PeerState::Poisoned => - error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id), + PeerState::Poisoned => { + error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id); + debug_assert!(false); + }, } } else { @@ -1127,123 +1457,271 @@ impl NetworkBehaviour for GenericProto { event: NotifsHandlerOut, ) { match event { - NotifsHandlerOut::Closed { endpoint, reason } => { + NotifsHandlerOut::OpenDesiredByRemote => { debug!(target: "sub-libp2p", - "Handler({:?}) => Endpoint {:?} closed for custom protocols: {}", - source, endpoint, reason); + "Handler({:?}, {:?}]) => OpenDesiredByRemote", + source, connection); let mut entry = if let Entry::Occupied(entry) = self.peers.entry(source.clone()) { entry } else { - error!(target: "sub-libp2p", "Closed: State mismatch in the custom protos handler"); + error!(target: "sub-libp2p", "OpenDesiredByRemote: State mismatch in the custom protos handler"); + debug_assert!(false); return }; - let (last, new_notifications_sink) = match mem::replace(entry.get_mut(), PeerState::Poisoned) { - PeerState::Enabled { mut open } => { - let pos = open.iter().position(|(c, _)| c == &connection); - let sink_closed = pos == Some(0); - if let Some(pos) = pos { - open.remove(pos); + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + // Incoming => Incoming + PeerState::Incoming { mut connections, backoff_until } => { + debug_assert!(connections.iter().any(|(_, s)| + matches!(s, ConnectionState::OpenDesiredByRemote))); + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) { + if let ConnectionState::Closed = *connec_state { + *connec_state = ConnectionState::OpenDesiredByRemote; + } else { + // Connections in `OpeningThenClosing` state are in a Closed phase, + // and as such can emit `OpenDesiredByRemote` messages. + // Since an `Open` and a `Close` messages have already been sent, + // there is nothing much that can be done about this anyway. + debug_assert!(matches!( + connec_state, + ConnectionState::OpeningThenClosing + )); + } } else { - debug_assert!(false); error!( target: "sub-libp2p", - "State mismatch with {}: unknown closed connection", - source + "OpenDesiredByRemote: State mismatch in the custom protos handler" ); + debug_assert!(false); } - // TODO: We switch the entire peer state to "disabled" because of possible - // race conditions involving the legacy substream. - // Once https://github.com/paritytech/substrate/issues/5670 is done, this - // should be changed to stay in the `Enabled` state. 
- debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", source); - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", source); - self.peerset.dropped(source.clone()); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: source.clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Disable, - }); + *entry.into_mut() = PeerState::Incoming { connections, backoff_until }; + }, - let last = open.is_empty(); - let new_notifications_sink = open.iter().next().and_then(|(_, sink)| - if sink_closed { - Some(sink.clone()) + PeerState::Enabled { mut connections } => { + debug_assert!(connections.iter().any(|(_, s)| + matches!(s, ConnectionState::Opening | ConnectionState::Open(_)))); + + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) { + if let ConnectionState::Closed = *connec_state { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", source, connection); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: source, + handler: NotifyHandler::One(connection), + event: NotifsHandlerIn::Open, + }); + *connec_state = ConnectionState::Opening; } else { - None - }); - - *entry.into_mut() = PeerState::Disabled { - open, - banned_until: None - }; - - (last, new_notifications_sink) - }, - PeerState::Disabled { mut open, banned_until } => { - let pos = open.iter().position(|(c, _)| c == &connection); - let sink_closed = pos == Some(0); - if let Some(pos) = pos { - open.remove(pos); + // Connections in `OpeningThenClosing` and `Opening` are in a Closed + // phase, and as such can emit `OpenDesiredByRemote` messages. + // Since an `Open` message haS already been sent, there is nothing + // more to do. + debug_assert!(matches!( + connec_state, + ConnectionState::OpenDesiredByRemote | ConnectionState::Opening + )); + } } else { - debug_assert!(false); error!( target: "sub-libp2p", - "State mismatch with {}: unknown closed connection", - source + "OpenDesiredByRemote: State mismatch in the custom protos handler" ); + debug_assert!(false); } - let last = open.is_empty(); - let new_notifications_sink = open.iter().next().and_then(|(_, sink)| - if sink_closed { - Some(sink.clone()) - } else { - None - }); + *entry.into_mut() = PeerState::Enabled { connections }; + }, - *entry.into_mut() = PeerState::Disabled { - open, - banned_until - }; + // Disabled => Disabled | Incoming + PeerState::Disabled { mut connections, backoff_until } => { + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) { + if let ConnectionState::Closed = *connec_state { + *connec_state = ConnectionState::OpenDesiredByRemote; - (last, new_notifications_sink) - }, - PeerState::DisabledPendingEnable { - mut open, - timer, - timer_deadline - } => { - let pos = open.iter().position(|(c, _)| c == &connection); - let sink_closed = pos == Some(0); - if let Some(pos) = pos { - open.remove(pos); + let incoming_id = self.next_incoming_index; + self.next_incoming_index.0 += 1; + + debug!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).", + source, incoming_id); + self.peerset.incoming(source.clone(), incoming_id); + self.incoming.push(IncomingPeer { + peer_id: source.clone(), + alive: true, + incoming_id, + }); + + *entry.into_mut() = PeerState::Incoming { connections, backoff_until }; + + } else { + // Connections in `OpeningThenClosing` are in a Closed phase, and + // as such can emit `OpenDesiredByRemote` messages. + // We ignore them. 
+ debug_assert!(matches!( + connec_state, + ConnectionState::OpeningThenClosing + )); + } } else { - debug_assert!(false); error!( target: "sub-libp2p", - "State mismatch with {}: unknown closed connection", - source + "OpenDesiredByRemote: State mismatch in the custom protos handler" ); + debug_assert!(false); } + } + + // DisabledPendingEnable => DisabledPendingEnable | Incoming + PeerState::DisabledPendingEnable { mut connections, timer, timer_deadline } => { + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) { + if let ConnectionState::Closed = *connec_state { + *connec_state = ConnectionState::OpenDesiredByRemote; + + let incoming_id = self.next_incoming_index; + self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) { + Some(v) => v, + None => { + error!(target: "sub-libp2p", "Overflow in next_incoming_index"); + return + } + }; + + debug!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).", + source, incoming_id); + self.peerset.incoming(source.clone(), incoming_id); + self.incoming.push(IncomingPeer { + peer_id: source.clone(), + alive: true, + incoming_id, + }); + + *entry.into_mut() = PeerState::Incoming { + connections, + backoff_until: Some(timer_deadline), + }; - let last = open.is_empty(); - let new_notifications_sink = open.iter().next().and_then(|(_, sink)| - if sink_closed { - Some(sink.clone()) } else { - None - }); + // Connections in `OpeningThenClosing` are in a Closed phase, and + // as such can emit `OpenDesiredByRemote` messages. + // We ignore them. + debug_assert!(matches!( + connec_state, + ConnectionState::OpeningThenClosing + )); + *entry.into_mut() = PeerState::DisabledPendingEnable { + connections, + timer, + timer_deadline, + }; + } + } else { + error!( + target: "sub-libp2p", + "OpenDesiredByRemote: State mismatch in the custom protos handler" + ); + debug_assert!(false); + } + } - *entry.into_mut() = PeerState::DisabledPendingEnable { - open, - timer, - timer_deadline + state => { + error!(target: "sub-libp2p", + "OpenDesiredByRemote: Unexpected state in the custom protos handler: {:?}", + state); + debug_assert!(false); + return + } + }; + } + + NotifsHandlerOut::CloseDesired => { + debug!(target: "sub-libp2p", + "Handler({}, {:?}) => CloseDesired", + source, connection); + + let mut entry = if let Entry::Occupied(entry) = self.peers.entry(source.clone()) { + entry + } else { + error!(target: "sub-libp2p", "CloseDesired: State mismatch in the custom protos handler"); + debug_assert!(false); + return + }; + + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + // Enabled => Enabled | Disabled + PeerState::Enabled { mut connections } => { + debug_assert!(connections.iter().any(|(_, s)| + matches!(s, ConnectionState::Opening | ConnectionState::Open(_)))); + + let pos = if let Some(pos) = connections.iter().position(|(c, _)| *c == connection) { + pos + } else { + error!(target: "sub-libp2p", + "CloseDesired: State mismatch in the custom protos handler"); + debug_assert!(false); + return; }; - (last, new_notifications_sink) + if matches!(connections[pos].1, ConnectionState::Closing) { + return; + } + + debug_assert!(matches!(connections[pos].1, ConnectionState::Open(_))); + connections[pos].1 = ConnectionState::Closing; + + debug!(target: "sub-libp2p", "Handler({}, {:?}) <= Close", source, connection); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: source.clone(), + handler: NotifyHandler::One(connection), + event: NotifsHandlerIn::Close, + }); + + if let 
Some((replacement_pos, replacement_sink)) = connections + .iter() + .enumerate() + .filter_map(|(num, (_, s))| { + match s { + ConnectionState::Open(s) => Some((num, s.clone())), + _ => None + } + }) + .next() + { + if pos <= replacement_pos { + debug!(target: "sub-libp2p", "External API <= Sink replaced({:?})", source); + let event = GenericProtoOut::CustomProtocolReplaced { + peer_id: source, + notifications_sink: replacement_sink, + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + *entry.into_mut() = PeerState::Enabled { connections, }; + } + + } else { + // List of open connections wasn't empty before but now it is. + if !connections.iter().any(|(_, s)| matches!(s, ConnectionState::Opening)) { + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", source); + self.peerset.dropped(source.clone()); + *entry.into_mut() = PeerState::Disabled { + connections, backoff_until: None + }; + } else { + *entry.into_mut() = PeerState::Enabled { connections }; + } + + debug!(target: "sub-libp2p", "External API <= Closed({:?})", source); + let event = GenericProtoOut::CustomProtocolClosed { + peer_id: source, + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + }, + + // All connections in `Disabled` and `DisabledPendingEnable` have been sent a + // `Close` message already, and as such ignore any `CloseDesired` message. + state @ PeerState::Disabled { .. } | + state @ PeerState::DisabledPendingEnable { .. } => { + *entry.into_mut() = state; + return; }, state => { error!(target: "sub-libp2p", @@ -1251,103 +1729,227 @@ impl NetworkBehaviour for GenericProto { state); return } - }; + } + } - if last { - debug!(target: "sub-libp2p", "External API <= Closed({:?})", source); - let event = GenericProtoOut::CustomProtocolClosed { - reason, - peer_id: source, - }; - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + NotifsHandlerOut::CloseResult => { + debug!(target: "sub-libp2p", + "Handler({}, {:?}) => CloseResult", + source, connection); + + match self.peers.get_mut(&source) { + // Move the connection from `Closing` to `Closed`. + Some(PeerState::DisabledPendingEnable { connections, .. }) | + Some(PeerState::Disabled { connections, .. }) | + Some(PeerState::Enabled { connections, .. }) => { + if let Some((_, connec_state)) = connections + .iter_mut() + .find(|(c, s)| *c == connection && matches!(s, ConnectionState::Closing)) + { + *connec_state = ConnectionState::Closed; + } else { + error!(target: "sub-libp2p", + "CloseResult: State mismatch in the custom protos handler"); + debug_assert!(false); + } + }, - } else { - if let Some(new_notifications_sink) = new_notifications_sink { - let event = GenericProtoOut::CustomProtocolReplaced { - peer_id: source, - notifications_sink: new_notifications_sink, - }; - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + state => { + error!(target: "sub-libp2p", + "CloseResult: Unexpected state in the custom protos handler: {:?}", + state); + debug_assert!(false); } - debug!(target: "sub-libp2p", "Secondary connection closed custom protocol."); } } - NotifsHandlerOut::Open { endpoint, received_handshake, notifications_sink } => { + NotifsHandlerOut::OpenResultOk { received_handshake, notifications_sink, .. } => { debug!(target: "sub-libp2p", - "Handler({:?}) => Endpoint {:?} open for custom protocols.", - source, endpoint); - - let first = match self.peers.get_mut(&source) { - Some(PeerState::Enabled { ref mut open, .. 
}) | - Some(PeerState::DisabledPendingEnable { ref mut open, .. }) | - Some(PeerState::Disabled { ref mut open, .. }) => { - let first = open.is_empty(); - if !open.iter().any(|(c, _)| *c == connection) { - open.push((connection, notifications_sink.clone())); + "Handler({}, {:?}) => OpenResultOk", + source, connection); + + match self.peers.get_mut(&source) { + Some(PeerState::Enabled { connections, .. }) => { + debug_assert!(connections.iter().any(|(_, s)| + matches!(s, ConnectionState::Opening | ConnectionState::Open(_)))); + let any_open = connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))); + + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| + *c == connection && matches!(s, ConnectionState::Opening)) + { + if !any_open { + debug!(target: "sub-libp2p", "External API <= Open({:?})", source); + let event = GenericProtoOut::CustomProtocolOpen { + peer_id: source, + received_handshake, + notifications_sink: notifications_sink.clone(), + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + *connec_state = ConnectionState::Open(notifications_sink); + } else if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| + *c == connection && matches!(s, ConnectionState::OpeningThenClosing)) + { + *connec_state = ConnectionState::Closing; } else { - error!( - target: "sub-libp2p", - "State mismatch: connection with {} opened a second time", - source - ); + debug_assert!(false); + error!(target: "sub-libp2p", + "OpenResultOk State mismatch in the custom protos handler"); + } + }, + + Some(PeerState::DisabledPendingEnable { connections, .. }) | + Some(PeerState::Disabled { connections, .. }) => { + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| + *c == connection && matches!(s, ConnectionState::OpeningThenClosing)) + { + *connec_state = ConnectionState::Closing; + } else { + error!(target: "sub-libp2p", + "OpenResultOk State mismatch in the custom protos handler"); + debug_assert!(false); } - first } + state => { error!(target: "sub-libp2p", - "Open: Unexpected state in the custom protos handler: {:?}", + "OpenResultOk: Unexpected state in the custom protos handler: {:?}", state); + debug_assert!(false); return } + } + } + + NotifsHandlerOut::OpenResultErr => { + debug!(target: "sub-libp2p", + "Handler({:?}, {:?}) => OpenResultErr", + source, connection); + + let mut entry = if let Entry::Occupied(entry) = self.peers.entry(source.clone()) { + entry + } else { + error!(target: "sub-libp2p", "OpenResultErr: State mismatch in the custom protos handler"); + debug_assert!(false); + debug_assert!(false); + return + }; + + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + PeerState::Enabled { mut connections } => { + debug_assert!(connections.iter().any(|(_, s)| + matches!(s, ConnectionState::Opening | ConnectionState::Open(_)))); + + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| + *c == connection && matches!(s, ConnectionState::Opening)) + { + *connec_state = ConnectionState::Closed; + } else if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| + *c == connection && matches!(s, ConnectionState::OpeningThenClosing)) + { + *connec_state = ConnectionState::Closing; + } else { + error!(target: "sub-libp2p", + "OpenResultErr: State mismatch in the custom protos handler"); + debug_assert!(false); + } + + if !connections.iter().any(|(_, s)| + matches!(s, ConnectionState::Opening | ConnectionState::Open(_))) + { + debug!(target: "sub-libp2p", "PSM <= 
Dropped({:?})", source); + self.peerset.dropped(source.clone()); + + *entry.into_mut() = PeerState::Disabled { + connections, + backoff_until: None + }; + } else { + *entry.into_mut() = PeerState::Enabled { connections }; + } + }, + PeerState::Disabled { mut connections, backoff_until } => { + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| + *c == connection && matches!(s, ConnectionState::OpeningThenClosing)) + { + *connec_state = ConnectionState::Closing; + } else { + error!(target: "sub-libp2p", + "OpenResultErr: State mismatch in the custom protos handler"); + debug_assert!(false); + } + + *entry.into_mut() = PeerState::Disabled { connections, backoff_until }; + }, + PeerState::DisabledPendingEnable { mut connections, timer, timer_deadline } => { + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| + *c == connection && matches!(s, ConnectionState::OpeningThenClosing)) + { + *connec_state = ConnectionState::Closing; + } else { + error!(target: "sub-libp2p", + "OpenResultErr: State mismatch in the custom protos handler"); + debug_assert!(false); + } + + *entry.into_mut() = PeerState::DisabledPendingEnable { + connections, + timer, + timer_deadline, + }; + }, + state => { + error!(target: "sub-libp2p", + "Unexpected state in the custom protos handler: {:?}", + state); + debug_assert!(false); + } }; + } - if first { - debug!(target: "sub-libp2p", "External API <= Open({:?})", source); - let event = GenericProtoOut::CustomProtocolOpen { + NotifsHandlerOut::CustomMessage { message } => { + if self.is_open(&source) { + trace!(target: "sub-libp2p", "Handler({:?}) => Message", source); + trace!(target: "sub-libp2p", "External API <= Message({:?})", source); + let event = GenericProtoOut::LegacyMessage { peer_id: source, - received_handshake, - notifications_sink + message, }; - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); } else { - debug!( + trace!( target: "sub-libp2p", - "Handler({:?}) => Secondary connection opened custom protocol", - source + "Handler({:?}) => Post-close message. 
Dropping message.", + source, ); } } - NotifsHandlerOut::CustomMessage { message } => { - debug_assert!(self.is_open(&source)); - trace!(target: "sub-libp2p", "Handler({:?}) => Message", source); - trace!(target: "sub-libp2p", "External API <= Message({:?})", source); - let event = GenericProtoOut::LegacyMessage { - peer_id: source, - message, - }; - - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); - } - NotifsHandlerOut::Notification { protocol_name, message } => { - debug_assert!(self.is_open(&source)); - trace!( - target: "sub-libp2p", - "Handler({:?}) => Notification({:?})", - source, - protocol_name, - ); - trace!(target: "sub-libp2p", "External API <= Message({:?}, {:?})", protocol_name, source); - let event = GenericProtoOut::Notification { - peer_id: source, - protocol_name, - message, - }; + if self.is_open(&source) { + trace!( + target: "sub-libp2p", + "Handler({:?}) => Notification({:?})", + source, + protocol_name, + ); + trace!(target: "sub-libp2p", "External API <= Message({:?}, {:?})", protocol_name, source); + let event = GenericProtoOut::Notification { + peer_id: source, + protocol_name, + message, + }; - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } else { + trace!( + target: "sub-libp2p", + "Handler({:?}) => Post-close notification({:?})", + source, + protocol_name, + ); + } } } } @@ -1400,6 +2002,11 @@ impl NetworkBehaviour for GenericProto { }; match peer_state { + PeerState::Backoff { timer, .. } if *timer == delay_id => { + debug!(target: "sub-libp2p", "Libp2p <= Clean up ban of {:?} from the state", peer_id); + self.peers.remove(&peer_id); + } + PeerState::PendingRequest { timer, .. } if *timer == delay_id => { debug!(target: "sub-libp2p", "Libp2p <= Dial {:?} now that ban has expired", peer_id); self.events.push_back(NetworkBehaviourAction::DialPeer { @@ -1409,14 +2016,33 @@ impl NetworkBehaviour for GenericProto { *peer_state = PeerState::Requested; } - PeerState::DisabledPendingEnable { timer, open, .. } if *timer == delay_id => { - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable (ban expired)", peer_id); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id, - handler: NotifyHandler::All, - event: NotifsHandlerIn::Enable, - }); - *peer_state = PeerState::Enabled { open: mem::replace(open, Default::default()) }; + PeerState::DisabledPendingEnable { connections, timer, timer_deadline } + if *timer == delay_id => + { + // The first element of `closed` is chosen to open the notifications substream. 
+ if let Some((connec_id, connec_state)) = connections.iter_mut() + .find(|(_, s)| matches!(s, ConnectionState::Closed)) + { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open (ban expired)", + peer_id, *connec_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Open, + }); + *connec_state = ConnectionState::Opening; + *peer_state = PeerState::Enabled { + connections: mem::replace(connections, Default::default()), + }; + } else { + *timer_deadline = Instant::now() + Duration::from_secs(5); + let delay = futures_timer::Delay::new(Duration::from_secs(5)); + let timer = *timer; + self.delays.push(async move { + delay.await; + (timer, peer_id) + }.boxed()); + } } // We intentionally never remove elements from `delays`, and it may diff --git a/client/network/src/protocol/generic_proto/handler.rs b/client/network/src/protocol/generic_proto/handler.rs index 5845130a7db87..0272261f67d57 100644 --- a/client/network/src/protocol/generic_proto/handler.rs +++ b/client/network/src/protocol/generic_proto/handler.rs @@ -1,27 +1,1054 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify +// Substrate is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// This program is distributed in the hope that it will be useful, +// Substrate is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with this program. If not, see . +// along with Substrate. If not, see . + +//! Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for both incoming +//! and outgoing substreams for all gossiping protocols together. +//! +//! This is the main implementation of `ProtocolsHandler` in this crate, that handles all the +//! protocols that are Substrate-related and outside of the scope of libp2p. +//! +//! # Usage +//! +//! From an API perspective, the [`NotifsHandler`] is always in one of the following state (see [`State`]): +//! +//! - Closed substreams. This is the initial state. +//! - Closed substreams, but remote desires them to be open. +//! - Open substreams. +//! - Open substreams, but remote desires them to be closed. +//! +//! The [`NotifsHandler`] can spontaneously switch between these states: +//! +//! - "Closed substreams" to "Closed substreams but open desired". When that happens, a +//! [`NotifsHandlerOut::OpenDesiredByRemote`] is emitted. +//! - "Closed substreams but open desired" to "Closed substreams" (i.e. the remote has cancelled +//! their request). When that happens, a [`NotifsHandlerOut::CloseDesired`] is emitted. +//! - "Open substreams" to "Open substreams but close desired". When that happens, a +//! [`NotifsHandlerOut::CloseDesired`] is emitted. +//! +//! 
The user can instruct the `NotifsHandler` to switch from "closed" to "open" or vice-versa by +//! sending either a [`NotifsHandlerIn::Open`] or a [`NotifsHandlerIn::Close`]. The `NotifsHandler` +//! must answer with [`NotifsHandlerOut::OpenResultOk`] or [`NotifsHandlerOut::OpenResultErr`], or +//! with [`NotifsHandlerOut::CloseResult`]. +//! +//! When a [`NotifsHandlerOut::OpenResultOk`] is emitted, the `NotifsHandler` is now in the open +//! state. When a [`NotifsHandlerOut::OpenResultErr`] or [`NotifsHandlerOut::CloseResult`] is +//! emitted, the `NotifsHandler` is now (or remains) in the closed state. +//! +//! When a [`NotifsHandlerOut::OpenDesiredByRemote`] is emitted, the user should always send back either a +//! [`NotifsHandlerIn::Open`] or a [`NotifsHandlerIn::Close`].If this isn't done, the remote will +//! be left in a pending state. +//! +//! It is illegal to send a [`NotifsHandlerIn::Open`] before a previously-emitted +//! [`NotifsHandlerIn::Open`] has gotten an answer. + +use crate::protocol::generic_proto::{ + upgrade::{ + NotificationsIn, NotificationsOut, NotificationsInSubstream, NotificationsOutSubstream, + NotificationsHandshakeError, RegisteredProtocol, RegisteredProtocolSubstream, + RegisteredProtocolEvent, UpgradeCollec + }, +}; -pub use self::group::{ - NotificationsSink, NotifsHandlerError, Ready, NotifsHandlerProto, NotifsHandler, NotifsHandlerIn, NotifsHandlerOut +use bytes::BytesMut; +use libp2p::core::{either::EitherOutput, ConnectedPoint, PeerId}; +use libp2p::core::upgrade::{SelectUpgrade, InboundUpgrade, OutboundUpgrade}; +use libp2p::swarm::{ + ProtocolsHandler, ProtocolsHandlerEvent, + IntoProtocolsHandler, + KeepAlive, + ProtocolsHandlerUpgrErr, + SubstreamProtocol, + NegotiatedSubstream, }; -pub use self::legacy::ConnectionKillError as LegacyConnectionKillError; +use futures::{ + channel::mpsc, + lock::{Mutex as FuturesMutex, MutexGuard as FuturesMutexGuard}, + prelude::* +}; +use log::error; +use parking_lot::{Mutex, RwLock}; +use smallvec::SmallVec; +use std::{borrow::Cow, collections::VecDeque, mem, pin::Pin, str, sync::Arc, task::{Context, Poll}, time::Duration}; +use wasm_timer::Instant; + +/// Number of pending notifications in asynchronous contexts. +/// See [`NotificationsSink::reserve_notification`] for context. +const ASYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 8; + +/// Number of pending notifications in synchronous contexts. +const SYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 2048; + +/// Maximum duration to open a substream and receive the handshake message. After that, we +/// consider that we failed to open the substream. +const OPEN_TIMEOUT: Duration = Duration::from_secs(10); + +/// After successfully establishing a connection with the remote, we keep the connection open for +/// at least this amount of time in order to give the rest of the code the chance to notify us to +/// open substreams. +const INITIAL_KEEPALIVE_TIME: Duration = Duration::from_secs(5); + +/// Implements the `IntoProtocolsHandler` trait of libp2p. +/// +/// Every time a connection with a remote starts, an instance of this struct is created and +/// sent to a background task dedicated to this connection. Once the connection is established, +/// it is turned into a [`NotifsHandler`]. +/// +/// See the documentation at the module level for more information. +pub struct NotifsHandlerProto { + /// Prototypes for upgrades for inbound substreams, and the message we respond with in the + /// handshake. 
+ in_protocols: Vec<(NotificationsIn, Arc>>)>, + + /// Name of protocols available for outbound substreams, and the initial handshake message we + /// send. + out_protocols: Vec<(Cow<'static, str>, Arc>>)>, + + /// Configuration for the legacy protocol upgrade. + legacy_protocol: RegisteredProtocol, +} + +/// The actual handler once the connection has been established. +/// +/// See the documentation at the module level for more information. +pub struct NotifsHandler { + /// Prototypes for upgrades for inbound substreams, and the message we respond with in the + /// handshake. + in_protocols: Vec<(NotificationsIn, Arc>>)>, + + /// Name of protocols available for outbound substreams, and the initial handshake message we + /// send. + out_protocols: Vec<(Cow<'static, str>, Arc>>)>, + + /// When the connection with the remote has been successfully established. + when_connection_open: Instant, + + /// Whether we are the connection dialer or listener. + endpoint: ConnectedPoint, + + /// State of this handler. + state: State, + + /// Configuration for the legacy protocol upgrade. + legacy_protocol: RegisteredProtocol, + + /// The substreams where bidirectional communications happen. + legacy_substreams: SmallVec<[RegisteredProtocolSubstream; 4]>, + + /// Contains substreams which are being shut down. + legacy_shutdown: SmallVec<[RegisteredProtocolSubstream; 4]>, + + /// Events to return in priority from `poll`. + events_queue: VecDeque< + ProtocolsHandlerEvent + >, +} + +/// See the module-level documentation to learn about the meaning of these variants. +enum State { + /// Handler is in the "Closed" state. + Closed { + /// Vec of the same length as [`NotifsHandler::out_protocols`]. For each protocol, contains + /// a boolean indicating whether an outgoing substream is still in the process of being + /// opened. + pending_opening: Vec, + }, + + /// Handler is in the "Closed" state. A [`NotifsHandlerOut::OpenDesiredByRemote`] has been emitted. + OpenDesiredByRemote { + /// Vec of the same length as [`NotifsHandler::in_protocols`]. For each protocol, contains + /// a substream opened by the remote and that hasn't been accepted/rejected yet. + /// + /// Must always contain at least one `Some`. + in_substreams: Vec>>, + + /// See [`State::Closed::pending_opening`]. + pending_opening: Vec, + }, + + /// Handler is in the "Closed" state, but has received a [`NotifsHandlerIn::Open`] and is + /// consequently trying to open the various notifications substreams. + /// + /// A [`NotifsHandlerOut::OpenResultOk`] or a [`NotifsHandlerOut::OpenResultErr`] event must + /// be emitted when transitioning to respectively [`State::Open`] or [`State::Closed`]. + Opening { + /// In the situation where either the legacy substream has been opened or the + /// handshake-bearing notifications protocol is open, but we haven't sent out any + /// [`NotifsHandlerOut::Open`] event yet, this contains the received handshake waiting to + /// be reported through the external API. + pending_handshake: Option>, + + /// Vec of the same length as [`NotifsHandler::in_protocols`]. For each protocol, contains + /// a substream opened by the remote and that has been accepted. + /// + /// Contrary to [`State::OpenDesiredByRemote::in_substreams`], it is possible for this to + /// contain only `None`s. + in_substreams: Vec>>, + + /// Vec of the same length as [`NotifsHandler::out_protocols`]. For each protocol, contains + /// an outbound substream that has been accepted by the remote. 
+ /// + /// Items that contain `None` mean that a substream is still being opened or has been + /// rejected by the remote. In other words, this `Vec` is kind of a mirror version of + /// [`State::Closed::pending_opening`]. + /// + /// Items that contain `Some(None)` have been rejected by the remote, most likely because + /// they don't support this protocol. At the time of writing, the external API doesn't + /// distinguish between the different protocols. From the external API's point of view, + /// either all protocols are open or none are open. In reality, light clients in particular + /// don't support for example the GrandPa protocol, and as such will refuse our outgoing + /// attempts. This is problematic in theory, but in practice this is handled properly at a + /// higher level. This flaw will be fixed once the outer layers know to differentiate the + /// multiple protocols. + out_substreams: Vec>>>, + }, + + /// Handler is in the "Open" state. + Open { + /// Contains the two `Receiver`s connected to the [`NotificationsSink`] that has been + /// sent out. The notifications to send out can be pulled from these receivers. + /// We use two different channels in order to have two different channel sizes, but from + /// the receiving point of view, the two channels are the same. + /// The receivers are fused in case the user drops the [`NotificationsSink`] entirely. + notifications_sink_rx: stream::Select< + stream::Fuse>, + stream::Fuse> + >, + + /// Vec of the same length as [`NotifsHandler::out_protocols`]. For each protocol, contains + /// an outbound substream that has been accepted by the remote. + /// + /// On transition to [`State::Open`], all the elements must be `Some`. Elements are + /// switched to `None` only if the remote closes substreams, in which case `want_closed` + /// must be true. + out_substreams: Vec>>, + + /// Vec of the same length as [`NotifsHandler::in_protocols`]. For each protocol, contains + /// a substream opened by the remote and that has been accepted. + /// + /// Contrary to [`State::OpenDesiredByRemote::in_substreams`], it is possible for this to + /// contain only `None`s. + in_substreams: Vec>>, + + /// If true, at least one substream in [`State::Open::out_substreams`] has been closed or + /// reset by the remote and a [`NotifsHandlerOut::CloseDesired`] message has been sent + /// out. + want_closed: bool, + }, +} + +impl IntoProtocolsHandler for NotifsHandlerProto { + type Handler = NotifsHandler; + + fn inbound_protocol(&self) -> SelectUpgrade, RegisteredProtocol> { + let in_protocols = self.in_protocols.iter() + .map(|(h, _)| h.clone()) + .collect::>(); + + SelectUpgrade::new(in_protocols, self.legacy_protocol.clone()) + } + + fn into_handler(self, _: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler { + let num_out_proto = self.out_protocols.len(); + + NotifsHandler { + in_protocols: self.in_protocols, + out_protocols: self.out_protocols, + endpoint: connected_point.clone(), + when_connection_open: Instant::now(), + state: State::Closed { + pending_opening: (0..num_out_proto).map(|_| false).collect(), + }, + legacy_protocol: self.legacy_protocol, + legacy_substreams: SmallVec::new(), + legacy_shutdown: SmallVec::new(), + events_queue: VecDeque::with_capacity(16), + } + } +} + +/// Event that can be received by a `NotifsHandler`. +#[derive(Debug, Clone)] +pub enum NotifsHandlerIn { + /// Instruct the handler to open the notification substreams. 
+ /// + /// Must always be answered by a [`NotifsHandlerOut::OpenResultOk`] or a + /// [`NotifsHandlerOut::OpenResultErr`] event. + /// + /// Importantly, it is forbidden to send a [`NotifsHandlerIn::Open`] while a previous one is + /// already in flight. It is however possible if a `Close` is still in flight. + Open, + + /// Instruct the handler to close the notification substreams, or reject any pending incoming + /// substream request. + /// + /// Must always be answered by a [`NotifsHandlerOut::CloseResult`] event. + Close, +} + +/// Event that can be emitted by a `NotifsHandler`. +#[derive(Debug)] +pub enum NotifsHandlerOut { + /// Acknowledges a [`NotifsHandlerIn::Open`]. + OpenResultOk { + /// The endpoint of the connection that is open for custom protocols. + endpoint: ConnectedPoint, + /// Handshake that was sent to us. + /// This is normally a "Status" message, but this is out of the concern of this code. + received_handshake: Vec, + /// How notifications can be sent to this node. + notifications_sink: NotificationsSink, + }, + + /// Acknowledges a [`NotifsHandlerIn::Open`]. The remote has refused the attempt to open + /// notification substreams. + OpenResultErr, + + /// Acknowledges a [`NotifsHandlerIn::Close`]. + CloseResult, + + /// The remote would like the substreams to be open. Send a [`NotifsHandlerIn::Open`] or a + /// [`NotifsHandlerIn::Close`] in order to either accept or deny this request. If a + /// [`NotifsHandlerIn::Open`] or [`NotifsHandlerIn::Close`] has been sent before and has not + /// yet been acknowledged by a matching [`NotifsHandlerOut`], then you don't need to send + /// another [`NotifsHandlerIn`]. + OpenDesiredByRemote, + + /// The remote would like the substreams to be closed. Send a [`NotifsHandlerIn::Close`] in + /// order to close them. If a [`NotifsHandlerIn::Close`] has been sent before and has not yet + /// been acknowledged by a [`NotifsHandlerOut::CloseResult`], then you don't need to send + /// another one. + CloseDesired, + + /// Received a non-gossiping message on the legacy substream. + /// + /// Can only happen when the handler is in the open state. + CustomMessage { + /// Message that has been received. + /// + /// Keep in mind that this can be a `ConsensusMessage` message, which then contains a + /// notification. + message: BytesMut, + }, + + /// Received a message on a custom protocol substream. + /// + /// Can only happen when the handler is in the open state. + Notification { + /// Name of the protocol of the message. + protocol_name: Cow<'static, str>, + + /// Message that has been received. + message: BytesMut, + }, +} + +/// Sink connected directly to the node background task. Allows sending notifications to the peer. +/// +/// Can be cloned in order to obtain multiple references to the same peer. +#[derive(Debug, Clone)] +pub struct NotificationsSink { + inner: Arc, +} + +#[derive(Debug)] +struct NotificationsSinkInner { + /// Sender to use in asynchronous contexts. Uses an asynchronous mutex. + async_channel: FuturesMutex>, + /// Sender to use in synchronous contexts. Uses a synchronous mutex. + /// This channel has a large capacity and is meant to be used in contexts where + /// back-pressure cannot be properly exerted. + /// It will be removed in a future version. + sync_channel: Mutex>, +} + +/// Message emitted through the [`NotificationsSink`] and processed by the background task +/// dedicated to the peer. 
+#[derive(Debug)] +enum NotificationsSinkMessage { + /// Message emitted by [`NotificationsSink::reserve_notification`] and + /// [`NotificationsSink::write_notification_now`]. + Notification { + protocol_name: Cow<'static, str>, + message: Vec, + }, + + /// Must close the connection. + ForceClose, +} + +impl NotificationsSink { + /// Sends a notification to the peer. + /// + /// If too many messages are already buffered, the notification is silently discarded and the + /// connection to the peer will be closed shortly after. + /// + /// The protocol name is expected to be checked ahead of calling this method. It is a logic + /// error to send a notification using an unknown protocol. + /// + /// This method will be removed in a future version. + pub fn send_sync_notification<'a>( + &'a self, + protocol_name: Cow<'static, str>, + message: impl Into> + ) { + let mut lock = self.inner.sync_channel.lock(); + let result = lock.try_send(NotificationsSinkMessage::Notification { + protocol_name, + message: message.into() + }); + + if result.is_err() { + // Cloning the `mpsc::Sender` guarantees the allocation of an extra spot in the + // buffer, and therefore `try_send` will succeed. + let _result2 = lock.clone().try_send(NotificationsSinkMessage::ForceClose); + debug_assert!(_result2.map(|()| true).unwrap_or_else(|err| err.is_disconnected())); + } + } + + /// Wait until the remote is ready to accept a notification. + /// + /// Returns an error in the case where the connection is closed. + /// + /// The protocol name is expected to be checked ahead of calling this method. It is a logic + /// error to send a notification using an unknown protocol. + pub async fn reserve_notification<'a>(&'a self, protocol_name: Cow<'static, str>) -> Result, ()> { + let mut lock = self.inner.async_channel.lock().await; + + let poll_ready = future::poll_fn(|cx| lock.poll_ready(cx)).await; + if poll_ready.is_ok() { + Ok(Ready { protocol_name: protocol_name, lock }) + } else { + Err(()) + } + } +} + +/// Notification slot is reserved and the notification can actually be sent. +#[must_use] +#[derive(Debug)] +pub struct Ready<'a> { + /// Guarded channel. The channel inside is guaranteed to not be full. + lock: FuturesMutexGuard<'a, mpsc::Sender>, + /// Name of the protocol. Should match one of the protocols passed at initialization. + protocol_name: Cow<'static, str>, +} + +impl<'a> Ready<'a> { + /// Consumes this slot's reservation and actually queues the notification. + /// + /// Returns an error if the substream has been closed. + pub fn send( + mut self, + notification: impl Into> + ) -> Result<(), ()> { + self.lock.start_send(NotificationsSinkMessage::Notification { + protocol_name: self.protocol_name, + message: notification.into(), + }).map_err(|_| ()) + } +} + +/// Error specific to the collection of protocols. +#[derive(Debug, derive_more::Display, derive_more::Error)] +pub enum NotifsHandlerError { + /// Channel of synchronous notifications is full. + SyncNotificationsClogged, +} + +impl NotifsHandlerProto { + /// Builds a new handler. + /// + /// `list` is a list of notification protocol names, and the message to send as part of the + /// handshake. At the moment, the message is always the same whether we open a substream + /// ourselves or respond to a handshake from the remote. + /// + /// The first protocol in `list` is special-cased as the protocol that contains the handshake + /// to report through the [`NotifsHandlerOut::Open`] event. + /// + /// # Panic + /// + /// - Panics if `list` is empty. 
+ /// + pub fn new( + legacy_protocol: RegisteredProtocol, + list: impl Into, Arc>>)>>, + ) -> Self { + let list = list.into(); + assert!(!list.is_empty()); + + let out_protocols = list + .clone() + .into_iter() + .collect(); + + let in_protocols = list.clone() + .into_iter() + .map(|(proto_name, msg)| (NotificationsIn::new(proto_name), msg)) + .collect(); + + NotifsHandlerProto { + in_protocols, + out_protocols, + legacy_protocol, + } + } +} + +impl ProtocolsHandler for NotifsHandler { + type InEvent = NotifsHandlerIn; + type OutEvent = NotifsHandlerOut; + type Error = NotifsHandlerError; + type InboundProtocol = SelectUpgrade, RegisteredProtocol>; + type OutboundProtocol = NotificationsOut; + // Index within the `out_protocols`. + type OutboundOpenInfo = usize; + type InboundOpenInfo = (); + + fn listen_protocol(&self) -> SubstreamProtocol { + let in_protocols = self.in_protocols.iter() + .map(|(h, _)| h.clone()) + .collect::>(); + + let proto = SelectUpgrade::new(in_protocols, self.legacy_protocol.clone()); + SubstreamProtocol::new(proto, ()) + } + + fn inject_fully_negotiated_inbound( + &mut self, + out: >::Output, + (): () + ) { + match out { + // Received notifications substream. + EitherOutput::First(((_remote_handshake, mut proto), num)) => { + match &mut self.state { + State::Closed { pending_opening } => { + self.events_queue.push_back(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenDesiredByRemote + )); + + let mut in_substreams = (0..self.in_protocols.len()) + .map(|_| None) + .collect::>(); + in_substreams[num] = Some(proto); + self.state = State::OpenDesiredByRemote { + in_substreams, + pending_opening: mem::replace(pending_opening, Vec::new()), + }; + }, + State::OpenDesiredByRemote { in_substreams, .. } => { + if in_substreams[num].is_some() { + // If a substream already exists, silently drop the new one. + // Note that we drop the substream, which will send an equivalent to a + // TCP "RST" to the remote and force-close the substream. It might + // seem like an unclean way to get rid of a substream. However, keep + // in mind that it is invalid for the remote to open multiple such + // substreams, and therefore sending a "RST" is the most correct thing + // to do. + return; + } + in_substreams[num] = Some(proto); + }, + State::Opening { in_substreams, .. } | + State::Open { in_substreams, .. } => { + if in_substreams[num].is_some() { + // Same remark as above. + return; + } + + // We create `handshake_message` on a separate line to be sure + // that the lock is released as soon as possible. + let handshake_message = self.in_protocols[num].1.read().clone(); + proto.send_handshake(handshake_message); + in_substreams[num] = Some(proto); + }, + }; + } + + // Received legacy substream. + EitherOutput::Second((substream, _handshake)) => { + // Note: while we acknowledge legacy substreams and handle incoming messages, + // it doesn't trigger any `OpenDesiredByRemote` event as a way to simplify the + // logic of this code. + // Since mid-2019, legacy substreams are supposed to be used at the same time as + // notifications substreams, and not in isolation. Nodes that open legacy + // substreams in isolation are considered deprecated. + if self.legacy_substreams.len() <= 4 { + self.legacy_substreams.push(substream); + } + }, + } + } + + fn inject_fully_negotiated_outbound( + &mut self, + (handshake, substream): >::Output, + num: Self::OutboundOpenInfo + ) { + match &mut self.state { + State::Closed { pending_opening } | + State::OpenDesiredByRemote { pending_opening, .. 
} => { + debug_assert!(pending_opening[num]); + pending_opening[num] = false; + } + State::Open { .. } => { + error!(target: "sub-libp2p", "☎️ State mismatch in notifications handler"); + debug_assert!(false); + } + State::Opening { pending_handshake, in_substreams, out_substreams } => { + debug_assert!(out_substreams[num].is_none()); + out_substreams[num] = Some(Some(substream)); + + if num == 0 { + debug_assert!(pending_handshake.is_none()); + *pending_handshake = Some(handshake); + } + + if !out_substreams.iter().any(|s| s.is_none()) { + let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE); + let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE); + let notifications_sink = NotificationsSink { + inner: Arc::new(NotificationsSinkInner { + async_channel: FuturesMutex::new(async_tx), + sync_channel: Mutex::new(sync_tx), + }), + }; + + debug_assert!(pending_handshake.is_some()); + let pending_handshake = pending_handshake.take().unwrap_or_default(); + + let out_substreams = out_substreams + .drain(..) + .map(|s| s.expect("checked by the if above; qed")) + .collect(); + + self.state = State::Open { + notifications_sink_rx: stream::select(async_rx.fuse(), sync_rx.fuse()), + out_substreams, + in_substreams: mem::replace(in_substreams, Vec::new()), + want_closed: false, + }; + + self.events_queue.push_back(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenResultOk { + endpoint: self.endpoint.clone(), + received_handshake: pending_handshake, + notifications_sink + } + )); + } + } + } + } + + fn inject_event(&mut self, message: NotifsHandlerIn) { + match message { + NotifsHandlerIn::Open => { + match &mut self.state { + State::Closed { .. } | State::OpenDesiredByRemote { .. } => { + let (pending_opening, mut in_substreams) = match &mut self.state { + State::Closed { pending_opening } => (pending_opening, None), + State::OpenDesiredByRemote { pending_opening, in_substreams } => + (pending_opening, Some(mem::replace(in_substreams, Vec::new()))), + _ => unreachable!() + }; + + for (n, is_pending) in pending_opening.iter().enumerate() { + if *is_pending { + continue; + } + + let proto = NotificationsOut::new( + self.out_protocols[n].0.clone(), + self.out_protocols[n].1.read().clone() + ); + + self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(proto, n) + .with_timeout(OPEN_TIMEOUT), + }); + } + + if let Some(in_substreams) = in_substreams.as_mut() { + for (num, substream) in in_substreams.iter_mut().enumerate() { + let substream = match substream.as_mut() { + Some(s) => s, + None => continue, + }; + + let handshake_message = self.in_protocols[num].1.read().clone(); + substream.send_handshake(handshake_message); + } + } + + self.state = State::Opening { + pending_handshake: None, + in_substreams: if let Some(in_substreams) = in_substreams { + in_substreams + } else { + (0..self.in_protocols.len()).map(|_| None).collect() + }, + out_substreams: (0..self.out_protocols.len()).map(|_| None).collect(), + }; + }, + State::Opening { .. } | + State::Open { .. } => { + // As documented, it is forbidden to send an `Open` while there is already + // one in flight. + error!(target: "sub-libp2p", "opening already-opened handler"); + debug_assert!(false); + }, + } + }, + + NotifsHandlerIn::Close => { + for mut substream in self.legacy_substreams.drain() { + substream.shutdown(); + self.legacy_shutdown.push(substream); + } + + match &mut self.state { + State::Open { .. 
} => { + self.state = State::Closed { + pending_opening: Vec::new(), + }; + }, + State::Opening { out_substreams, .. } => { + let pending_opening = out_substreams.iter().map(|s| s.is_none()).collect(); + self.state = State::Closed { + pending_opening, + }; + + self.events_queue.push_back(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenResultErr + )); + }, + State::OpenDesiredByRemote { pending_opening, .. } => { + self.state = State::Closed { + pending_opening: mem::replace(pending_opening, Vec::new()), + }; + } + State::Closed { .. } => {}, + } + + self.events_queue.push_back( + ProtocolsHandlerEvent::Custom(NotifsHandlerOut::CloseResult) + ); + }, + } + } + + fn inject_dial_upgrade_error( + &mut self, + num: usize, + _: ProtocolsHandlerUpgrErr + ) { + match &mut self.state { + State::Closed { pending_opening } | State::OpenDesiredByRemote { pending_opening, .. } => { + debug_assert!(pending_opening[num]); + pending_opening[num] = false; + } + + State::Opening { in_substreams, pending_handshake, out_substreams } => { + // Failing to open a substream isn't considered a failure. Instead, it is marked + // as `Some(None)` and the opening continues. + + out_substreams[num] = Some(None); + + // Some substreams are still being opened. Nothing more to do. + if out_substreams.iter().any(|s| s.is_none()) { + return; + } + + // All substreams have finished being open. + // If the handshake has been received, proceed and report the opening. + + if let Some(pending_handshake) = pending_handshake.take() { + // Open! + let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE); + let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE); + let notifications_sink = NotificationsSink { + inner: Arc::new(NotificationsSinkInner { + async_channel: FuturesMutex::new(async_tx), + sync_channel: Mutex::new(sync_tx), + }), + }; + + let out_substreams = out_substreams + .drain(..) + .map(|s| s.expect("checked by the if above; qed")) + .collect(); + + self.state = State::Open { + notifications_sink_rx: stream::select(async_rx.fuse(), sync_rx.fuse()), + out_substreams, + in_substreams: mem::replace(in_substreams, Vec::new()), + want_closed: false, + }; + + self.events_queue.push_back(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenResultOk { + endpoint: self.endpoint.clone(), + received_handshake: pending_handshake, + notifications_sink + } + )); + + } else { + // Open failure! + self.state = State::Closed { + pending_opening: (0..self.out_protocols.len()).map(|_| false).collect(), + }; + + self.events_queue.push_back(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenResultErr + )); + } + } + + // No substream is being open when already `Open`. + State::Open { .. } => debug_assert!(false), + } + } + + fn connection_keep_alive(&self) -> KeepAlive { + if !self.legacy_substreams.is_empty() { + return KeepAlive::Yes; + } + + match self.state { + State::Closed { .. } => KeepAlive::Until(self.when_connection_open + INITIAL_KEEPALIVE_TIME), + State::OpenDesiredByRemote { .. } | State::Opening { .. } | State::Open { .. } => + KeepAlive::Yes, + } + } + + fn poll( + &mut self, + cx: &mut Context, + ) -> Poll< + ProtocolsHandlerEvent + > { + if let Some(ev) = self.events_queue.pop_front() { + return Poll::Ready(ev); + } + + // Poll inbound substreams. + // Inbound substreams being closed is always tolerated, except for the + // `OpenDesiredByRemote` state which might need to be switched back to `Closed`. + match &mut self.state { + State::Closed { .. 
} => {} + State::Open { in_substreams, .. } => { + for (num, substream) in in_substreams.iter_mut().enumerate() { + match substream.as_mut().map(|s| Stream::poll_next(Pin::new(s), cx)) { + None | Some(Poll::Pending) => continue, + Some(Poll::Ready(Some(Ok(message)))) => { + let event = NotifsHandlerOut::Notification { + message, + protocol_name: self.in_protocols[num].0.protocol_name().clone(), + }; + return Poll::Ready(ProtocolsHandlerEvent::Custom(event)) + }, + Some(Poll::Ready(None)) | Some(Poll::Ready(Some(Err(_)))) => + *substream = None, + } + } + } + + State::OpenDesiredByRemote { in_substreams, .. } | + State::Opening { in_substreams, .. } => { + for substream in in_substreams { + match substream.as_mut().map(|s| NotificationsInSubstream::poll_process(Pin::new(s), cx)) { + None | Some(Poll::Pending) => continue, + Some(Poll::Ready(Ok(void))) => match void {}, + Some(Poll::Ready(Err(_))) => *substream = None, + } + } + } + } + + // Since the previous block might have closed inbound substreams, make sure that we can + // stay in `OpenDesiredByRemote` state. + if let State::OpenDesiredByRemote { in_substreams, pending_opening } = &mut self.state { + if !in_substreams.iter().any(|s| s.is_some()) { + self.state = State::Closed { + pending_opening: mem::replace(pending_opening, Vec::new()), + }; + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::CloseDesired + )) + } + } + + // Poll outbound substreams. + match &mut self.state { + State::Open { out_substreams, want_closed, .. } => { + let mut any_closed = false; + + for substream in out_substreams.iter_mut() { + match substream.as_mut().map(|s| Sink::poll_flush(Pin::new(s), cx)) { + None | Some(Poll::Pending) | Some(Poll::Ready(Ok(()))) => continue, + Some(Poll::Ready(Err(_))) => {} + }; + + // Reached if the substream has been closed. + *substream = None; + any_closed = true; + } + + if any_closed { + if !*want_closed { + *want_closed = true; + return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsHandlerOut::CloseDesired)); + } + } + } + + State::Opening { out_substreams, pending_handshake, .. } => { + debug_assert!(out_substreams.iter().any(|s| s.is_none())); + + for (num, substream) in out_substreams.iter_mut().enumerate() { + match substream { + None | Some(None) => continue, + Some(Some(substream)) => match Sink::poll_flush(Pin::new(substream), cx) { + Poll::Pending | Poll::Ready(Ok(())) => continue, + Poll::Ready(Err(_)) => {} + } + } + + // Reached if the substream has been closed. + *substream = Some(None); + if num == 0 { + // Cancel the handshake. + *pending_handshake = None; + } + } + } + + State::Closed { .. } | + State::OpenDesiredByRemote { .. } => {} + } + + if let State::Open { notifications_sink_rx, out_substreams, .. } = &mut self.state { + 'poll_notifs_sink: loop { + // Before we poll the notifications sink receiver, check that all the notification + // channels are ready to send a message. + // TODO: it is planned that in the future we switch to one `NotificationsSink` per + // protocol, in which case each sink should wait only for its corresponding handler + // to be ready, and not all handlers + // see https://github.com/paritytech/substrate/issues/5670 + for substream in out_substreams.iter_mut() { + match substream.as_mut().map(|s| s.poll_ready_unpin(cx)) { + None | Some(Poll::Ready(_)) => {}, + Some(Poll::Pending) => break 'poll_notifs_sink + } + } + + // Now that all substreams are ready for a message, grab what to send. 
+ let message = match notifications_sink_rx.poll_next_unpin(cx) { + Poll::Ready(Some(msg)) => msg, + Poll::Ready(None) | Poll::Pending => break, + }; + + match message { + NotificationsSinkMessage::Notification { + protocol_name, + message + } => { + if let Some(pos) = self.out_protocols.iter().position(|(n, _)| *n == protocol_name) { + if let Some(substream) = out_substreams[pos].as_mut() { + let _ = substream.start_send_unpin(message); + continue 'poll_notifs_sink; + } + + } else { + log::warn!( + target: "sub-libp2p", + "Tried to send a notification on non-registered protocol: {:?}", + protocol_name + ); + } + } + NotificationsSinkMessage::ForceClose => { + return Poll::Ready( + ProtocolsHandlerEvent::Close(NotifsHandlerError::SyncNotificationsClogged) + ); + } + } + } + } + + // The legacy substreams are polled only if the state is `Open`. Otherwise, it would be + // possible to receive notifications that would need to get silently discarded. + if matches!(self.state, State::Open { .. }) { + for n in (0..self.legacy_substreams.len()).rev() { + let mut substream = self.legacy_substreams.swap_remove(n); + let poll_outcome = Pin::new(&mut substream).poll_next(cx); + match poll_outcome { + Poll::Pending => self.legacy_substreams.push(substream), + Poll::Ready(Some(Ok(RegisteredProtocolEvent::Message(message)))) => { + self.legacy_substreams.push(substream); + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::CustomMessage { message } + )) + }, + Poll::Ready(Some(Ok(RegisteredProtocolEvent::Clogged))) => { + return Poll::Ready(ProtocolsHandlerEvent::Close( + NotifsHandlerError::SyncNotificationsClogged + )) + } + Poll::Ready(None) | Poll::Ready(Some(Err(_))) => { + if matches!(poll_outcome, Poll::Ready(None)) { + self.legacy_shutdown.push(substream); + } + + if let State::Open { want_closed, .. } = &mut self.state { + if !*want_closed { + *want_closed = true; + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::CloseDesired + )) + } + } + } + } + } + } + + shutdown_list(&mut self.legacy_shutdown, cx); + + Poll::Pending + } +} -mod group; -mod legacy; -mod notif_in; -mod notif_out; +/// Given a list of substreams, tries to shut them down. The substreams that have been successfully +/// shut down are removed from the list. +fn shutdown_list + (list: &mut SmallVec>>, + cx: &mut Context) +{ + 'outer: for n in (0..list.len()).rev() { + let mut substream = list.swap_remove(n); + loop { + match substream.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(_))) => {} + Poll::Pending => break, + Poll::Ready(Some(Err(_))) | Poll::Ready(None) => continue 'outer, + } + } + list.push(substream); + } +} diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs deleted file mode 100644 index fbfdb1cb6ab0e..0000000000000 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ /dev/null @@ -1,737 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for both incoming -//! and outgoing substreams for all gossiping protocols together. -//! -//! This is the main implementation of `ProtocolsHandler` in this crate, that handles all the -//! protocols that are Substrate-related and outside of the scope of libp2p. -//! -//! # Usage -//! -//! The handler can be in one of the following states: `Initial`, `Enabled`, `Disabled`. -//! -//! The `Initial` state is the state that the handler initially is in. It is a temporary state -//! during which the user must either enable or disable the handler. After that, the handler stays -//! either enabled or disabled. -//! -//! On the wire, we try to open the following substreams: -//! -//! - One substream for each notification protocol passed as parameter to the -//! `NotifsHandlerProto::new` function. -//! - One "legacy" substream used for anything non-related to gossiping, and used as a fallback -//! in case the notification protocol can't be opened. -//! -//! When the handler is in the `Enabled` state, we immediately open and try to maintain all the -//! aforementioned substreams. When the handler is in the `Disabled` state, we immediately close -//! (or abort opening) all these substreams. It is intended that in the future we allow states in -//! which some protocols are open and not others. Symmetrically, we allow incoming -//! Substrate-related substreams if and only if we are in the `Enabled` state. -//! -//! The user has the choice between sending a message with `SendNotification`, to send a -//! notification, and `SendLegacy`, to send any other kind of message. -//! - -use crate::protocol::generic_proto::{ - handler::legacy::{LegacyProtoHandler, LegacyProtoHandlerProto, LegacyProtoHandlerIn, LegacyProtoHandlerOut}, - handler::notif_in::{NotifsInHandlerProto, NotifsInHandler, NotifsInHandlerIn, NotifsInHandlerOut}, - handler::notif_out::{NotifsOutHandlerProto, NotifsOutHandler, NotifsOutHandlerIn, NotifsOutHandlerOut}, - upgrade::{NotificationsIn, NotificationsOut, NotificationsHandshakeError, RegisteredProtocol, UpgradeCollec}, -}; - -use bytes::BytesMut; -use libp2p::core::{either::EitherOutput, ConnectedPoint, PeerId}; -use libp2p::core::upgrade::{UpgradeError, SelectUpgrade, InboundUpgrade, OutboundUpgrade}; -use libp2p::swarm::{ - ProtocolsHandler, ProtocolsHandlerEvent, - IntoProtocolsHandler, - KeepAlive, - ProtocolsHandlerUpgrErr, - SubstreamProtocol, - NegotiatedSubstream, -}; -use futures::{ - channel::mpsc, - lock::{Mutex as FuturesMutex, MutexGuard as FuturesMutexGuard}, - prelude::* -}; -use log::{debug, error}; -use parking_lot::{Mutex, RwLock}; -use std::{borrow::Cow, str, sync::Arc, task::{Context, Poll}}; - -/// Number of pending notifications in asynchronous contexts. -/// See [`NotificationsSink::reserve_notification`] for context. -const ASYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 8; -/// Number of pending notifications in synchronous contexts. -const SYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 2048; - -/// Implements the `IntoProtocolsHandler` trait of libp2p. -/// -/// Every time a connection with a remote starts, an instance of this struct is created and -/// sent to a background task dedicated to this connection. Once the connection is established, -/// it is turned into a [`NotifsHandler`]. -/// -/// See the documentation at the module level for more information. 
-pub struct NotifsHandlerProto { - /// Prototypes for handlers for inbound substreams, and the message we respond with in the - /// handshake. - in_handlers: Vec<(NotifsInHandlerProto, Arc>>)>, - - /// Prototypes for handlers for outbound substreams, and the initial handshake message we send. - out_handlers: Vec<(NotifsOutHandlerProto, Arc>>)>, - - /// Prototype for handler for backwards-compatibility. - legacy: LegacyProtoHandlerProto, -} - -/// The actual handler once the connection has been established. -/// -/// See the documentation at the module level for more information. -pub struct NotifsHandler { - /// Handlers for inbound substreams, and the message we respond with in the handshake. - in_handlers: Vec<(NotifsInHandler, Arc>>)>, - - /// Handlers for outbound substreams, and the initial handshake message we send. - out_handlers: Vec<(NotifsOutHandler, Arc>>)>, - - /// Whether we are the connection dialer or listener. - endpoint: ConnectedPoint, - - /// Handler for backwards-compatibility. - legacy: LegacyProtoHandler, - - /// In the situation where either the legacy substream has been opened or the handshake-bearing - /// notifications protocol is open, but we haven't sent out any [`NotifsHandlerOut::Open`] - /// event yet, this contains the received handshake waiting to be reported through the - /// external API. - pending_handshake: Option>, - - /// State of this handler. - enabled: EnabledState, - - /// If we receive inbound substream requests while in initialization mode, - /// we push the corresponding index here and process them when the handler - /// gets enabled/disabled. - pending_in: Vec, - - /// If `Some`, contains the two `Receiver`s connected to the [`NotificationsSink`] that has - /// been sent out. The notifications to send out can be pulled from this receivers. - /// We use two different channels in order to have two different channel sizes, but from the - /// receiving point of view, the two channels are the same. - /// The receivers are fused in case the user drops the [`NotificationsSink`] entirely. - /// - /// Contains `Some` if and only if it has been reported to the user that the substreams are - /// open. - notifications_sink_rx: Option< - stream::Select< - stream::Fuse>, - stream::Fuse> - > - >, -} - -#[derive(Debug, Clone, PartialEq, Eq)] -enum EnabledState { - Initial, - Enabled, - Disabled, -} - -impl IntoProtocolsHandler for NotifsHandlerProto { - type Handler = NotifsHandler; - - fn inbound_protocol(&self) -> SelectUpgrade, RegisteredProtocol> { - let in_handlers = self.in_handlers.iter() - .map(|(h, _)| h.inbound_protocol()) - .collect::>(); - - SelectUpgrade::new(in_handlers, self.legacy.inbound_protocol()) - } - - fn into_handler(self, remote_peer_id: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler { - NotifsHandler { - in_handlers: self.in_handlers - .into_iter() - .map(|(proto, msg)| (proto.into_handler(remote_peer_id, connected_point), msg)) - .collect(), - out_handlers: self.out_handlers - .into_iter() - .map(|(proto, msg)| (proto.into_handler(remote_peer_id, connected_point), msg)) - .collect(), - endpoint: connected_point.clone(), - legacy: self.legacy.into_handler(remote_peer_id, connected_point), - pending_handshake: None, - enabled: EnabledState::Initial, - pending_in: Vec::new(), - notifications_sink_rx: None, - } - } -} - -/// Event that can be received by a `NotifsHandler`. -#[derive(Debug, Clone)] -pub enum NotifsHandlerIn { - /// The node should start using custom protocols. 
- Enable, - - /// The node should stop using custom protocols. - Disable, -} - -/// Event that can be emitted by a `NotifsHandler`. -#[derive(Debug)] -pub enum NotifsHandlerOut { - /// The connection is open for custom protocols. - Open { - /// The endpoint of the connection that is open for custom protocols. - endpoint: ConnectedPoint, - /// Handshake that was sent to us. - /// This is normally a "Status" message, but this out of the concern of this code. - received_handshake: Vec, - /// How notifications can be sent to this node. - notifications_sink: NotificationsSink, - }, - - /// The connection is closed for custom protocols. - Closed { - /// The reason for closing, for diagnostic purposes. - reason: Cow<'static, str>, - /// The endpoint of the connection that closed for custom protocols. - endpoint: ConnectedPoint, - }, - - /// Received a non-gossiping message on the legacy substream. - CustomMessage { - /// Message that has been received. - /// - /// Keep in mind that this can be a `ConsensusMessage` message, which then contains a - /// notification. - message: BytesMut, - }, - - /// Received a message on a custom protocol substream. - Notification { - /// Name of the protocol of the message. - protocol_name: Cow<'static, str>, - - /// Message that has been received. - message: BytesMut, - }, -} - -/// Sink connected directly to the node background task. Allows sending notifications to the peer. -/// -/// Can be cloned in order to obtain multiple references to the same peer. -#[derive(Debug, Clone)] -pub struct NotificationsSink { - inner: Arc, -} - -#[derive(Debug)] -struct NotificationsSinkInner { - /// Sender to use in asynchronous contexts. Uses an asynchronous mutex. - async_channel: FuturesMutex>, - /// Sender to use in synchronous contexts. Uses a synchronous mutex. - /// This channel has a large capacity and is meant to be used in contexts where - /// back-pressure cannot be properly exerted. - /// It will be removed in a future version. - sync_channel: Mutex>, -} - -/// Message emitted through the [`NotificationsSink`] and processed by the background task -/// dedicated to the peer. -#[derive(Debug)] -enum NotificationsSinkMessage { - /// Message emitted by [`NotificationsSink::reserve_notification`] and - /// [`NotificationsSink::write_notification_now`]. - Notification { - protocol_name: Cow<'static, str>, - message: Vec, - }, - - /// Must close the connection. - ForceClose, -} - -impl NotificationsSink { - /// Sends a notification to the peer. - /// - /// If too many messages are already buffered, the notification is silently discarded and the - /// connection to the peer will be closed shortly after. - /// - /// The protocol name is expected to be checked ahead of calling this method. It is a logic - /// error to send a notification using an unknown protocol. - /// - /// This method will be removed in a future version. - pub fn send_sync_notification<'a>( - &'a self, - protocol_name: Cow<'static, str>, - message: impl Into> - ) { - let mut lock = self.inner.sync_channel.lock(); - let result = lock.try_send(NotificationsSinkMessage::Notification { - protocol_name, - message: message.into() - }); - - if result.is_err() { - // Cloning the `mpsc::Sender` guarantees the allocation of an extra spot in the - // buffer, and therefore that `try_send` will succeed. 
- let _result2 = lock.clone().try_send(NotificationsSinkMessage::ForceClose); - debug_assert!(_result2.map(|()| true).unwrap_or_else(|err| err.is_disconnected())); - } - } - - /// Wait until the remote is ready to accept a notification. - /// - /// Returns an error in the case where the connection is closed. - /// - /// The protocol name is expected to be checked ahead of calling this method. It is a logic - /// error to send a notification using an unknown protocol. - pub async fn reserve_notification<'a>(&'a self, protocol_name: Cow<'static, str>) -> Result, ()> { - let mut lock = self.inner.async_channel.lock().await; - - let poll_ready = future::poll_fn(|cx| lock.poll_ready(cx)).await; - if poll_ready.is_ok() { - Ok(Ready { protocol_name: protocol_name, lock }) - } else { - Err(()) - } - } -} - -/// Notification slot is reserved and the notification can actually be sent. -#[must_use] -#[derive(Debug)] -pub struct Ready<'a> { - /// Guarded channel. The channel inside is guaranteed to not be full. - lock: FuturesMutexGuard<'a, mpsc::Sender>, - /// Name of the protocol. Should match one of the protocols passed at initialization. - protocol_name: Cow<'static, str>, -} - -impl<'a> Ready<'a> { - /// Consumes this slots reservation and actually queues the notification. - /// - /// Returns an error if the substream has been closed. - pub fn send( - mut self, - notification: impl Into> - ) -> Result<(), ()> { - self.lock.start_send(NotificationsSinkMessage::Notification { - protocol_name: self.protocol_name, - message: notification.into(), - }).map_err(|_| ()) - } -} - -/// Error specific to the collection of protocols. -#[derive(Debug, derive_more::Display, derive_more::Error)] -pub enum NotifsHandlerError { - /// Channel of synchronous notifications is full. - SyncNotificationsClogged, - /// Error in legacy protocol. - Legacy(::Error), -} - -impl NotifsHandlerProto { - /// Builds a new handler. - /// - /// `list` is a list of notification protocols names, and the message to send as part of the - /// handshake. At the moment, the message is always the same whether we open a substream - /// ourselves or respond to handshake from the remote. - /// - /// The first protocol in `list` is special-cased as the protocol that contains the handshake - /// to report through the [`NotifsHandlerOut::Open`] event. - /// - /// # Panic - /// - /// - Panics if `list` is empty. 
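As a rough illustration of the `NotificationsSink` API documented earlier in this file (an editor's sketch, not part of the patch): the back-pressured path goes through `reserve_notification` followed by `Ready::send`, while the best-effort path goes through `send_sync_notification`. The protocol name "/my/proto/1" and the surrounding function names are hypothetical; the sketch assumes the sink type from this module is in scope.

    // Editor's sketch only: consumer-side use of the two sending paths.
    use std::borrow::Cow;

    // Back-pressured path: reserve a slot, then queue the notification.
    async fn send_with_backpressure(sink: &NotificationsSink) -> Result<(), ()> {
        let slot = sink.reserve_notification(Cow::Borrowed("/my/proto/1")).await?;
        slot.send(b"payload".to_vec())
    }

    // Best-effort path: queues immediately; if the large synchronous buffer is
    // full, the connection will be force-closed shortly afterwards.
    fn send_best_effort(sink: &NotificationsSink) {
        sink.send_sync_notification(Cow::Borrowed("/my/proto/1"), b"payload".to_vec());
    }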
- /// - pub fn new( - legacy: RegisteredProtocol, - list: impl Into, Arc>>)>>, - ) -> Self { - let list = list.into(); - assert!(!list.is_empty()); - - let out_handlers = list - .clone() - .into_iter() - .map(|(proto_name, initial_message)| { - (NotifsOutHandlerProto::new(proto_name), initial_message) - }).collect(); - - let in_handlers = list.clone() - .into_iter() - .map(|(proto_name, msg)| (NotifsInHandlerProto::new(proto_name), msg)) - .collect(); - - NotifsHandlerProto { - in_handlers, - out_handlers, - legacy: LegacyProtoHandlerProto::new(legacy), - } - } -} - -impl ProtocolsHandler for NotifsHandler { - type InEvent = NotifsHandlerIn; - type OutEvent = NotifsHandlerOut; - type Error = NotifsHandlerError; - type InboundProtocol = SelectUpgrade, RegisteredProtocol>; - type OutboundProtocol = NotificationsOut; - // Index within the `out_handlers` - type OutboundOpenInfo = usize; - type InboundOpenInfo = (); - - fn listen_protocol(&self) -> SubstreamProtocol { - let in_handlers = self.in_handlers.iter() - .map(|(h, _)| h.listen_protocol().into_upgrade().1) - .collect::>(); - - let proto = SelectUpgrade::new(in_handlers, self.legacy.listen_protocol().into_upgrade().1); - SubstreamProtocol::new(proto, ()) - } - - fn inject_fully_negotiated_inbound( - &mut self, - out: >::Output, - (): () - ) { - match out { - EitherOutput::First((out, num)) => - self.in_handlers[num].0.inject_fully_negotiated_inbound(out, ()), - EitherOutput::Second(out) => - self.legacy.inject_fully_negotiated_inbound(out, ()), - } - } - - fn inject_fully_negotiated_outbound( - &mut self, - out: >::Output, - num: Self::OutboundOpenInfo - ) { - self.out_handlers[num].0.inject_fully_negotiated_outbound(out, ()) - } - - fn inject_event(&mut self, message: NotifsHandlerIn) { - match message { - NotifsHandlerIn::Enable => { - if let EnabledState::Enabled = self.enabled { - debug!("enabling already-enabled handler"); - } - self.enabled = EnabledState::Enabled; - self.legacy.inject_event(LegacyProtoHandlerIn::Enable); - for (handler, initial_message) in &mut self.out_handlers { - // We create `initial_message` on a separate line to be sure that the lock - // is released as soon as possible. - let initial_message = initial_message.read().clone(); - handler.inject_event(NotifsOutHandlerIn::Enable { - initial_message, - }); - } - for num in self.pending_in.drain(..) { - // We create `handshake_message` on a separate line to be sure - // that the lock is released as soon as possible. - let handshake_message = self.in_handlers[num].1.read().clone(); - self.in_handlers[num].0 - .inject_event(NotifsInHandlerIn::Accept(handshake_message)); - } - }, - NotifsHandlerIn::Disable => { - if let EnabledState::Disabled = self.enabled { - debug!("disabling already-disabled handler"); - } - self.legacy.inject_event(LegacyProtoHandlerIn::Disable); - // The notifications protocols start in the disabled state. If we were in the - // "Initial" state, then we shouldn't disable the notifications protocols again. - if self.enabled != EnabledState::Initial { - for (handler, _) in &mut self.out_handlers { - handler.inject_event(NotifsOutHandlerIn::Disable); - } - } - self.enabled = EnabledState::Disabled; - for num in self.pending_in.drain(..) 
{ - self.in_handlers[num].0.inject_event(NotifsInHandlerIn::Refuse); - } - }, - } - } - - fn inject_dial_upgrade_error( - &mut self, - num: usize, - err: ProtocolsHandlerUpgrErr - ) { - match err { - ProtocolsHandlerUpgrErr::Timeout => - self.out_handlers[num].0.inject_dial_upgrade_error( - (), - ProtocolsHandlerUpgrErr::Timeout - ), - ProtocolsHandlerUpgrErr::Timer => - self.out_handlers[num].0.inject_dial_upgrade_error( - (), - ProtocolsHandlerUpgrErr::Timer - ), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) => - self.out_handlers[num].0.inject_dial_upgrade_error( - (), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) - ), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)) => - self.out_handlers[num].0.inject_dial_upgrade_error( - (), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)) - ), - } - } - - fn connection_keep_alive(&self) -> KeepAlive { - // Iterate over each handler and return the maximum value. - - let mut ret = self.legacy.connection_keep_alive(); - if ret.is_yes() { - return KeepAlive::Yes; - } - - for (handler, _) in &self.in_handlers { - let val = handler.connection_keep_alive(); - if val.is_yes() { - return KeepAlive::Yes; - } - if ret < val { ret = val; } - } - - for (handler, _) in &self.out_handlers { - let val = handler.connection_keep_alive(); - if val.is_yes() { - return KeepAlive::Yes; - } - if ret < val { ret = val; } - } - - ret - } - - fn poll( - &mut self, - cx: &mut Context, - ) -> Poll< - ProtocolsHandlerEvent - > { - if let Some(notifications_sink_rx) = &mut self.notifications_sink_rx { - 'poll_notifs_sink: loop { - // Before we poll the notifications sink receiver, check that all the notification - // channels are ready to send a message. - // TODO: it is planned that in the future we switch to one `NotificationsSink` per - // protocol, in which case each sink should wait only for its corresponding handler - // to be ready, and not all handlers - // see https://github.com/paritytech/substrate/issues/5670 - for (out_handler, _) in &mut self.out_handlers { - match out_handler.poll_ready(cx) { - Poll::Ready(_) => {}, - Poll::Pending => break 'poll_notifs_sink, - } - } - - let message = match notifications_sink_rx.poll_next_unpin(cx) { - Poll::Ready(Some(msg)) => msg, - Poll::Ready(None) | Poll::Pending => break, - }; - - match message { - NotificationsSinkMessage::Notification { - protocol_name, - message - } => { - let mut found_any_with_name = false; - - for (handler, _) in &mut self.out_handlers { - if *handler.protocol_name() == protocol_name { - found_any_with_name = true; - if handler.is_open() { - handler.send_or_discard(message); - continue 'poll_notifs_sink; - } - } - } - - // This code can be reached via the following scenarios: - // - // - User tried to send a notification on a non-existing protocol. This - // most likely relates to https://github.com/paritytech/substrate/issues/6827 - // - User tried to send a notification to a peer we're not or no longer - // connected to. This happens in a normal scenario due to the racy nature - // of connections and disconnections, and is benign. - // - // We print a warning in the former condition. 
- if !found_any_with_name { - log::warn!( - target: "sub-libp2p", - "Tried to send a notification on non-registered protocol: {:?}", - protocol_name - ); - } - } - NotificationsSinkMessage::ForceClose => { - return Poll::Ready(ProtocolsHandlerEvent::Close(NotifsHandlerError::SyncNotificationsClogged)); - } - } - } - } - - // If `self.pending_handshake` is `Some`, we are in a state where the handshake-bearing - // substream (either the legacy substream or the one special-cased as providing the - // handshake) is open but the user isn't aware yet of the substreams being open. - // When that is the case, neither the legacy substream nor the incoming notifications - // substreams should be polled, otherwise there is a risk of receiving messages from them. - if self.pending_handshake.is_none() { - while let Poll::Ready(ev) = self.legacy.poll(cx) { - match ev { - ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, .. } => - match *protocol.info() {}, - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolOpen { - received_handshake, - .. - }) => { - if self.notifications_sink_rx.is_none() { - debug_assert!(self.pending_handshake.is_none()); - self.pending_handshake = Some(received_handshake); - } - cx.waker().wake_by_ref(); - return Poll::Pending; - }, - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolClosed { reason, .. }) => { - // We consciously drop the receivers despite notifications being potentially - // still buffered up. - self.notifications_sink_rx = None; - - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::Closed { endpoint: self.endpoint.clone(), reason } - )) - }, - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomMessage { message }) => { - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::CustomMessage { message } - )) - }, - ProtocolsHandlerEvent::Close(err) => - return Poll::Ready(ProtocolsHandlerEvent::Close(NotifsHandlerError::Legacy(err))), - } - } - } - - for (handler_num, (handler, handshake_message)) in self.in_handlers.iter_mut().enumerate() { - loop { - let poll = if self.notifications_sink_rx.is_some() { - handler.poll(cx) - } else { - handler.poll_process(cx) - }; - - let ev = match poll { - Poll::Ready(e) => e, - Poll::Pending => break, - }; - - match ev { - ProtocolsHandlerEvent::OutboundSubstreamRequest { .. } => - error!("Incoming substream handler tried to open a substream"), - ProtocolsHandlerEvent::Close(err) => void::unreachable(err), - ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::OpenRequest(_)) => - match self.enabled { - EnabledState::Initial => self.pending_in.push(handler_num), - EnabledState::Enabled => { - // We create `handshake_message` on a separate line to be sure - // that the lock is released as soon as possible. 
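The locking idiom referred to in the comment above, and repeated in several places in both the old and the new handler, is simply: clone the data out of the lock in its own statement so that the temporary read guard is dropped immediately rather than being held across the following call. An editor's sketch of the idiom in isolation, assuming the handshake message is shared as an `Arc<RwLock<Vec<u8>>>`:

    // Editor's sketch only: snapshot the handshake without holding the lock.
    use parking_lot::RwLock;
    use std::sync::Arc;

    fn handshake_snapshot(handshake: &Arc<RwLock<Vec<u8>>>) -> Vec<u8> {
        // The read guard returned by `read()` lives only for this expression,
        // so the lock is released before the returned Vec is used.
        handshake.read().clone()
    }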
- let handshake_message = handshake_message.read().clone(); - handler.inject_event(NotifsInHandlerIn::Accept(handshake_message)) - }, - EnabledState::Disabled => - handler.inject_event(NotifsInHandlerIn::Refuse), - }, - ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed) => {}, - ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(message)) => { - debug_assert!(self.pending_handshake.is_none()); - if self.notifications_sink_rx.is_some() { - let msg = NotifsHandlerOut::Notification { - message, - protocol_name: handler.protocol_name().clone(), - }; - return Poll::Ready(ProtocolsHandlerEvent::Custom(msg)); - } - }, - } - } - } - - for (handler_num, (handler, _)) in self.out_handlers.iter_mut().enumerate() { - while let Poll::Ready(ev) = handler.poll(cx) { - match ev { - ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol } => - return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: protocol - .map_info(|()| handler_num), - }), - ProtocolsHandlerEvent::Close(err) => void::unreachable(err), - - // Opened substream on the handshake-bearing notification protocol. - ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { handshake }) - if handler_num == 0 => - { - if self.notifications_sink_rx.is_none() && self.pending_handshake.is_none() { - self.pending_handshake = Some(handshake); - } - }, - - // Nothing to do in response to other notification substreams being opened - // or closed. - ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { .. }) => {}, - ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed) => {}, - ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Refused) => {}, - } - } - } - - if self.out_handlers.iter().all(|(h, _)| h.is_open() || h.is_refused()) { - if let Some(handshake) = self.pending_handshake.take() { - let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE); - let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE); - let notifications_sink = NotificationsSink { - inner: Arc::new(NotificationsSinkInner { - async_channel: FuturesMutex::new(async_tx), - sync_channel: Mutex::new(sync_tx), - }), - }; - - debug_assert!(self.notifications_sink_rx.is_none()); - self.notifications_sink_rx = Some(stream::select(async_rx.fuse(), sync_rx.fuse())); - - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::Open { - endpoint: self.endpoint.clone(), - received_handshake: handshake, - notifications_sink - } - )) - } - } - - Poll::Pending - } -} diff --git a/client/network/src/protocol/generic_proto/handler/legacy.rs b/client/network/src/protocol/generic_proto/handler/legacy.rs deleted file mode 100644 index 404093553785c..0000000000000 --- a/client/network/src/protocol/generic_proto/handler/legacy.rs +++ /dev/null @@ -1,559 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . 
- -use crate::protocol::generic_proto::upgrade::{RegisteredProtocol, RegisteredProtocolEvent, RegisteredProtocolSubstream}; -use bytes::BytesMut; -use futures::prelude::*; -use futures_timer::Delay; -use libp2p::core::{ConnectedPoint, PeerId, Endpoint}; -use libp2p::core::upgrade::{InboundUpgrade, OutboundUpgrade}; -use libp2p::swarm::{ - ProtocolsHandler, ProtocolsHandlerEvent, - IntoProtocolsHandler, - KeepAlive, - ProtocolsHandlerUpgrErr, - SubstreamProtocol, - NegotiatedSubstream, -}; -use log::{debug, error}; -use smallvec::{smallvec, SmallVec}; -use std::{borrow::Cow, collections::VecDeque, convert::Infallible, error, fmt, io, mem}; -use std::{pin::Pin, task::{Context, Poll}, time::Duration}; - -/// Implements the `IntoProtocolsHandler` trait of libp2p. -/// -/// Every time a connection with a remote starts, an instance of this struct is created and -/// sent to a background task dedicated to this connection. Once the connection is established, -/// it is turned into a `LegacyProtoHandler`. It then handles all communications that are specific -/// to Substrate on that single connection. -/// -/// Note that there can be multiple instance of this struct simultaneously for same peer, -/// if there are multiple established connections to the peer. -/// -/// ## State of the handler -/// -/// There are six possible states for the handler: -/// -/// - Enabled and open, which is a normal operation. -/// - Enabled and closed, in which case it will try to open substreams. -/// - Disabled and open, in which case it will try to close substreams. -/// - Disabled and closed, in which case the handler is idle. The connection will be -/// garbage-collected after a few seconds if nothing more happens. -/// - Initializing and open. -/// - Initializing and closed, which is the state the handler starts in. -/// -/// The Init/Enabled/Disabled state is entirely controlled by the user by sending `Enable` or -/// `Disable` messages to the handler. The handler itself never transitions automatically between -/// these states. For example, if the handler reports a network misbehaviour, it will close the -/// substreams but it is the role of the user to send a `Disabled` event if it wants the connection -/// to close. Otherwise, the handler will try to reopen substreams. -/// -/// The handler starts in the "Initializing" state and must be transitionned to Enabled or Disabled -/// as soon as possible. -/// -/// The Open/Closed state is decided by the handler and is reported with the `CustomProtocolOpen` -/// and `CustomProtocolClosed` events. The `CustomMessage` event can only be generated if the -/// handler is open. -/// -/// ## How it works -/// -/// When the handler is created, it is initially in the `Init` state and waits for either a -/// `Disable` or an `Enable` message from the outer layer. At any time, the outer layer is free to -/// toggle the handler between the disabled and enabled states. -/// -/// When the handler switches to "enabled", it opens a substream and negotiates the protocol named -/// `/substrate/xxx`, where `xxx` is chosen by the user and depends on the chain. -/// -/// For backwards compatibility reasons, when we switch to "enabled" for the first time (while we -/// are still in "init" mode) and we are the connection listener, we don't open a substream. -/// -/// In order the handle the situation where both the remote and us get enabled at the same time, -/// we tolerate multiple substreams open at the same time. Messages are transmitted on an arbitrary -/// substream. 
The endpoints don't try to agree on a single substream. -/// -/// We consider that we are now "closed" if the remote closes all the existing substreams. -/// Re-opening it can then be performed by closing all active substream and re-opening one. -/// -pub struct LegacyProtoHandlerProto { - /// Configuration for the protocol upgrade to negotiate. - protocol: RegisteredProtocol, -} - -impl LegacyProtoHandlerProto { - /// Builds a new `LegacyProtoHandlerProto`. - pub fn new(protocol: RegisteredProtocol) -> Self { - LegacyProtoHandlerProto { - protocol, - } - } -} - -impl IntoProtocolsHandler for LegacyProtoHandlerProto { - type Handler = LegacyProtoHandler; - - fn inbound_protocol(&self) -> RegisteredProtocol { - self.protocol.clone() - } - - fn into_handler(self, remote_peer_id: &PeerId, _: &ConnectedPoint) -> Self::Handler { - LegacyProtoHandler { - protocol: self.protocol, - remote_peer_id: remote_peer_id.clone(), - state: ProtocolState::Init { - substreams: SmallVec::new(), - init_deadline: Delay::new(Duration::from_secs(20)) - }, - events_queue: VecDeque::new(), - } - } -} - -/// The actual handler once the connection has been established. -pub struct LegacyProtoHandler { - /// Configuration for the protocol upgrade to negotiate. - protocol: RegisteredProtocol, - - /// State of the communications with the remote. - state: ProtocolState, - - /// Identifier of the node we're talking to. Used only for logging purposes and shouldn't have - /// any influence on the behaviour. - remote_peer_id: PeerId, - - /// Queue of events to send to the outside. - /// - /// This queue must only ever be modified to insert elements at the back, or remove the first - /// element. - events_queue: VecDeque< - ProtocolsHandlerEvent - >, -} - -/// State of the handler. -enum ProtocolState { - /// Waiting for the behaviour to tell the handler whether it is enabled or disabled. - Init { - /// List of substreams opened by the remote but that haven't been processed yet. - /// For each substream, also includes the handshake message that we have received. - substreams: SmallVec<[(RegisteredProtocolSubstream, Vec); 6]>, - /// Deadline after which the initialization is abnormally long. - init_deadline: Delay, - }, - - /// Handler is ready to accept incoming substreams. - /// If we are in this state, we haven't sent any `CustomProtocolOpen` yet. - Opening, - - /// Normal operating mode. Contains the substreams that are open. - /// If we are in this state, we have sent a `CustomProtocolOpen` message to the outside. - Normal { - /// The substreams where bidirectional communications happen. - substreams: SmallVec<[RegisteredProtocolSubstream; 4]>, - /// Contains substreams which are being shut down. - shutdown: SmallVec<[RegisteredProtocolSubstream; 4]>, - }, - - /// We are disabled. Contains substreams that are being closed. - /// If we are in this state, either we have sent a `CustomProtocolClosed` message to the - /// outside or we have never sent any `CustomProtocolOpen` in the first place. - Disabled { - /// List of substreams to shut down. - shutdown: SmallVec<[RegisteredProtocolSubstream; 6]>, - - /// If true, we should reactivate the handler after all the substreams in `shutdown` have - /// been closed. - /// - /// Since we don't want to mix old and new substreams, we wait for all old substreams to - /// be closed before opening any new one. - reenable: bool, - }, - - /// In this state, we don't care about anything anymore and need to kill the connection as soon - /// as possible. 
- KillAsap, - - /// We sometimes temporarily switch to this state during processing. If we are in this state - /// at the beginning of a method, that means something bad happened in the source code. - Poisoned, -} - -/// Event that can be received by a `LegacyProtoHandler`. -#[derive(Debug)] -pub enum LegacyProtoHandlerIn { - /// The node should start using custom protocols. - Enable, - - /// The node should stop using custom protocols. - Disable, -} - -/// Event that can be emitted by a `LegacyProtoHandler`. -#[derive(Debug)] -pub enum LegacyProtoHandlerOut { - /// Opened a custom protocol with the remote. - CustomProtocolOpen { - /// Version of the protocol that has been opened. - version: u8, - /// Handshake message that has been sent to us. - /// This is normally a "Status" message, but this out of the concern of this code. - received_handshake: Vec, - }, - - /// Closed a custom protocol with the remote. - CustomProtocolClosed { - /// Reason why the substream closed, for diagnostic purposes. - reason: Cow<'static, str>, - }, - - /// Receives a message on a custom protocol substream. - CustomMessage { - /// Message that has been received. - message: BytesMut, - }, -} - -impl LegacyProtoHandler { - /// Enables the handler. - fn enable(&mut self) { - self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { - ProtocolState::Poisoned => { - error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", - self.remote_peer_id); - ProtocolState::Poisoned - } - - ProtocolState::Init { substreams: mut incoming, .. } => { - if incoming.is_empty() { - ProtocolState::Opening - } else { - let event = LegacyProtoHandlerOut::CustomProtocolOpen { - version: incoming[0].0.protocol_version(), - received_handshake: mem::replace(&mut incoming[0].1, Vec::new()), - }; - self.events_queue.push_back(ProtocolsHandlerEvent::Custom(event)); - ProtocolState::Normal { - substreams: incoming.into_iter().map(|(s, _)| s).collect(), - shutdown: SmallVec::new() - } - } - } - - st @ ProtocolState::KillAsap => st, - st @ ProtocolState::Opening { .. } => st, - st @ ProtocolState::Normal { .. } => st, - ProtocolState::Disabled { shutdown, .. } => { - ProtocolState::Disabled { shutdown, reenable: true } - } - } - } - - /// Disables the handler. - fn disable(&mut self) { - self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { - ProtocolState::Poisoned => { - error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", - self.remote_peer_id); - ProtocolState::Poisoned - } - - ProtocolState::Init { substreams: shutdown, .. } => { - let mut shutdown = shutdown.into_iter().map(|(s, _)| s).collect::>(); - for s in &mut shutdown { - s.shutdown(); - } - ProtocolState::Disabled { shutdown, reenable: false } - } - - ProtocolState::Opening { .. } | ProtocolState::Normal { .. } => - // At the moment, if we get disabled while things were working, we kill the entire - // connection in order to force a reset of the state. - // This is obviously an extremely shameful way to do things, but at the time of - // the writing of this comment, the networking works very poorly and a solution - // needs to be found. - ProtocolState::KillAsap, - - ProtocolState::Disabled { shutdown, .. } => - ProtocolState::Disabled { shutdown, reenable: false }, - - ProtocolState::KillAsap => ProtocolState::KillAsap, - }; - } - - /// Polls the state for events. Optionally returns an event to produce. 
- #[must_use] - fn poll_state(&mut self, cx: &mut Context) - -> Option> { - match mem::replace(&mut self.state, ProtocolState::Poisoned) { - ProtocolState::Poisoned => { - error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", - self.remote_peer_id); - self.state = ProtocolState::Poisoned; - None - } - - ProtocolState::Init { substreams, mut init_deadline } => { - match Pin::new(&mut init_deadline).poll(cx) { - Poll::Ready(()) => { - error!(target: "sub-libp2p", "Handler initialization process is too long \ - with {:?}", self.remote_peer_id); - self.state = ProtocolState::KillAsap; - }, - Poll::Pending => { - self.state = ProtocolState::Init { substreams, init_deadline }; - } - } - - None - } - - ProtocolState::Opening => { - self.state = ProtocolState::Opening; - None - } - - ProtocolState::Normal { mut substreams, mut shutdown } => { - for n in (0..substreams.len()).rev() { - let mut substream = substreams.swap_remove(n); - match Pin::new(&mut substream).poll_next(cx) { - Poll::Pending => substreams.push(substream), - Poll::Ready(Some(Ok(RegisteredProtocolEvent::Message(message)))) => { - let event = LegacyProtoHandlerOut::CustomMessage { - message - }; - substreams.push(substream); - self.state = ProtocolState::Normal { substreams, shutdown }; - return Some(ProtocolsHandlerEvent::Custom(event)); - }, - Poll::Ready(Some(Ok(RegisteredProtocolEvent::Clogged))) => { - shutdown.push(substream); - if substreams.is_empty() { - let event = LegacyProtoHandlerOut::CustomProtocolClosed { - reason: "Legacy substream clogged".into(), - }; - self.state = ProtocolState::Disabled { - shutdown: shutdown.into_iter().collect(), - reenable: true - }; - return Some(ProtocolsHandlerEvent::Custom(event)); - } - } - Poll::Ready(None) => { - shutdown.push(substream); - if substreams.is_empty() { - let event = LegacyProtoHandlerOut::CustomProtocolClosed { - reason: "All substreams have been closed by the remote".into(), - }; - self.state = ProtocolState::Disabled { - shutdown: shutdown.into_iter().collect(), - reenable: true - }; - return Some(ProtocolsHandlerEvent::Custom(event)); - } - } - Poll::Ready(Some(Err(err))) => { - if substreams.is_empty() { - let event = LegacyProtoHandlerOut::CustomProtocolClosed { - reason: format!("Error on the last substream: {:?}", err).into(), - }; - self.state = ProtocolState::Disabled { - shutdown: shutdown.into_iter().collect(), - reenable: true - }; - return Some(ProtocolsHandlerEvent::Custom(event)); - } else { - debug!(target: "sub-libp2p", "Error on extra substream: {:?}", err); - } - } - } - } - - // This code is reached is none if and only if none of the substreams are in a ready state. - self.state = ProtocolState::Normal { substreams, shutdown }; - None - } - - ProtocolState::Disabled { mut shutdown, reenable } => { - shutdown_list(&mut shutdown, cx); - // If `reenable` is `true`, that means we should open the substreams system again - // after all the substreams are closed. 
- if reenable && shutdown.is_empty() { - self.state = ProtocolState::Opening; - } else { - self.state = ProtocolState::Disabled { shutdown, reenable }; - } - None - } - - ProtocolState::KillAsap => None, - } - } -} - -impl ProtocolsHandler for LegacyProtoHandler { - type InEvent = LegacyProtoHandlerIn; - type OutEvent = LegacyProtoHandlerOut; - type Error = ConnectionKillError; - type InboundProtocol = RegisteredProtocol; - type OutboundProtocol = RegisteredProtocol; - type OutboundOpenInfo = Infallible; - type InboundOpenInfo = (); - - fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(self.protocol.clone(), ()) - } - - fn inject_fully_negotiated_inbound( - &mut self, - (mut substream, received_handshake): >::Output, - (): () - ) { - self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { - ProtocolState::Poisoned => { - error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", - self.remote_peer_id); - ProtocolState::Poisoned - } - - ProtocolState::Init { mut substreams, init_deadline } => { - if substream.endpoint() == Endpoint::Dialer { - error!(target: "sub-libp2p", "Opened dialing substream with {:?} before \ - initialization", self.remote_peer_id); - } - substreams.push((substream, received_handshake)); - ProtocolState::Init { substreams, init_deadline } - } - - ProtocolState::Opening { .. } => { - let event = LegacyProtoHandlerOut::CustomProtocolOpen { - version: substream.protocol_version(), - received_handshake, - }; - self.events_queue.push_back(ProtocolsHandlerEvent::Custom(event)); - ProtocolState::Normal { - substreams: smallvec![substream], - shutdown: SmallVec::new() - } - } - - ProtocolState::Normal { substreams: mut existing, shutdown } => { - existing.push(substream); - ProtocolState::Normal { substreams: existing, shutdown } - } - - ProtocolState::Disabled { mut shutdown, .. } => { - substream.shutdown(); - shutdown.push(substream); - ProtocolState::Disabled { shutdown, reenable: false } - } - - ProtocolState::KillAsap => ProtocolState::KillAsap, - }; - } - - fn inject_fully_negotiated_outbound( - &mut self, - _: >::Output, - unreachable: Self::OutboundOpenInfo - ) { - match unreachable {} - } - - fn inject_event(&mut self, message: LegacyProtoHandlerIn) { - match message { - LegacyProtoHandlerIn::Disable => self.disable(), - LegacyProtoHandlerIn::Enable => self.enable(), - } - } - - fn inject_dial_upgrade_error( - &mut self, - unreachable: Self::OutboundOpenInfo, - _: ProtocolsHandlerUpgrErr - ) { - match unreachable {} - } - - fn connection_keep_alive(&self) -> KeepAlive { - match self.state { - ProtocolState::Init { .. } | ProtocolState::Normal { .. } => KeepAlive::Yes, - ProtocolState::Opening { .. } | ProtocolState::Disabled { .. } | - ProtocolState::Poisoned | ProtocolState::KillAsap => KeepAlive::No, - } - } - - fn poll( - &mut self, - cx: &mut Context, - ) -> Poll< - ProtocolsHandlerEvent - > { - // Flush the events queue if necessary. - if let Some(event) = self.events_queue.pop_front() { - return Poll::Ready(event) - } - - // Kill the connection if needed. - if let ProtocolState::KillAsap = self.state { - return Poll::Ready(ProtocolsHandlerEvent::Close(ConnectionKillError)); - } - - // Process all the substreams. 
- if let Some(event) = self.poll_state(cx) { - return Poll::Ready(event) - } - - Poll::Pending - } -} - -impl fmt::Debug for LegacyProtoHandler { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - f.debug_struct("LegacyProtoHandler") - .finish() - } -} - -/// Given a list of substreams, tries to shut them down. The substreams that have been successfully -/// shut down are removed from the list. -fn shutdown_list - (list: &mut SmallVec>>, - cx: &mut Context) -{ - 'outer: for n in (0..list.len()).rev() { - let mut substream = list.swap_remove(n); - loop { - match substream.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(_))) => {} - Poll::Pending => break, - Poll::Ready(Some(Err(_))) | Poll::Ready(None) => continue 'outer, - } - } - list.push(substream); - } -} - -/// Error returned when switching from normal to disabled. -#[derive(Debug)] -pub struct ConnectionKillError; - -impl error::Error for ConnectionKillError { -} - -impl fmt::Display for ConnectionKillError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Connection kill when switching from normal to disabled") - } -} diff --git a/client/network/src/protocol/generic_proto/handler/notif_in.rs b/client/network/src/protocol/generic_proto/handler/notif_in.rs deleted file mode 100644 index d3b505e0de3e2..0000000000000 --- a/client/network/src/protocol/generic_proto/handler/notif_in.rs +++ /dev/null @@ -1,293 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for ingoing -//! substreams for a single gossiping protocol. -//! -//! > **Note**: Each instance corresponds to a single protocol. In order to support multiple -//! > protocols, you need to create multiple instances and group them. -//! - -use crate::protocol::generic_proto::upgrade::{NotificationsIn, NotificationsInSubstream}; -use bytes::BytesMut; -use futures::prelude::*; -use libp2p::core::{ConnectedPoint, PeerId}; -use libp2p::core::upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade}; -use libp2p::swarm::{ - ProtocolsHandler, ProtocolsHandlerEvent, - IntoProtocolsHandler, - KeepAlive, - ProtocolsHandlerUpgrErr, - SubstreamProtocol, - NegotiatedSubstream, -}; -use log::{error, warn}; -use std::{borrow::Cow, collections::VecDeque, fmt, pin::Pin, task::{Context, Poll}}; - -/// Implements the `IntoProtocolsHandler` trait of libp2p. -/// -/// Every time a connection with a remote starts, an instance of this struct is created and -/// sent to a background task dedicated to this connection. Once the connection is established, -/// it is turned into a [`NotifsInHandler`]. -pub struct NotifsInHandlerProto { - /// Configuration for the protocol upgrade to negotiate. 
- in_protocol: NotificationsIn, -} - -/// The actual handler once the connection has been established. -pub struct NotifsInHandler { - /// Configuration for the protocol upgrade to negotiate for inbound substreams. - in_protocol: NotificationsIn, - - /// Substream that is open with the remote. - substream: Option>, - - /// If the substream is opened and closed rapidly, we can emit several `OpenRequest` and - /// `Closed` messages in a row without the handler having time to respond with `Accept` or - /// `Refuse`. - /// - /// In order to keep the state consistent, we increment this variable every time an - /// `OpenRequest` is emitted and decrement it every time an `Accept` or `Refuse` is received. - pending_accept_refuses: usize, - - /// Queue of events to send to the outside. - /// - /// This queue is only ever modified to insert elements at the back, or remove the first - /// element. - events_queue: VecDeque>, -} - -/// Event that can be received by a `NotifsInHandler`. -#[derive(Debug, Clone)] -pub enum NotifsInHandlerIn { - /// Can be sent back as a response to an `OpenRequest`. Contains the status message to send - /// to the remote. - /// - /// After sending this to the handler, the substream is now considered open and `Notif` events - /// can be received. - Accept(Vec), - - /// Can be sent back as a response to an `OpenRequest`. - Refuse, -} - -/// Event that can be emitted by a `NotifsInHandler`. -#[derive(Debug)] -pub enum NotifsInHandlerOut { - /// The remote wants to open a substream. Contains the initial message sent by the remote - /// when the substream has been opened. - /// - /// Every time this event is emitted, a corresponding `Accepted` or `Refused` **must** be sent - /// back even if a `Closed` is received. - OpenRequest(Vec), - - /// The notifications substream has been closed by the remote. In order to avoid race - /// conditions, this does **not** cancel any previously-sent `OpenRequest`. - Closed, - - /// Received a message on the notifications substream. - /// - /// Can only happen after an `Accept` and before a `Closed`. - Notif(BytesMut), -} - -impl NotifsInHandlerProto { - /// Builds a new `NotifsInHandlerProto`. - pub fn new( - protocol_name: impl Into> - ) -> Self { - NotifsInHandlerProto { - in_protocol: NotificationsIn::new(protocol_name), - } - } -} - -impl IntoProtocolsHandler for NotifsInHandlerProto { - type Handler = NotifsInHandler; - - fn inbound_protocol(&self) -> NotificationsIn { - self.in_protocol.clone() - } - - fn into_handler(self, _: &PeerId, _: &ConnectedPoint) -> Self::Handler { - NotifsInHandler { - in_protocol: self.in_protocol, - substream: None, - pending_accept_refuses: 0, - events_queue: VecDeque::new(), - } - } -} - -impl NotifsInHandler { - /// Returns the name of the protocol that we accept. - pub fn protocol_name(&self) -> &Cow<'static, str> { - self.in_protocol.protocol_name() - } - - /// Equivalent to the `poll` method of `ProtocolsHandler`, except that it is guaranteed to - /// never generate [`NotifsInHandlerOut::Notif`]. - /// - /// Use this method in situations where it is not desirable to receive events but still - /// necessary to drive any potential incoming handshake or request. 
- pub fn poll_process( - &mut self, - cx: &mut Context - ) -> Poll< - ProtocolsHandlerEvent - > { - if let Some(event) = self.events_queue.pop_front() { - return Poll::Ready(event) - } - - match self.substream.as_mut().map(|s| NotificationsInSubstream::poll_process(Pin::new(s), cx)) { - None | Some(Poll::Pending) => {}, - Some(Poll::Ready(Ok(v))) => match v {}, - Some(Poll::Ready(Err(_))) => { - self.substream = None; - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed)); - }, - } - - Poll::Pending - } -} - -impl ProtocolsHandler for NotifsInHandler { - type InEvent = NotifsInHandlerIn; - type OutEvent = NotifsInHandlerOut; - type Error = void::Void; - type InboundProtocol = NotificationsIn; - type OutboundProtocol = DeniedUpgrade; - type OutboundOpenInfo = (); - type InboundOpenInfo = (); - - fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(self.in_protocol.clone(), ()) - } - - fn inject_fully_negotiated_inbound( - &mut self, - (msg, proto): >::Output, - (): () - ) { - // If a substream already exists, we drop it and replace it with the new incoming one. - if self.substream.is_some() { - self.events_queue.push_back(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed)); - } - - // Note that we drop the existing substream, which will send an equivalent to a TCP "RST" - // to the remote and force-close the substream. It might seem like an unclean way to get - // rid of a substream. However, keep in mind that it is invalid for the remote to open - // multiple such substreams, and therefore sending a "RST" is not an incorrect thing to do. - self.substream = Some(proto); - - self.events_queue.push_back(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::OpenRequest(msg))); - self.pending_accept_refuses = self.pending_accept_refuses - .checked_add(1) - .unwrap_or_else(|| { - error!(target: "sub-libp2p", "Overflow in pending_accept_refuses"); - usize::max_value() - }); - } - - fn inject_fully_negotiated_outbound( - &mut self, - out: >::Output, - _: Self::OutboundOpenInfo - ) { - // We never emit any outgoing substream. - void::unreachable(out) - } - - fn inject_event(&mut self, message: NotifsInHandlerIn) { - self.pending_accept_refuses = match self.pending_accept_refuses.checked_sub(1) { - Some(v) => v, - None => { - error!( - target: "sub-libp2p", - "Inconsistent state: received Accept/Refuse when no pending request exists" - ); - return; - } - }; - - // If we send multiple `OpenRequest`s in a row, we will receive back multiple - // `Accept`/`Refuse` messages. All of them are obsolete except the last one. - if self.pending_accept_refuses != 0 { - return; - } - - match (message, self.substream.as_mut()) { - (NotifsInHandlerIn::Accept(message), Some(sub)) => sub.send_handshake(message), - (NotifsInHandlerIn::Accept(_), None) => {}, - (NotifsInHandlerIn::Refuse, _) => self.substream = None, - } - } - - fn inject_dial_upgrade_error(&mut self, _: (), _: ProtocolsHandlerUpgrErr) { - error!(target: "sub-libp2p", "Received dial upgrade error in inbound-only handler"); - } - - fn connection_keep_alive(&self) -> KeepAlive { - if self.substream.is_some() { - KeepAlive::Yes - } else { - KeepAlive::No - } - } - - fn poll( - &mut self, - cx: &mut Context, - ) -> Poll< - ProtocolsHandlerEvent - > { - // Flush the events queue if necessary. 
- if let Some(event) = self.events_queue.pop_front() { - return Poll::Ready(event) - } - - match self.substream.as_mut().map(|s| Stream::poll_next(Pin::new(s), cx)) { - None | Some(Poll::Pending) => {}, - Some(Poll::Ready(Some(Ok(msg)))) => { - if self.pending_accept_refuses != 0 { - warn!( - target: "sub-libp2p", - "Bad state in inbound-only handler: notif before accepting substream" - ); - } - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(msg))) - }, - Some(Poll::Ready(None)) | Some(Poll::Ready(Some(Err(_)))) => { - self.substream = None; - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed)); - }, - } - - Poll::Pending - } -} - -impl fmt::Debug for NotifsInHandler { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - f.debug_struct("NotifsInHandler") - .field("substream_open", &self.substream.is_some()) - .finish() - } -} diff --git a/client/network/src/protocol/generic_proto/handler/notif_out.rs b/client/network/src/protocol/generic_proto/handler/notif_out.rs deleted file mode 100644 index 414e62c0d135f..0000000000000 --- a/client/network/src/protocol/generic_proto/handler/notif_out.rs +++ /dev/null @@ -1,444 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for outgoing -//! substreams of a single gossiping protocol. -//! -//! > **Note**: Each instance corresponds to a single protocol. In order to support multiple -//! > protocols, you need to create multiple instances and group them. -//! - -use crate::protocol::generic_proto::upgrade::{NotificationsOut, NotificationsOutSubstream, NotificationsHandshakeError}; -use futures::prelude::*; -use libp2p::core::{ConnectedPoint, PeerId}; -use libp2p::core::upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade}; -use libp2p::swarm::{ - ProtocolsHandler, ProtocolsHandlerEvent, - IntoProtocolsHandler, - KeepAlive, - ProtocolsHandlerUpgrErr, - SubstreamProtocol, - NegotiatedSubstream, -}; -use log::{debug, warn, error}; -use std::{ - borrow::Cow, collections::VecDeque, fmt, mem, pin::Pin, task::{Context, Poll, Waker}, - time::Duration -}; -use wasm_timer::Instant; - -/// Maximum duration to open a substream and receive the handshake message. After that, we -/// consider that we failed to open the substream. -const OPEN_TIMEOUT: Duration = Duration::from_secs(10); -/// After successfully establishing a connection with the remote, we keep the connection open for -/// at least this amount of time in order to give the rest of the code the chance to notify us to -/// open substreams. -const INITIAL_KEEPALIVE_TIME: Duration = Duration::from_secs(5); - -/// Implements the `IntoProtocolsHandler` trait of libp2p. 
-/// -/// Every time a connection with a remote starts, an instance of this struct is created and -/// sent to a background task dedicated to this connection. Once the connection is established, -/// it is turned into a [`NotifsOutHandler`]. -/// -/// See the documentation of [`NotifsOutHandler`] for more information. -pub struct NotifsOutHandlerProto { - /// Name of the protocol to negotiate. - protocol_name: Cow<'static, str>, -} - -impl NotifsOutHandlerProto { - /// Builds a new [`NotifsOutHandlerProto`]. Will use the given protocol name for the - /// notifications substream. - pub fn new(protocol_name: impl Into>) -> Self { - NotifsOutHandlerProto { - protocol_name: protocol_name.into(), - } - } -} - -impl IntoProtocolsHandler for NotifsOutHandlerProto { - type Handler = NotifsOutHandler; - - fn inbound_protocol(&self) -> DeniedUpgrade { - DeniedUpgrade - } - - fn into_handler(self, _: &PeerId, _: &ConnectedPoint) -> Self::Handler { - NotifsOutHandler { - protocol_name: self.protocol_name, - when_connection_open: Instant::now(), - state: State::Disabled, - events_queue: VecDeque::new(), - } - } -} - -/// Handler for an outbound notification substream. -/// -/// When a connection is established, this handler starts in the "disabled" state, meaning that -/// no substream will be open. -/// -/// One can try open a substream by sending an [`NotifsOutHandlerIn::Enable`] message to the -/// handler. Once done, the handler will try to establish then maintain an outbound substream with -/// the remote for the purpose of sending notifications to it. -pub struct NotifsOutHandler { - /// Name of the protocol to negotiate. - protocol_name: Cow<'static, str>, - - /// Relationship with the node we're connected to. - state: State, - - /// When the connection with the remote has been successfully established. - when_connection_open: Instant, - - /// Queue of events to send to the outside. - /// - /// This queue must only ever be modified to insert elements at the back, or remove the first - /// element. - events_queue: VecDeque>, -} - -/// Our relationship with the node we're connected to. -enum State { - /// The handler is disabled and idle. No substream is open. - Disabled, - - /// The handler is disabled. A substream is still open and needs to be closed. - /// - /// > **Important**: Having this state means that `poll_close` has been called at least once, - /// > but the `Sink` API is unclear about whether or not the stream can then - /// > be recovered. Because of that, we must never switch from the - /// > `DisabledOpen` state to the `Open` state while keeping the same substream. - DisabledOpen(NotificationsOutSubstream), - - /// The handler is disabled but we are still trying to open a substream with the remote. - /// - /// If the handler gets enabled again, we can immediately switch to `Opening`. - DisabledOpening, - - /// The handler is enabled and we are trying to open a substream with the remote. - Opening { - /// The initial message that we sent. Necessary if we need to re-open a substream. - initial_message: Vec, - }, - - /// The handler is enabled. We have tried opening a substream in the past but the remote - /// refused it. - Refused, - - /// The handler is enabled and substream is open. - Open { - /// Substream that is currently open. - substream: NotificationsOutSubstream, - /// Waker for the last task that got `Poll::Pending` from `poll_ready`, to notify - /// when the open substream closes due to being disabled or encountering an - /// error, i.e. 
to notify the task as soon as the substream becomes unavailable, - /// without waiting for an underlying I/O task wakeup. - close_waker: Option, - /// The initial message that we sent. Necessary if we need to re-open a substream. - initial_message: Vec, - }, - - /// Poisoned state. Shouldn't be found in the wild. - Poisoned, -} - -/// Event that can be received by a `NotifsOutHandler`. -#[derive(Debug)] -pub enum NotifsOutHandlerIn { - /// Enables the notifications substream for this node. The handler will try to maintain a - /// substream with the remote. - Enable { - /// Initial message to send to remote nodes when we open substreams. - initial_message: Vec, - }, - - /// Disables the notifications substream for this node. This is the default state. - Disable, -} - -/// Event that can be emitted by a `NotifsOutHandler`. -#[derive(Debug)] -pub enum NotifsOutHandlerOut { - /// The notifications substream has been accepted by the remote. - Open { - /// Handshake message sent by the remote after we opened the substream. - handshake: Vec, - }, - - /// The notifications substream has been closed by the remote. - Closed, - - /// We tried to open a notifications substream, but the remote refused it. - /// - /// Can only happen if we're in a closed state. - Refused, -} - -impl NotifsOutHandler { - /// Returns true if the substream is currently open. - pub fn is_open(&self) -> bool { - match &self.state { - State::Disabled => false, - State::DisabledOpening => false, - State::DisabledOpen(_) => true, - State::Opening { .. } => false, - State::Refused => false, - State::Open { .. } => true, - State::Poisoned => false, - } - } - - /// Returns `true` if there has been an attempt to open the substream, but the remote refused - /// the substream. - /// - /// Always returns `false` if the handler is in a disabled state. - pub fn is_refused(&self) -> bool { - match &self.state { - State::Disabled => false, - State::DisabledOpening => false, - State::DisabledOpen(_) => false, - State::Opening { .. } => false, - State::Refused => true, - State::Open { .. } => false, - State::Poisoned => false, - } - } - - /// Returns the name of the protocol that we negotiate. - pub fn protocol_name(&self) -> &Cow<'static, str> { - &self.protocol_name - } - - /// Polls whether the outbound substream is ready to send a notification. - /// - /// - Returns `Poll::Pending` if the substream is open but not ready to send a notification. - /// - Returns `Poll::Ready(true)` if the substream is ready to send a notification. - /// - Returns `Poll::Ready(false)` if the substream is closed. - /// - pub fn poll_ready(&mut self, cx: &mut Context) -> Poll { - if let State::Open { substream, close_waker, .. } = &mut self.state { - match substream.poll_ready_unpin(cx) { - Poll::Ready(Ok(())) => Poll::Ready(true), - Poll::Ready(Err(_)) => Poll::Ready(false), - Poll::Pending => { - *close_waker = Some(cx.waker().clone()); - Poll::Pending - } - } - } else { - Poll::Ready(false) - } - } - - /// Sends out a notification. - /// - /// If the substream is closed, or not ready to send out a notification yet, then the - /// notification is silently discarded. - /// - /// You are encouraged to call [`NotifsOutHandler::poll_ready`] beforehand to determine - /// whether this will succeed. If `Poll::Ready(true)` is returned, then this method will send - /// out a notification. - pub fn send_or_discard(&mut self, notification: Vec) { - if let State::Open { substream, .. 
} = &mut self.state { - let _ = substream.start_send_unpin(notification); - } - } -} - -impl ProtocolsHandler for NotifsOutHandler { - type InEvent = NotifsOutHandlerIn; - type OutEvent = NotifsOutHandlerOut; - type Error = void::Void; - type InboundProtocol = DeniedUpgrade; - type OutboundProtocol = NotificationsOut; - type OutboundOpenInfo = (); - type InboundOpenInfo = (); - - fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(DeniedUpgrade, ()) - } - - fn inject_fully_negotiated_inbound( - &mut self, - proto: >::Output, - (): () - ) { - // We should never reach here. `proto` is a `Void`. - void::unreachable(proto) - } - - fn inject_fully_negotiated_outbound( - &mut self, - (handshake_msg, substream): >::Output, - _: () - ) { - match mem::replace(&mut self.state, State::Poisoned) { - State::Opening { initial_message } => { - let ev = NotifsOutHandlerOut::Open { handshake: handshake_msg }; - self.events_queue.push_back(ProtocolsHandlerEvent::Custom(ev)); - self.state = State::Open { substream, initial_message, close_waker: None }; - }, - // If the handler was disabled while we were negotiating the protocol, immediately - // close it. - State::DisabledOpening => self.state = State::DisabledOpen(substream), - - // Any other situation should never happen. - State::Disabled | State::Refused | State::Open { .. } | State::DisabledOpen(_) => - error!("☎️ State mismatch in notifications handler: substream already open"), - State::Poisoned => error!("☎️ Notifications handler in a poisoned state"), - } - } - - fn inject_event(&mut self, message: NotifsOutHandlerIn) { - match message { - NotifsOutHandlerIn::Enable { initial_message } => { - match mem::replace(&mut self.state, State::Poisoned) { - State::Disabled => { - let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message.clone()); - self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(proto, ()).with_timeout(OPEN_TIMEOUT), - }); - self.state = State::Opening { initial_message }; - }, - State::DisabledOpening => self.state = State::Opening { initial_message }, - State::DisabledOpen(mut sub) => { - // As documented above, in this state we have already called `poll_close` - // once on the substream, and it is unclear whether the substream can then - // be recovered. When in doubt, let's drop the existing substream and - // open a new one. - if sub.close().now_or_never().is_none() { - warn!( - target: "sub-libp2p", - "📞 Improperly closed outbound notifications substream" - ); - } - - let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message.clone()); - self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(proto, ()).with_timeout(OPEN_TIMEOUT), - }); - self.state = State::Opening { initial_message }; - }, - st @ State::Opening { .. } | st @ State::Refused | st @ State::Open { .. } => { - debug!(target: "sub-libp2p", - "Tried to enable notifications handler that was already enabled"); - self.state = st; - } - State::Poisoned => error!("Notifications handler in a poisoned state"), - } - } - - NotifsOutHandlerIn::Disable => { - match mem::replace(&mut self.state, State::Poisoned) { - st @ State::Disabled | st @ State::DisabledOpen(_) | st @ State::DisabledOpening => { - debug!(target: "sub-libp2p", - "Tried to disable notifications handler that was already disabled"); - self.state = st; - } - State::Opening { .. 
} => self.state = State::DisabledOpening, - State::Refused => self.state = State::Disabled, - State::Open { substream, close_waker, .. } => { - if let Some(close_waker) = close_waker { - close_waker.wake(); - } - self.state = State::DisabledOpen(substream) - }, - State::Poisoned => error!("☎️ Notifications handler in a poisoned state"), - } - } - } - } - - fn inject_dial_upgrade_error(&mut self, _: (), _: ProtocolsHandlerUpgrErr) { - match mem::replace(&mut self.state, State::Poisoned) { - State::Disabled => {}, - State::DisabledOpen(_) | State::Refused | State::Open { .. } => - error!("☎️ State mismatch in NotificationsOut"), - State::Opening { .. } => { - self.state = State::Refused; - let ev = NotifsOutHandlerOut::Refused; - self.events_queue.push_back(ProtocolsHandlerEvent::Custom(ev)); - }, - State::DisabledOpening => self.state = State::Disabled, - State::Poisoned => error!("☎️ Notifications handler in a poisoned state"), - } - } - - fn connection_keep_alive(&self) -> KeepAlive { - match self.state { - // We have a small grace period of `INITIAL_KEEPALIVE_TIME` during which we keep the - // connection open no matter what, in order to avoid closing and reopening - // connections all the time. - State::Disabled | State::DisabledOpen(_) | State::DisabledOpening => - KeepAlive::Until(self.when_connection_open + INITIAL_KEEPALIVE_TIME), - State::Opening { .. } | State::Open { .. } => KeepAlive::Yes, - State::Refused | State::Poisoned => KeepAlive::No, - } - } - - fn poll( - &mut self, - cx: &mut Context, - ) -> Poll> { - // Flush the events queue if necessary. - if let Some(event) = self.events_queue.pop_front() { - return Poll::Ready(event) - } - - match &mut self.state { - State::Open { substream, initial_message, close_waker } => - match Sink::poll_flush(Pin::new(substream), cx) { - Poll::Pending | Poll::Ready(Ok(())) => {}, - Poll::Ready(Err(_)) => { - if let Some(close_waker) = close_waker.take() { - close_waker.wake(); - } - - // We try to re-open a substream. 
- let initial_message = mem::replace(initial_message, Vec::new()); - self.state = State::Opening { initial_message: initial_message.clone() }; - let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message); - self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(proto, ()).with_timeout(OPEN_TIMEOUT), - }); - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed)); - } - }, - - State::DisabledOpen(sub) => match Sink::poll_close(Pin::new(sub), cx) { - Poll::Pending => {}, - Poll::Ready(Ok(())) | Poll::Ready(Err(_)) => { - self.state = State::Disabled; - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed)); - }, - }, - - _ => {} - } - - Poll::Pending - } -} - -impl fmt::Debug for NotifsOutHandler { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - f.debug_struct("NotifsOutHandler") - .field("open", &self.is_open()) - .finish() - } -} diff --git a/client/network/src/protocol/generic_proto/upgrade/legacy.rs b/client/network/src/protocol/generic_proto/upgrade/legacy.rs index 1b2b97253d1ae..91282d0cf57dd 100644 --- a/client/network/src/protocol/generic_proto/upgrade/legacy.rs +++ b/client/network/src/protocol/generic_proto/upgrade/legacy.rs @@ -20,7 +20,7 @@ use crate::config::ProtocolId; use bytes::BytesMut; use futures::prelude::*; use futures_codec::Framed; -use libp2p::core::{Endpoint, UpgradeInfo, InboundUpgrade, OutboundUpgrade, upgrade::ProtocolName}; +use libp2p::core::{UpgradeInfo, InboundUpgrade, OutboundUpgrade, upgrade::ProtocolName}; use parking_lot::RwLock; use std::{collections::VecDeque, io, pin::Pin, sync::Arc, vec::IntoIter as VecIntoIter}; use std::task::{Context, Poll}; @@ -85,34 +85,18 @@ impl Clone for RegisteredProtocol { pub struct RegisteredProtocolSubstream { /// If true, we are in the process of closing the sink. is_closing: bool, - /// Whether the local node opened this substream (dialer), or we received this substream from - /// the remote (listener). - endpoint: Endpoint, /// Buffer of packets to send. send_queue: VecDeque, /// If true, we should call `poll_complete` on the inner sink. requires_poll_flush: bool, /// The underlying substream. inner: stream::Fuse>>, - /// Version of the protocol that was negotiated. - protocol_version: u8, /// If true, we have sent a "remote is clogged" event recently and shouldn't send another one /// unless the buffer empties then fills itself again. clogged_fuse: bool, } impl RegisteredProtocolSubstream { - /// Returns the version of the protocol that was negotiated. - pub fn protocol_version(&self) -> u8 { - self.protocol_version - } - - /// Returns whether the local node opened this substream (dialer), or we received this - /// substream from the remote (listener). - pub fn endpoint(&self) -> Endpoint { - self.endpoint - } - /// Starts a graceful shutdown process on this substream. /// /// Note that "graceful" means that we sent a closing message. 
We don't wait for any @@ -246,7 +230,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, fn upgrade_inbound( self, socket: TSubstream, - info: Self::Info, + _: Self::Info, ) -> Self::Future { Box::pin(async move { let mut framed = { @@ -262,11 +246,9 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, Ok((RegisteredProtocolSubstream { is_closing: false, - endpoint: Endpoint::Listener, send_queue: VecDeque::new(), requires_poll_flush: false, inner: framed.fuse(), - protocol_version: info.version, clogged_fuse: false, }, received_handshake.to_vec())) }) @@ -283,7 +265,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, fn upgrade_outbound( self, socket: TSubstream, - info: Self::Info, + _: Self::Info, ) -> Self::Future { Box::pin(async move { let mut framed = { @@ -301,11 +283,9 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, Ok((RegisteredProtocolSubstream { is_closing: false, - endpoint: Endpoint::Dialer, send_queue: VecDeque::new(), requires_poll_flush: false, inner: framed.fuse(), - protocol_version: info.version, clogged_fuse: false, }, received_handshake.to_vec())) }) diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 93abbbad02495..5fc8485947ff5 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -39,7 +39,7 @@ use crate::{ }, on_demand_layer::AlwaysBadChecker, light_client_handler, block_requests, finality_requests, - protocol::{self, event::Event, NotifsHandlerError, LegacyConnectionKillError, NotificationsSink, Ready, sync::SyncState, PeerInfo, Protocol}, + protocol::{self, event::Event, NotifsHandlerError, NotificationsSink, Ready, sync::SyncState, PeerInfo, Protocol}, transport, ReputationChange, }; use futures::{channel::oneshot, prelude::*}; @@ -1589,9 +1589,6 @@ impl Future for NetworkWorker { Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( EitherError::A(EitherError::A(EitherError::A(EitherError::B( EitherError::A(PingFailure::Timeout)))))))))) => "ping-timeout", - Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( - EitherError::A(EitherError::A(EitherError::A(EitherError::A( - NotifsHandlerError::Legacy(LegacyConnectionKillError)))))))))) => "force-closed", Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( EitherError::A(EitherError::A(EitherError::A(EitherError::A( NotifsHandlerError::SyncNotificationsClogged))))))))) => "sync-notifications-clogged", From 08b217a0650635f8430655979c581f0cebe4bed3 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Mon, 16 Nov 2020 19:49:50 +0100 Subject: [PATCH 16/34] *: Update to libp2p v0.30.0 (#7508) * *: Update to libp2p v0.30.0 * Cargo.lock: Update * *: Update to libp2p v0.30.1 --- Cargo.lock | 223 ++++++++++-------- bin/node/browser-testing/Cargo.toml | 2 +- client/authority-discovery/Cargo.toml | 2 +- client/cli/Cargo.toml | 2 +- client/network-gossip/Cargo.toml | 2 +- client/network/Cargo.toml | 4 +- client/network/src/discovery.rs | 16 +- client/network/src/light_client_handler.rs | 2 +- .../src/protocol/generic_proto/tests.rs | 2 +- client/network/src/request_responses.rs | 4 +- client/network/src/transport.rs | 8 +- client/network/test/Cargo.toml | 2 +- client/peerset/Cargo.toml | 2 +- client/telemetry/Cargo.toml | 2 +- primitives/consensus/common/Cargo.toml | 2 +- utils/browser/Cargo.toml | 2 +- 16 files changed, 148 insertions(+), 129 deletions(-) diff --git 
a/Cargo.lock b/Cargo.lock index fca7a32c96d9c..1b8b13c2e2a48 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -262,7 +262,7 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38628c78a34f111c5a6b98fc87dfc056cd1590b61afe748b145be4623c56d194" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "concurrent-queue", "fastrand", "futures-lite", @@ -389,7 +389,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46254cf2fdcdf1badb5934448c1bcbe046a56537b3987d96c51a7afc5d03f293" dependencies = [ "addr2line", - "cfg-if", + "cfg-if 0.1.10", "libc", "miniz_oxide", "object 0.20.0", @@ -438,7 +438,7 @@ checksum = "66c0bb6167449588ff70803f4127f0684f9063097eca5016f37eb52b92c2cf36" dependencies = [ "bitflags", "cexpr", - "cfg-if", + "cfg-if 0.1.10", "clang-sys", "clap", "env_logger", @@ -605,6 +605,12 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "476e9cd489f9e121e02ffa6014a8ef220ecb15c05ed23fc34cca13925dc283fb" +[[package]] +name = "bs58" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" + [[package]] name = "bstr" version = "0.2.13" @@ -718,6 +724,12 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + [[package]] name = "chacha20" version = "0.4.3" @@ -827,7 +839,7 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8d976903543e0c48546a91908f21588a680a8c8f984df9a5d69feccb2b2a211" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "wasm-bindgen", ] @@ -987,7 +999,7 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", ] [[package]] @@ -1044,7 +1056,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" dependencies = [ "autocfg 1.0.0", - "cfg-if", + "cfg-if 0.1.10", "crossbeam-utils", "lazy_static", "maybe-uninit", @@ -1058,7 +1070,7 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "crossbeam-utils", "maybe-uninit", ] @@ -1070,7 +1082,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" dependencies = [ "autocfg 1.0.0", - "cfg-if", + "cfg-if 0.1.10", "lazy_static", ] @@ -1225,7 +1237,7 @@ version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "551a778172a450d7fc12e629ca3b0428d00f6afa9a43da1b630d54604e97371c" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "dirs-sys", ] @@ -1577,7 +1589,7 @@ version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68c90b0fc46cf89d227cc78b40e494ff81287a92dd07631e5af0d06fe3cf885e" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "crc32fast", "libc", "libz-sys", @@ -2065,7 +2077,7 @@ version = "0.1.14" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "wasi", "wasm-bindgen", @@ -2077,7 +2089,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee8025cf36f917e6a52cce185b7c7177689b838b7ec138364e50cc2277a56cf4" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "wasi", ] @@ -2885,9 +2897,9 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" [[package]] name = "libp2p" -version = "0.29.1" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "021f703bfef6e3da78ef9828c8a244d639b8d57eedf58360922aca5ff69dfdcd" +checksum = "e3c2b4c99f8798be90746fc226acf95d3e6cff0655883634cc30dab1f64f438b" dependencies = [ "atomic", "bytes 0.5.6", @@ -2924,12 +2936,12 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.23.1" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3960524389409633550567e8a9e0684d25a33f4f8408887ff897dd9fdfbdb771" +checksum = "1b8186060d6bd415e4e928e6cb44c4fe7e7a7dd53437bd936ce7e5f421e45a51" dependencies = [ "asn1_der", - "bs58", + "bs58 0.4.0", "ed25519-dalek", "either", "fnv", @@ -2968,9 +2980,9 @@ dependencies = [ [[package]] name = "libp2p-deflate" -version = "0.23.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567962c5c5f8a1282979441300e1739ba939024010757c3dbfab4d462189df77" +checksum = "34aea69349e70a58ef9ecd21ac12c5eaa36255ac6986828079d26393f9e618cb" dependencies = [ "flate2", "futures 0.3.5", @@ -2979,9 +2991,9 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.23.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "436280f5fe21a58fcaff82c2606945579241f32bc0eaf2d39321aa4624a66e7f" +checksum = "0baeff71fb5cb1fe1604f74a712a44b66a8c5900f4022411a1d550f09d6bb776" dependencies = [ "futures 0.3.5", "libp2p-core", @@ -2990,9 +3002,9 @@ dependencies = [ [[package]] name = "libp2p-floodsub" -version = "0.23.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc175613c5915332fd6458895407ec242ea055ae3b107a586626d5e3349350a" +checksum = "db0f925a45f310b678e70faf71a10023b829d02eb9cc2628a63de928936f3ade" dependencies = [ "cuckoofilter", "fnv", @@ -3008,9 +3020,9 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.23.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d500ad89ba14de4d18bebdff61a0ce3e769f1c5c5a95026c5da90187e5fff5c9" +checksum = "efeb65567174974f551a91f9f5719445b6695cad56f6a7a47a27111f37efb6b8" dependencies = [ "base64 0.13.0", "byteorder", @@ -3034,9 +3046,9 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.23.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03b90b350e37f398b73d778bd94422f4e6a3afa2c1582742ce2446b8a0dba787" +checksum = "e074124669840484de564901d47f2d0892e73f6d8ee7c37e9c2644af1b217bf4" dependencies = [ "futures 0.3.5", "libp2p-core", @@ -3050,9 +3062,9 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.24.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb78341f114bf686d5fe50b33ff1a804d88fb326c0d39ee1c22db4346b21fc27" +checksum = 
"78a2653b2e3254a3bbeb66bfc3f0dca7d6cba6aa2a96791db114003dec1b5394" dependencies = [ "arrayvec 0.5.1", "bytes 0.5.6", @@ -3077,9 +3089,9 @@ dependencies = [ [[package]] name = "libp2p-mdns" -version = "0.23.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b575514fce0a3ccbd065d6aa377bd4d5102001b05c1a22a5eee49c450254ef0f" +checksum = "786b068098794322239f8f04df88a52daeb7863b2e77501c4d85d32e0a8f2d26" dependencies = [ "async-std", "data-encoding", @@ -3099,9 +3111,9 @@ dependencies = [ [[package]] name = "libp2p-mplex" -version = "0.23.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "696c8ee8b42496690b88b0de84a96387caf6e09880bcc8e794bb88afa054e995" +checksum = "ed764eab613a8fb6b7dcf6c796f55a06fef2270e528329903e25cd3311b99663" dependencies = [ "bytes 0.5.6", "futures 0.3.5", @@ -3117,9 +3129,9 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.25.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93c77142e3e5b18fefa7d267305c777c9cbe9b2232ec489979390100bebcc1e6" +checksum = "fb441fb015ec16690099c5d910fcba271d357763b3dcb784db7b27bbb0b68372" dependencies = [ "bytes 0.5.6", "curve25519-dalek 3.0.0", @@ -3139,9 +3151,9 @@ dependencies = [ [[package]] name = "libp2p-ping" -version = "0.23.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7257135609e8877f4d286935cbe1e572b2018946881c3e7f63054577074a7ee7" +checksum = "82e5c50936cfdbe96a514e8992f304fa44cd3a681b6f779505f1ae62b3474705" dependencies = [ "futures 0.3.5", "libp2p-core", @@ -3154,9 +3166,9 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.23.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c88d59ba3e710a8c8e0535cb4a52e9e46534924cbbea4691f8c3aaad17b58c61" +checksum = "21026557c335d3639591f247b19b7536195772034ec7e9c463137227f95eaaa1" dependencies = [ "bytes 0.5.6", "futures 0.3.5", @@ -3185,9 +3197,9 @@ dependencies = [ [[package]] name = "libp2p-request-response" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02ba1aa5727ccc118c09ba5111480873f2fe5608cb304e258fd12c173ecf27c9" +checksum = "2dd9a1e0e6563dec1c9e702f7e68bdaa43da62a84536aa06372d3fed3e25d4ca" dependencies = [ "async-trait", "bytes 0.5.6", @@ -3205,9 +3217,9 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.23.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffa6fa33b16956b8a58afbfebe1406866011a1ab8960765bd36868952d7be6a1" +checksum = "565f0e06674b4033c978471e4083d5aaa8e03cef0719a0ec0905aaeaad39a919" dependencies = [ "either", "futures 0.3.5", @@ -3221,9 +3233,9 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.23.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d0b6f4ef48d9493607fae069deecce0579320a1f3de6cb056770b151018a9a5" +checksum = "33f3dce259c0d3127af5167f45c275b6c047320efdd0e40fde947482487af0a3" dependencies = [ "async-std", "futures 0.3.5", @@ -3237,9 +3249,9 @@ dependencies = [ [[package]] name = "libp2p-uds" -version = "0.23.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "945bed3c989a1b290b5a0d4e8fa6e44e01840efb9a5ab3f0d3d174f0e451ac0e" +checksum = "5e0aba04370a00d8d0236e350bc862926c1b42542a169aa6a481e660e5b990fe" dependencies = [ 
"async-std", "futures 0.3.5", @@ -3249,9 +3261,9 @@ dependencies = [ [[package]] name = "libp2p-wasm-ext" -version = "0.23.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66518a4455e15c283637b4d7b579aef928b75a3fc6c50a41e7e6b9fa86672ca0" +checksum = "6c703816f4170477a375b49c56d349e535ce68388f81ba1d9a3c8e2517effa82" dependencies = [ "futures 0.3.5", "js-sys", @@ -3263,9 +3275,9 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.24.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edc561870477523245efaaea1b6b743c70115f10c670e62bcbbe4d3153be5f0c" +checksum = "8d5e7268a959748040a0cf7456ad655be55b87f0ceda03bdb5b53674726b28f7" dependencies = [ "async-tls", "either", @@ -3283,9 +3295,9 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.26.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07c0c9b6ef7a168c2ae854170b0b6b77550599afe06cc3ac390eb45c5d9c7110" +checksum = "1a0798cbb58535162c40858493d09af06eac42a26e4966e58de0df701f559348" dependencies = [ "futures 0.3.5", "libp2p-core", @@ -3411,7 +3423,7 @@ version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", ] [[package]] @@ -3548,18 +3560,18 @@ dependencies = [ [[package]] name = "minicbor" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2ef6aa869726518c5d8206fa5d1337bda8a0442807611be617891c018fa781" +checksum = "0164190d1771b1458c3742075b057ed55d25cd9dfb930aade99315a1eb1fe12d" dependencies = [ "minicbor-derive", ] [[package]] name = "minicbor-derive" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b3569c0dbfff1b8d5f1434c642b67f5bf81c0f354a3f5f8f180b549dba3c07c" +checksum = "2e071b3159835ee91df62dbdbfdd7ec366b7ea77c838f43aff4acda6b61bcfb9" dependencies = [ "proc-macro2", "quote", @@ -3581,7 +3593,7 @@ version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fce347092656428bc8eaf6201042cb551b8d67855af7374542a92a0fbfcac430" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "fuchsia-zircon", "fuchsia-zircon-sys", "iovec", @@ -3659,17 +3671,17 @@ checksum = "0debeb9fcf88823ea64d64e4a815ab1643f33127d995978e099942ce38f25238" [[package]] name = "multihash" -version = "0.11.2" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f75db05d738947aa5389863aadafbcf2e509d7ba099dc2ddcdf4fc66bf7a9e03" +checksum = "567122ab6492f49b59def14ecc36e13e64dca4188196dd0cd41f9f3f979f3df6" dependencies = [ "blake2b_simd", "blake2s_simd", - "digest 0.8.1", - "sha-1", - "sha2 0.8.2", - "sha3 0.8.2", - "unsigned-varint 0.3.3", + "digest 0.9.0", + "sha-1 0.9.2", + "sha2 0.9.1", + "sha3 0.9.1", + "unsigned-varint 0.5.1", ] [[package]] @@ -3680,9 +3692,9 @@ checksum = "d8883adfde9756c1d30b0f519c9b8c502a94b41ac62f696453c37c7fc0a958ce" [[package]] name = "multistream-select" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36a6aa6e32fbaf16795142335967214b8564a7a4661eb6dc846ef343a6e00ac1" +checksum = "93faf2e41f9ee62fb01680ed48f3cc26652352327aa2e59869070358f6b7dd75" dependencies = [ "bytes 0.5.6", "futures 0.3.5", @@ -3724,7 +3736,7 @@ version = "0.2.34" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "2ba7c918ac76704fb42afcbbb43891e72731f3dcca3bef2a19786297baf14af7" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "winapi 0.3.9", ] @@ -3737,7 +3749,7 @@ checksum = "50e4785f2c3b7589a0d0c1dd60285e1188adac4006e8abd6dd578e1567027363" dependencies = [ "bitflags", "cc", - "cfg-if", + "cfg-if 0.1.10", "libc", "void", ] @@ -5215,12 +5227,12 @@ dependencies = [ [[package]] name = "parity-multiaddr" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c7ad66970bbab360c97179b60906e2dc4aef1f7fca8ab4e5c5db8c97b16814a" +checksum = "22fe99b938abd57507e37f8d4ef30cd74b33c71face2809b37b8beb71bab15ab" dependencies = [ "arrayref", - "bs58", + "bs58 0.4.0", "byteorder", "data-encoding", "multihash", @@ -5287,7 +5299,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "297ff91fa36aec49ce183484b102f6b75b46776822bd81525bfc4cc9b0dd0f5c" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "ethereum-types", "hashbrown 0.8.1", "impl-trait-for-tuples", @@ -5338,7 +5350,7 @@ dependencies = [ "mio", "mio-extras", "rand 0.7.3", - "sha-1", + "sha-1 0.8.2", "slab", "url 2.1.1", ] @@ -5410,7 +5422,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "cloudabi 0.0.3", "libc", "redox_syscall", @@ -5425,7 +5437,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "cloudabi 0.0.3", "libc", "redox_syscall", @@ -5439,7 +5451,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c361aa727dd08437f2f1447be8b59a33b0edd15e0fcee698f935613d9efbca9b" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "cloudabi 0.1.0", "instant", "libc", @@ -5557,7 +5569,7 @@ checksum = "54be6e404f5317079812fc8f9f5279de376d8856929e21c184ecf6bbd692a11d" dependencies = [ "maplit", "pest", - "sha-1", + "sha-1 0.8.2", ] [[package]] @@ -5652,7 +5664,7 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0720e0b9ea9d52451cf29d3413ba8a9303f8815d9d9653ef70e03ff73e65566" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "log", "wepoll-sys-stjepang", @@ -5674,7 +5686,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9a50142b55ab3ed0e9f68dfb3709f1d90d29da24e91033f28b96330643107dc" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "universal-hash", ] @@ -5797,7 +5809,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30d70cf4412832bcac9cffe27906f4a66e450d323525e977168c70d1b36120ae" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "fnv", "lazy_static", "parking_lot 0.11.0", @@ -7153,7 +7165,7 @@ dependencies = [ "async-std", "async-trait", "bitflags", - "bs58", + "bs58 0.3.1", "bytes 0.5.6", "derive_more", "either", @@ -7842,6 +7854,19 @@ dependencies = [ "opaque-debug 0.2.3", ] +[[package]] +name = "sha-1" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce3cdf1b5e620a498ee6f2a171885ac7e22f0e12089ec4b3d22b84921792507c" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if 1.0.0", + "cpuid-bool", + "digest 0.9.0", + "opaque-debug 
0.3.0", +] + [[package]] name = "sha2" version = "0.8.2" @@ -7861,7 +7886,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2933378ddfeda7ea26f48c555bdad8bb446bf8a3d17832dc83e380d444cfb8c1" dependencies = [ "block-buffer 0.9.0", - "cfg-if", + "cfg-if 0.1.10", "cpuid-bool", "digest 0.9.0", "opaque-debug 0.3.0", @@ -8012,7 +8037,7 @@ version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03088793f677dce356f3ccc2edb1b314ad191ab702a5de3faf49304f7e104918" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "redox_syscall", "winapi 0.3.9", @@ -8031,7 +8056,7 @@ dependencies = [ "httparse", "log", "rand 0.7.3", - "sha-1", + "sha-1 0.8.2", ] [[package]] @@ -9049,7 +9074,7 @@ dependencies = [ name = "substrate-test-runtime" version = "2.0.0" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "frame-executive", "frame-support", "frame-system", @@ -9237,7 +9262,7 @@ version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "rand 0.7.3", "redox_syscall", @@ -9685,7 +9710,7 @@ version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b0987850db3733619253fe60e17cb59b82d37c7e6c0236bb81e4d6b87c879f27" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "log", "pin-project-lite", "tracing-attributes", @@ -9928,12 +9953,6 @@ dependencies = [ "subtle 2.2.3", ] -[[package]] -name = "unsigned-varint" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f67332660eb59a6f1eb24ff1220c9e8d01738a8503c6002e30bcfe4bd9f2b4a9" - [[package]] name = "unsigned-varint" version = "0.4.0" @@ -10075,7 +10094,7 @@ version = "0.2.67" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0563a9a4b071746dd5aedbc3a28c6fe9be4586fb3fbadb67c400d4f53c6b16c" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "serde", "serde_json", "wasm-bindgen-macro", @@ -10102,7 +10121,7 @@ version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a369c5e1dfb7569e14d62af4da642a3cbc2f9a3652fe586e26ac22222aa4b04" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "js-sys", "wasm-bindgen", "web-sys", @@ -10232,7 +10251,7 @@ checksum = "1cd3c4f449382779ef6e0a7c3ec6752ae614e20a42e4100000c3efdc973100e2" dependencies = [ "anyhow", "backtrace", - "cfg-if", + "cfg-if 0.1.10", "lazy_static", "libc", "log", @@ -10274,7 +10293,7 @@ dependencies = [ "anyhow", "base64 0.12.3", "bincode", - "cfg-if", + "cfg-if 0.1.10", "cranelift-codegen", "cranelift-entity", "cranelift-frontend", @@ -10303,7 +10322,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e914c013c7a9f15f4e429d5431f2830fb8adb56e40567661b69c5ec1d645be23" dependencies = [ "anyhow", - "cfg-if", + "cfg-if 0.1.10", "cranelift-codegen", "cranelift-entity", "cranelift-frontend", @@ -10346,7 +10365,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e8d4d1af8dd5f7096cfcc89dd668d358e52980c38cce199643372ffd6590e27" dependencies = [ "anyhow", - "cfg-if", + "cfg-if 0.1.10", "gimli 0.21.0", "lazy_static", "libc", @@ -10366,7 +10385,7 @@ checksum = "3a25f140bbbaadb07c531cba99ce1a966dba216138dc1b2a0ddecec851a01a93" dependencies = [ "backtrace", "cc", - "cfg-if", + "cfg-if 0.1.10", "indexmap", "lazy_static", "libc", diff --git a/bin/node/browser-testing/Cargo.toml 
b/bin/node/browser-testing/Cargo.toml index c90c4a293f49a..fade57d8124f3 100644 --- a/bin/node/browser-testing/Cargo.toml +++ b/bin/node/browser-testing/Cargo.toml @@ -8,7 +8,7 @@ license = "Apache-2.0" [dependencies] futures-timer = "3.0.2" -libp2p = { version = "0.29.1", default-features = false } +libp2p = { version = "0.30.1", default-features = false } jsonrpc-core = "15.0.0" serde = "1.0.106" serde_json = "1.0.48" diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index ff6c26bbee53e..40b929fc8a0f4 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -24,7 +24,7 @@ derive_more = "0.99.2" either = "1.5.3" futures = "0.3.4" futures-timer = "3.0.1" -libp2p = { version = "0.29.1", default-features = false, features = ["kad"] } +libp2p = { version = "0.30.1", default-features = false, features = ["kad"] } log = "0.4.8" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0"} prost = "0.6.1" diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 51c499828ac2a..942d30e90db5b 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -21,7 +21,7 @@ ansi_term = "0.12.1" tokio = { version = "0.2.21", features = [ "signal", "rt-core", "rt-threaded", "blocking" ] } futures = "0.3.4" fdlimit = "0.2.1" -libp2p = "0.29.1" +libp2p = "0.30.1" parity-scale-codec = "1.3.0" hex = "0.4.2" rand = "0.7.3" diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index 94d9272f4bbd2..c120ff515c7ad 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.4" futures-timer = "3.0.1" -libp2p = { version = "0.29.1", default-features = false } +libp2p = { version = "0.30.1", default-features = false } log = "0.4.8" lru = "0.4.3" sc-network = { version = "0.8.0", path = "../network" } diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index f5ebee39db563..6b66fd0cdee65 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -64,13 +64,13 @@ wasm-timer = "0.2" zeroize = "1.0.0" [dependencies.libp2p] -version = "0.29.1" +version = "0.30.1" default-features = false features = ["identify", "kad", "mdns-async-std", "mplex", "noise", "ping", "request-response", "tcp-async-std", "websocket", "yamux"] [dev-dependencies] assert_matches = "1.3" -libp2p = { version = "0.29.1", default-features = false } +libp2p = { version = "0.30.1", default-features = false } quickcheck = "0.9.0" rand = "0.7.2" sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index f9bda6aabf5f8..60d35dbdf1ae0 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -51,11 +51,11 @@ use futures::prelude::*; use futures_timer::Delay; use ip_network::IpNetwork; use libp2p::core::{connection::{ConnectionId, ListenerId}, ConnectedPoint, Multiaddr, PeerId, PublicKey}; -use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler}; -use libp2p::swarm::protocols_handler::multi::MultiHandler; +use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler, IntoProtocolsHandler}; +use libp2p::swarm::protocols_handler::multi::IntoMultiHandler; use libp2p::kad::{Kademlia, KademliaBucketInserts, KademliaConfig, KademliaEvent, QueryResult, Quorum, 
Record}; use libp2p::kad::GetClosestPeersError; -use libp2p::kad::handler::KademliaHandler; +use libp2p::kad::handler::KademliaHandlerProto; use libp2p::kad::QueryId; use libp2p::kad::record::{self, store::{MemoryStore, RecordStore}}; #[cfg(not(target_os = "unknown"))] @@ -444,14 +444,14 @@ pub enum DiscoveryOut { } impl NetworkBehaviour for DiscoveryBehaviour { - type ProtocolsHandler = MultiHandler>; + type ProtocolsHandler = IntoMultiHandler>; type OutEvent = DiscoveryOut; fn new_handler(&mut self) -> Self::ProtocolsHandler { let iter = self.kademlias.iter_mut() .map(|(p, k)| (p.clone(), NetworkBehaviour::new_handler(k))); - MultiHandler::try_from_iter(iter) + IntoMultiHandler::try_from_iter(iter) .expect("There can be at most one handler per `ProtocolId` and \ protocol names contain the `ProtocolId` so no two protocol \ names in `self.kademlias` can be equal which is the only error \ @@ -534,7 +534,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { &mut self, peer_id: PeerId, connection: ConnectionId, - (pid, event): ::OutEvent, + (pid, event): <::Handler as ProtocolsHandler>::OutEvent, ) { if let Some(kad) = self.kademlias.get_mut(&pid) { return kad.inject_event(peer_id, connection, event) @@ -598,7 +598,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { params: &mut impl PollParameters, ) -> Poll< NetworkBehaviourAction< - ::InEvent, + <::Handler as ProtocolsHandler>::InEvent, Self::OutEvent, >, > { @@ -816,7 +816,7 @@ mod tests { let transport = MemoryTransport .upgrade(upgrade::Version::V1) .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) - .multiplex(yamux::Config::default()) + .multiplex(yamux::YamuxConfig::default()) .boxed(); let behaviour = { diff --git a/client/network/src/light_client_handler.rs b/client/network/src/light_client_handler.rs index e7c5e9c1c9b95..b72362fdfc362 100644 --- a/client/network/src/light_client_handler.rs +++ b/client/network/src/light_client_handler.rs @@ -1355,7 +1355,7 @@ mod tests { let transport = MemoryTransport::default() .upgrade(upgrade::Version::V1) .authenticate(NoiseConfig::xx(dh_key).into_authenticated()) - .multiplex(yamux::Config::default()) + .multiplex(yamux::YamuxConfig::default()) .boxed(); Swarm::new(transport, LightClientHandler::new(cf, client, checker, ps), local_peer) } diff --git a/client/network/src/protocol/generic_proto/tests.rs b/client/network/src/protocol/generic_proto/tests.rs index 7a040a403af73..9c45c62f8bb4c 100644 --- a/client/network/src/protocol/generic_proto/tests.rs +++ b/client/network/src/protocol/generic_proto/tests.rs @@ -54,7 +54,7 @@ fn build_nodes() -> (Swarm, Swarm) { let transport = MemoryTransport .upgrade(upgrade::Version::V1) .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) - .multiplex(yamux::Config::default()) + .multiplex(yamux::YamuxConfig::default()) .timeout(Duration::from_secs(20)) .boxed(); diff --git a/client/network/src/request_responses.rs b/client/network/src/request_responses.rs index 5e414248674f0..a3a68f719d6bb 100644 --- a/client/network/src/request_responses.rs +++ b/client/network/src/request_responses.rs @@ -680,7 +680,7 @@ mod tests { let transport = MemoryTransport .upgrade(upgrade::Version::V1) .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) - .multiplex(libp2p::yamux::Config::default()) + .multiplex(libp2p::yamux::YamuxConfig::default()) .boxed(); let behaviour = { @@ -783,7 +783,7 @@ mod tests { let transport = MemoryTransport .upgrade(upgrade::Version::V1) 
.authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) - .multiplex(libp2p::yamux::Config::default()) + .multiplex(libp2p::yamux::YamuxConfig::default()) .boxed(); let behaviour = { diff --git a/client/network/src/transport.rs b/client/network/src/transport.rs index 80d897633fd72..035b3a9716a02 100644 --- a/client/network/src/transport.rs +++ b/client/network/src/transport.rs @@ -104,13 +104,13 @@ pub fn build_transport( let multiplexing_config = { let mut mplex_config = mplex::MplexConfig::new(); - mplex_config.max_buffer_len_behaviour(mplex::MaxBufferBehaviour::Block); - mplex_config.max_buffer_len(usize::MAX); + mplex_config.set_max_buffer_behaviour(mplex::MaxBufferBehaviour::Block); + mplex_config.set_max_buffer_size(usize::MAX); - let mut yamux_config = libp2p::yamux::Config::default(); + let mut yamux_config = libp2p::yamux::YamuxConfig::default(); // Enable proper flow-control: window updates are only sent when // buffered data has been consumed. - yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::OnRead); + yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::on_read()); core::upgrade::SelectUpgrade::new(yamux_config, mplex_config) }; diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index a8bf98a75ed61..a74aa90d4f4ce 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -19,7 +19,7 @@ parking_lot = "0.10.0" futures = "0.3.4" futures-timer = "3.0.1" rand = "0.7.2" -libp2p = { version = "0.29.1", default-features = false } +libp2p = { version = "0.30.1", default-features = false } sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } sc-consensus = { version = "0.8.0", path = "../../../client/consensus/common" } sc-client-api = { version = "2.0.0", path = "../../api" } diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml index 459f4a9302045..efca00a24deb6 100644 --- a/client/peerset/Cargo.toml +++ b/client/peerset/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.4" -libp2p = { version = "0.29.1", default-features = false } +libp2p = { version = "0.30.1", default-features = false } sp-utils = { version = "2.0.0", path = "../../primitives/utils"} log = "0.4.8" serde_json = "1.0.41" diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index 18812a8c71e43..bff7842bec4f5 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -19,7 +19,7 @@ parking_lot = "0.10.0" futures = "0.3.4" futures-timer = "3.0.1" wasm-timer = "0.2.0" -libp2p = { version = "0.29.1", default-features = false, features = ["dns", "tcp-async-std", "wasm-ext", "websocket"] } +libp2p = { version = "0.30.1", default-features = false, features = ["dns", "tcp-async-std", "wasm-ext", "websocket"] } log = "0.4.8" pin-project = "0.4.6" rand = "0.7.2" diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index db85244dcfa83..001f0e3679458 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] thiserror = "1.0.21" -libp2p = { version = "0.29.1", default-features = false } +libp2p = { version = "0.30.1", default-features = false } log = "0.4.8" sp-core = { path= "../../core", version = "2.0.0"} sp-inherents = { version = "2.0.0", path = "../../inherents" } diff --git a/utils/browser/Cargo.toml b/utils/browser/Cargo.toml index 
90668f4e51fe8..9efc8c396680e 100644 --- a/utils/browser/Cargo.toml +++ b/utils/browser/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] futures = { version = "0.3", features = ["compat"] } futures01 = { package = "futures", version = "0.1.29" } log = "0.4.8" -libp2p-wasm-ext = { version = "0.23", features = ["websocket"] } +libp2p-wasm-ext = { version = "0.24", features = ["websocket"] } console_error_panic_hook = "0.1.6" console_log = "0.1.2" js-sys = "0.3.34" From 11ace4ef8b2ad176293ad6db2b3dd795befd2c79 Mon Sep 17 00:00:00 2001 From: Andrew Plaza Date: Mon, 16 Nov 2020 22:50:44 +0100 Subject: [PATCH 17/34] make ClientConfig public (#7544) --- client/service/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 8e6b0037bdf93..a23ebf3d553d5 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -80,7 +80,7 @@ pub use sc_tracing::TracingReceiver; pub use task_manager::SpawnTaskHandle; pub use task_manager::TaskManager; pub use sp_consensus::import_queue::ImportQueue; -pub use self::client::LocalCallExecutor; +pub use self::client::{LocalCallExecutor, ClientConfig}; use sc_client_api::{blockchain::HeaderBackend, BlockchainEvents}; const DEFAULT_PROTOCOL_ID: &str = "sup"; From 407cd3af06b03201a506b645c9ee587e5d4748bb Mon Sep 17 00:00:00 2001 From: Qinxuan Chen Date: Wed, 18 Nov 2020 13:56:16 +0800 Subject: [PATCH 18/34] sc-basic-authorship: remove useless dependencies (#7550) Signed-off-by: koushiro --- Cargo.lock | 66 ++++-------------------------- client/basic-authorship/Cargo.toml | 1 - 2 files changed, 8 insertions(+), 59 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1b8b13c2e2a48..3a87dfec66403 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1878,27 +1878,12 @@ dependencies = [ "futures-sink", ] -[[package]] -name = "futures-channel-preview" -version = "0.3.0-alpha.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5e5f4df964fa9c1c2f8bddeb5c3611631cacd93baf810fc8bb2fb4b495c263a" -dependencies = [ - "futures-core-preview", -] - [[package]] name = "futures-core" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59f5fff90fd5d971f936ad674802482ba441b6f09ba5e15fd8b39145582ca399" -[[package]] -name = "futures-core-preview" -version = "0.3.0-alpha.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b35b6263fb1ef523c3056565fa67b1d16f0a8604ff12b11b08c25f28a734c60a" - [[package]] name = "futures-cpupool" version = "0.1.8" @@ -2022,18 +2007,6 @@ dependencies = [ "slab", ] -[[package]] -name = "futures-util-preview" -version = "0.3.0-alpha.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ce968633c17e5f97936bd2797b6e38fb56cf16a7422319f7ec2e30d3c470e8d" -dependencies = [ - "futures-channel-preview", - "futures-core-preview", - "pin-utils", - "slab", -] - [[package]] name = "futures_codec" version = "0.4.1" @@ -2412,7 +2385,7 @@ dependencies = [ "time", "tokio 0.1.22", "tokio-buf", - "tokio-executor 0.1.10", + "tokio-executor", "tokio-io", "tokio-reactor", "tokio-tcp", @@ -6518,7 +6491,6 @@ dependencies = [ "sp-transaction-pool", "substrate-prometheus-endpoint", "substrate-test-runtime-client", - "tokio-executor 0.2.0-alpha.6", ] [[package]] @@ -9404,11 +9376,11 @@ dependencies = [ "num_cpus", "tokio-codec", "tokio-current-thread", - "tokio-executor 0.1.10", + "tokio-executor", "tokio-fs", "tokio-io", "tokio-reactor", - "tokio-sync 
0.1.8", + "tokio-sync", "tokio-tcp", "tokio-threadpool", "tokio-timer", @@ -9468,7 +9440,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1de0e32a83f131e002238d7ccde18211c0a5397f60cbfffcb112868c2e0e20e" dependencies = [ "futures 0.1.29", - "tokio-executor 0.1.10", + "tokio-executor", ] [[package]] @@ -9481,17 +9453,6 @@ dependencies = [ "futures 0.1.29", ] -[[package]] -name = "tokio-executor" -version = "0.2.0-alpha.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ee9ceecf69145923834ea73f32ba40c790fd877b74a7817dd0b089f1eb9c7c8" -dependencies = [ - "futures-util-preview", - "lazy_static", - "tokio-sync 0.2.0-alpha.6", -] - [[package]] name = "tokio-fs" version = "0.1.7" @@ -9552,9 +9513,9 @@ dependencies = [ "num_cpus", "parking_lot 0.9.0", "slab", - "tokio-executor 0.1.10", + "tokio-executor", "tokio-io", - "tokio-sync 0.1.8", + "tokio-sync", ] [[package]] @@ -9588,17 +9549,6 @@ dependencies = [ "futures 0.1.29", ] -[[package]] -name = "tokio-sync" -version = "0.2.0-alpha.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f1aaeb685540f7407ea0e27f1c9757d258c7c6bf4e3eb19da6fc59b747239d2" -dependencies = [ - "fnv", - "futures-core-preview", - "futures-util-preview", -] - [[package]] name = "tokio-tcp" version = "0.1.4" @@ -9627,7 +9577,7 @@ dependencies = [ "log", "num_cpus", "slab", - "tokio-executor 0.1.10", + "tokio-executor", ] [[package]] @@ -9639,7 +9589,7 @@ dependencies = [ "crossbeam-utils", "futures 0.1.29", "slab", - "tokio-executor 0.1.10", + "tokio-executor", ] [[package]] diff --git a/client/basic-authorship/Cargo.toml b/client/basic-authorship/Cargo.toml index 1b1d8921bcfb3..f097d8044f612 100644 --- a/client/basic-authorship/Cargo.toml +++ b/client/basic-authorship/Cargo.toml @@ -29,7 +29,6 @@ sc-telemetry = { version = "2.0.0", path = "../telemetry" } sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } sc-block-builder = { version = "0.8.0", path = "../block-builder" } sc-proposer-metrics = { version = "0.8.0", path = "../proposer-metrics" } -tokio-executor = { version = "0.2.0-alpha.6", features = ["blocking"] } [dev-dependencies] sc-transaction-pool = { version = "2.0.0", path = "../../client/transaction-pool" } From 2d97a12fb7070a5fbc3dcc9d2918247df69d6245 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Wed, 18 Nov 2020 12:19:22 +0100 Subject: [PATCH 19/34] Add slashing events to elections-phragmen. (#7543) * Add slashing events to elections-phragmen. * Fix build * Apply suggestions from code review * Update frame/elections-phragmen/src/lib.rs * Update frame/elections-phragmen/src/lib.rs Co-authored-by: Guillaume Thiolliere Co-authored-by: Guillaume Thiolliere --- frame/elections-phragmen/src/lib.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index cf3864f2e3f92..be47b5adcce5e 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -682,6 +682,10 @@ decl_event!( /// A \[member\] has been removed. This should always be followed by either `NewTerm` or /// `EmptyTerm`. MemberKicked(AccountId), + /// A candidate was slashed due to failing to obtain a seat as member or runner-up + CandidateSlashed(AccountId, Balance), + /// A seat holder (member or runner-up) was slashed due to failing to retaining their position. 
+ SeatHolderSlashed(AccountId, Balance), /// A \[member\] has renounced their candidacy. MemberRenounced(AccountId), /// A voter was reported with the the report being successful or not. @@ -995,6 +999,7 @@ impl Module { new_runners_up_ids_sorted.binary_search(&c).is_err() { let (imbalance, _) = T::Currency::slash_reserved(&c, T::CandidacyBond::get()); + Self::deposit_event(RawEvent::CandidateSlashed(c, T::CandidacyBond::get())); T::LoserCandidate::on_unbalanced(imbalance); } }); @@ -1002,6 +1007,7 @@ impl Module { // Burn outgoing bonds to_burn_bond.into_iter().for_each(|x| { let (imbalance, _) = T::Currency::slash_reserved(&x, T::CandidacyBond::get()); + Self::deposit_event(RawEvent::SeatHolderSlashed(x, T::CandidacyBond::get())); T::LoserCandidate::on_unbalanced(imbalance); }); From c180950de63dbdfaa11053a8a56f35e694ed9e36 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 18 Nov 2020 16:05:35 +0100 Subject: [PATCH 20/34] Remove necessity to pass ConsensusEngineId when registering notifications protocol (#7549) * Remove necessity to pass ConsensusEngineId when registering notifications protocol * Line width * Fix tests protocol name * Other renames * Doc update * Change issue in TODO --- .../finality-grandpa/src/communication/mod.rs | 2 - .../src/communication/tests.rs | 23 ++-- client/finality-grandpa/src/lib.rs | 1 - client/finality-grandpa/src/tests.rs | 4 +- client/network-gossip/src/bridge.rs | 51 ++++--- client/network-gossip/src/lib.rs | 18 ++- client/network-gossip/src/state_machine.rs | 54 +++++--- client/network/src/behaviour.rs | 31 ++--- client/network/src/config.rs | 7 +- client/network/src/gossip.rs | 9 +- client/network/src/gossip/tests.rs | 14 +- client/network/src/protocol.rs | 82 ++++------- client/network/src/protocol/event.rs | 8 +- client/network/src/protocol/message.rs | 2 +- client/network/src/service.rs | 127 +++++++----------- client/network/src/service/out_events.rs | 29 ++-- client/network/src/service/tests.rs | 46 +++---- client/network/test/src/lib.rs | 4 +- 18 files changed, 228 insertions(+), 284 deletions(-) diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index 3daffcb9f2522..038d82a8cdc3b 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -68,7 +68,6 @@ mod periodic; #[cfg(test)] pub(crate) mod tests; -pub use sp_finality_grandpa::GRANDPA_ENGINE_ID; pub const GRANDPA_PROTOCOL_NAME: &'static str = "/paritytech/grandpa/1"; // cost scalars for reporting peers. 
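For illustration only (not part of the patch), a minimal sketch of the call-site change this patch asks of `NetworkService` consumers; `network`, `peer_id` and `payload` are hypothetical (an `Arc<sc_network::NetworkService<_, _>>` handle, a connected `PeerId`, and a `Vec<u8>`), and the GRANDPA constants mirror the ones touched in the hunks above:

// Before this patch: registration required both a ConsensusEngineId and a protocol name.
// network.register_notifications_protocol(GRANDPA_ENGINE_ID, GRANDPA_PROTOCOL_NAME);
//
// After this patch: the wire-level protocol name alone identifies the substream.
network.register_notifications_protocol(GRANDPA_PROTOCOL_NAME);

// Notifications are likewise addressed by protocol name rather than engine id.
network.write_notification(peer_id, GRANDPA_PROTOCOL_NAME.into(), payload);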
@@ -215,7 +214,6 @@ impl> NetworkBridge { let validator = Arc::new(validator); let gossip_engine = Arc::new(Mutex::new(GossipEngine::new( service.clone(), - GRANDPA_ENGINE_ID, GRANDPA_PROTOCOL_NAME, validator.clone() ))); diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index 1a773acd6d0fb..e1685256f7b8d 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -24,10 +24,11 @@ use sc_network_gossip::Validator; use std::sync::Arc; use sp_keyring::Ed25519Keyring; use parity_scale_codec::Encode; -use sp_runtime::{ConsensusEngineId, traits::NumberFor}; +use sp_runtime::traits::NumberFor; use std::{borrow::Cow, pin::Pin, task::{Context, Poll}}; +use crate::communication::GRANDPA_PROTOCOL_NAME; use crate::environment::SharedVoterSetState; -use sp_finality_grandpa::{AuthorityList, GRANDPA_ENGINE_ID}; +use sp_finality_grandpa::AuthorityList; use super::gossip::{self, GossipValidator}; use super::{VoterSet, Round, SetId}; @@ -57,11 +58,11 @@ impl sc_network_gossip::Network for TestNetwork { fn disconnect_peer(&self, _: PeerId) {} - fn write_notification(&self, who: PeerId, _: ConsensusEngineId, message: Vec) { + fn write_notification(&self, who: PeerId, _: Cow<'static, str>, message: Vec) { let _ = self.sender.unbounded_send(Event::WriteNotification(who, message)); } - fn register_notifications_protocol(&self, _: ConsensusEngineId, _: Cow<'static, str>) {} + fn register_notifications_protocol(&self, _: Cow<'static, str>) {} fn announce(&self, block: Hash, _associated_data: Vec) { let _ = self.sender.unbounded_send(Event::Announce(block)); @@ -86,7 +87,7 @@ impl sc_network_gossip::ValidatorContext for TestNetwork { >::write_notification( self, who.clone(), - GRANDPA_ENGINE_ID, + GRANDPA_PROTOCOL_NAME.into(), data, ); } @@ -287,20 +288,20 @@ fn good_commit_leads_to_relay() { // Add the sending peer and send the commit let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { remote: sender_id.clone(), - engine_id: GRANDPA_ENGINE_ID, + protocol: GRANDPA_PROTOCOL_NAME.into(), role: ObservedRole::Full, }); let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { remote: sender_id.clone(), - messages: vec![(GRANDPA_ENGINE_ID, commit_to_send.clone().into())], + messages: vec![(GRANDPA_PROTOCOL_NAME.into(), commit_to_send.clone().into())], }); // Add a random peer which will be the recipient of this message let receiver_id = sc_network::PeerId::random(); let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { remote: receiver_id.clone(), - engine_id: GRANDPA_ENGINE_ID, + protocol: GRANDPA_PROTOCOL_NAME.into(), role: ObservedRole::Full, }); @@ -319,7 +320,7 @@ fn good_commit_leads_to_relay() { sender.unbounded_send(NetworkEvent::NotificationsReceived { remote: receiver_id, - messages: vec![(GRANDPA_ENGINE_ID, msg.encode().into())], + messages: vec![(GRANDPA_PROTOCOL_NAME.into(), msg.encode().into())], }) }; @@ -434,12 +435,12 @@ fn bad_commit_leads_to_report() { Event::EventStream(sender) => { let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { remote: sender_id.clone(), - engine_id: GRANDPA_ENGINE_ID, + protocol: GRANDPA_PROTOCOL_NAME.into(), role: ObservedRole::Full, }); let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { remote: sender_id.clone(), - messages: vec![(GRANDPA_ENGINE_ID, commit_to_send.clone().into())], + messages: vec![(GRANDPA_PROTOCOL_NAME.into(), commit_to_send.clone().into())], }); 
true diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index 6ab95d7eac970..18b439abf5e68 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -1085,7 +1085,6 @@ where // to receive GRANDPA messages on the network. We don't process the // messages. network.register_notifications_protocol( - communication::GRANDPA_ENGINE_ID, From::from(communication::GRANDPA_PROTOCOL_NAME), ); diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 175c5360b2c13..44503d3c85d44 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -99,9 +99,7 @@ impl TestNetFactory for GrandpaTestNet { fn add_full_peer(&mut self) { self.add_full_peer_with_config(FullPeerConfig { - notifications_protocols: vec![ - (communication::GRANDPA_ENGINE_ID, communication::GRANDPA_PROTOCOL_NAME.into()) - ], + notifications_protocols: vec![communication::GRANDPA_PROTOCOL_NAME.into()], ..Default::default() }) } diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index 70c2942597aa5..98ada69590f1c 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -23,7 +23,7 @@ use futures::prelude::*; use futures::channel::mpsc::{channel, Sender, Receiver}; use libp2p::PeerId; use log::trace; -use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; +use sp_runtime::traits::Block as BlockT; use std::{ borrow::Cow, collections::{HashMap, VecDeque}, @@ -38,7 +38,7 @@ pub struct GossipEngine { state_machine: ConsensusGossip, network: Box + Send>, periodic_maintenance_interval: futures_timer::Delay, - engine_id: ConsensusEngineId, + protocol: Cow<'static, str>, /// Incoming events from the network. network_event_stream: Pin + Send>>, @@ -68,20 +68,21 @@ impl GossipEngine { /// Create a new instance. pub fn new + Send + Clone + 'static>( network: N, - engine_id: ConsensusEngineId, - protocol_name: impl Into>, + protocol: impl Into>, validator: Arc>, ) -> Self where B: 'static { + let protocol = protocol.into(); + // We grab the event stream before registering the notifications protocol, otherwise we // might miss events. 
let network_event_stream = network.event_stream(); - network.register_notifications_protocol(engine_id, protocol_name.into()); + network.register_notifications_protocol(protocol.clone()); GossipEngine { - state_machine: ConsensusGossip::new(validator, engine_id), + state_machine: ConsensusGossip::new(validator, protocol.clone()), network: Box::new(network), periodic_maintenance_interval: futures_timer::Delay::new(PERIODIC_MAINTENANCE_INTERVAL), - engine_id, + protocol, network_event_stream, message_sinks: HashMap::new(), @@ -181,21 +182,21 @@ impl Future for GossipEngine { ForwardingState::Idle => { match this.network_event_stream.poll_next_unpin(cx) { Poll::Ready(Some(event)) => match event { - Event::NotificationStreamOpened { remote, engine_id, role } => { - if engine_id != this.engine_id { + Event::NotificationStreamOpened { remote, protocol, role } => { + if protocol != this.protocol { continue; } this.state_machine.new_peer(&mut *this.network, remote, role); } - Event::NotificationStreamClosed { remote, engine_id } => { - if engine_id != this.engine_id { + Event::NotificationStreamClosed { remote, protocol } => { + if protocol != this.protocol { continue; } this.state_machine.peer_disconnected(&mut *this.network, remote); }, Event::NotificationsReceived { remote, messages } => { let messages = messages.into_iter().filter_map(|(engine, data)| { - if engine == this.engine_id { + if engine == this.protocol { Some(data.to_vec()) } else { None @@ -299,6 +300,7 @@ mod tests { use rand::Rng; use sc_network::ObservedRole; use sp_runtime::{testing::H256, traits::{Block as BlockT}}; + use std::borrow::Cow; use std::convert::TryInto; use std::sync::{Arc, Mutex}; use substrate_test_runtime_client::runtime::Block; @@ -329,11 +331,11 @@ mod tests { unimplemented!(); } - fn write_notification(&self, _: PeerId, _: ConsensusEngineId, _: Vec) { + fn write_notification(&self, _: PeerId, _: Cow<'static, str>, _: Vec) { unimplemented!(); } - fn register_notifications_protocol(&self, _: ConsensusEngineId, _: Cow<'static, str>) {} + fn register_notifications_protocol(&self, _: Cow<'static, str>) {} fn announce(&self, _: B::Hash, _: Vec) { unimplemented!(); @@ -361,8 +363,7 @@ mod tests { let network = TestNetwork::default(); let mut gossip_engine = GossipEngine::::new( network.clone(), - [1, 2, 3, 4], - "my_protocol", + "/my_protocol", Arc::new(AllowAll{}), ); @@ -383,14 +384,13 @@ mod tests { #[test] fn keeps_multiple_subscribers_per_topic_updated_with_both_old_and_new_messages() { let topic = H256::default(); - let engine_id = [1, 2, 3, 4]; + let protocol = Cow::Borrowed("/my_protocol"); let remote_peer = PeerId::random(); let network = TestNetwork::default(); let mut gossip_engine = GossipEngine::::new( network.clone(), - engine_id.clone(), - "my_protocol", + protocol.clone(), Arc::new(AllowAll{}), ); @@ -404,7 +404,7 @@ mod tests { event_sender.start_send( Event::NotificationStreamOpened { remote: remote_peer.clone(), - engine_id: engine_id.clone(), + protocol: protocol.clone(), role: ObservedRole::Authority, } ).expect("Event stream is unbounded; qed."); @@ -413,7 +413,7 @@ mod tests { let events = messages.iter().cloned().map(|m| { Event::NotificationsReceived { remote: remote_peer.clone(), - messages: vec![(engine_id, m.into())] + messages: vec![(protocol.clone(), m.into())] } }).collect::>(); @@ -498,7 +498,7 @@ mod tests { } fn prop(channels: Vec, notifications: Vec>) { - let engine_id = [1, 2, 3, 4]; + let protocol = Cow::Borrowed("/my_protocol"); let remote_peer = PeerId::random(); let network = 
TestNetwork::default(); @@ -524,8 +524,7 @@ mod tests { let mut gossip_engine = GossipEngine::::new( network.clone(), - engine_id.clone(), - "my_protocol", + protocol.clone(), Arc::new(TestValidator{}), ); @@ -558,7 +557,7 @@ mod tests { event_sender.start_send( Event::NotificationStreamOpened { remote: remote_peer.clone(), - engine_id: engine_id.clone(), + protocol: protocol.clone(), role: ObservedRole::Authority, } ).expect("Event stream is unbounded; qed."); @@ -576,7 +575,7 @@ mod tests { message.push(i_notification.try_into().unwrap()); message.push(i_message.try_into().unwrap()); - (engine_id, message.into()) + (protocol.clone(), message.into()) }).collect(); event_sender.start_send(Event::NotificationsReceived { diff --git a/client/network-gossip/src/lib.rs b/client/network-gossip/src/lib.rs index 1d566ed3cbba2..09e946d1a1ea9 100644 --- a/client/network-gossip/src/lib.rs +++ b/client/network-gossip/src/lib.rs @@ -33,7 +33,7 @@ //! - Implement the `Network` trait, representing the low-level networking primitives. It is //! already implemented on `sc_network::NetworkService`. //! - Implement the `Validator` trait. See the section below. -//! - Decide on a `ConsensusEngineId`. Each gossiping protocol should have a different one. +//! - Decide on a protocol name. Each gossiping protocol should have a different one. //! - Build a `GossipEngine` using these three elements. //! - Use the methods of the `GossipEngine` in order to send out messages and receive incoming //! messages. @@ -60,7 +60,7 @@ pub use self::validator::{DiscardAll, MessageIntent, Validator, ValidatorContext use futures::prelude::*; use sc_network::{Event, ExHashT, NetworkService, PeerId, ReputationChange}; -use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; +use sp_runtime::{traits::Block as BlockT}; use std::{borrow::Cow, pin::Pin, sync::Arc}; mod bridge; @@ -79,15 +79,14 @@ pub trait Network { fn disconnect_peer(&self, who: PeerId); /// Send a notification to a peer. - fn write_notification(&self, who: PeerId, engine_id: ConsensusEngineId, message: Vec); + fn write_notification(&self, who: PeerId, protocol: Cow<'static, str>, message: Vec); /// Registers a notifications protocol. /// /// See the documentation of [`NetworkService:register_notifications_protocol`] for more information. fn register_notifications_protocol( &self, - engine_id: ConsensusEngineId, - protocol_name: Cow<'static, str>, + protocol: Cow<'static, str>, ); /// Notify everyone we're connected to that we have the given block. 
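For illustration only (not part of the patch), a minimal sketch of how a downstream crate would construct a `GossipEngine` against the updated trait above; `Block` is the consumer's block type, `network` is assumed to be a clonable handle implementing `Network<Block>` (for example an `Arc<sc_network::NetworkService<Block, Hash>>`), and `MyValidator` is a hypothetical `Validator<Block>` implementation:

// The gossip engine is now created from a protocol name only; the former
// `ConsensusEngineId` argument is gone.
let gossip_engine = sc_network_gossip::GossipEngine::<Block>::new(
    network.clone(),
    "/my-chain/my-gossip/1",          // wire-level protocol name
    std::sync::Arc::new(MyValidator), // hypothetical gossip validator
);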
@@ -110,16 +109,15 @@ impl Network for Arc> { NetworkService::disconnect_peer(self, who) } - fn write_notification(&self, who: PeerId, engine_id: ConsensusEngineId, message: Vec) { - NetworkService::write_notification(self, who, engine_id, message) + fn write_notification(&self, who: PeerId, protocol: Cow<'static, str>, message: Vec) { + NetworkService::write_notification(self, who, protocol, message) } fn register_notifications_protocol( &self, - engine_id: ConsensusEngineId, - protocol_name: Cow<'static, str>, + protocol: Cow<'static, str>, ) { - NetworkService::register_notifications_protocol(self, engine_id, protocol_name) + NetworkService::register_notifications_protocol(self, protocol) } fn announce(&self, block: B::Hash, associated_data: Vec) { diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index 60c669ecb6680..8bd6d9df01911 100644 --- a/client/network-gossip/src/state_machine.rs +++ b/client/network-gossip/src/state_machine.rs @@ -18,6 +18,7 @@ use crate::{Network, MessageIntent, Validator, ValidatorContext, ValidationResult}; +use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use std::sync::Arc; use std::iter; @@ -26,7 +27,6 @@ use log::{error, trace}; use lru::LruCache; use libp2p::PeerId; use sp_runtime::traits::{Block as BlockT, Hash, HashFor}; -use sp_runtime::ConsensusEngineId; use sc_network::ObservedRole; use wasm_timer::Instant; @@ -89,7 +89,7 @@ impl<'g, 'p, B: BlockT> ValidatorContext for NetworkContext<'g, 'p, B> { /// Send addressed message to a peer. fn send_message(&mut self, who: &PeerId, message: Vec) { - self.network.write_notification(who.clone(), self.gossip.engine_id, message); + self.network.write_notification(who.clone(), self.gossip.protocol.clone(), message); } /// Send all messages with given topic to a peer. @@ -100,7 +100,7 @@ impl<'g, 'p, B: BlockT> ValidatorContext for NetworkContext<'g, 'p, B> { fn propagate<'a, B: BlockT, I>( network: &mut dyn Network, - engine_id: ConsensusEngineId, + protocol: Cow<'static, str>, messages: I, intent: MessageIntent, peers: &mut HashMap>, @@ -138,7 +138,7 @@ fn propagate<'a, B: BlockT, I>( peer.known_messages.insert(message_hash.clone()); trace!(target: "gossip", "Propagating to {}: {:?}", id, message); - network.write_notification(id.clone(), engine_id, message.clone()); + network.write_notification(id.clone(), protocol.clone(), message.clone()); } } } @@ -148,19 +148,19 @@ pub struct ConsensusGossip { peers: HashMap>, messages: Vec>, known_messages: LruCache, - engine_id: ConsensusEngineId, + protocol: Cow<'static, str>, validator: Arc>, next_broadcast: Instant, } impl ConsensusGossip { /// Create a new instance using the given validator. 
- pub fn new(validator: Arc>, engine_id: ConsensusEngineId) -> Self { + pub fn new(validator: Arc>, protocol: Cow<'static, str>) -> Self { ConsensusGossip { peers: HashMap::new(), messages: Default::default(), known_messages: LruCache::new(KNOWN_MESSAGES_CACHE_SIZE), - engine_id, + protocol, validator, next_broadcast: Instant::now() + REBROADCAST_INTERVAL, } @@ -235,7 +235,14 @@ impl ConsensusGossip { fn rebroadcast(&mut self, network: &mut dyn Network) { let messages = self.messages.iter() .map(|entry| (&entry.message_hash, &entry.topic, &entry.message)); - propagate(network, self.engine_id, messages, MessageIntent::PeriodicRebroadcast, &mut self.peers, &self.validator); + propagate( + network, + self.protocol.clone(), + messages, + MessageIntent::PeriodicRebroadcast, + &mut self.peers, + &self.validator + ); } /// Broadcast all messages with given topic. @@ -247,7 +254,7 @@ impl ConsensusGossip { } else { None } ); let intent = if force { MessageIntent::ForcedBroadcast } else { MessageIntent::Broadcast }; - propagate(network, self.engine_id, messages, intent, &mut self.peers, &self.validator); + propagate(network, self.protocol.clone(), messages, intent, &mut self.peers, &self.validator); } /// Prune old or no longer relevant consensus messages. Provide a predicate @@ -374,7 +381,7 @@ impl ConsensusGossip { peer.known_messages.insert(entry.message_hash.clone()); trace!(target: "gossip", "Sending topic message to {}: {:?}", who, entry.message); - network.write_notification(who.clone(), self.engine_id, entry.message.clone()); + network.write_notification(who.clone(), self.protocol.clone(), entry.message.clone()); } } } @@ -390,7 +397,14 @@ impl ConsensusGossip { let message_hash = HashFor::::hash(&message); self.register_message_hashed(message_hash, topic, message.clone(), None); let intent = if force { MessageIntent::ForcedBroadcast } else { MessageIntent::Broadcast }; - propagate(network, self.engine_id, iter::once((&message_hash, &topic, &message)), intent, &mut self.peers, &self.validator); + propagate( + network, + self.protocol.clone(), + iter::once((&message_hash, &topic, &message)), + intent, + &mut self.peers, + &self.validator + ); } /// Send addressed message to a peer. 
The message is not kept or multicast @@ -411,7 +425,7 @@ impl ConsensusGossip { trace!(target: "gossip", "Sending direct to {}: {:?}", who, message); peer.known_messages.insert(message_hash); - network.write_notification(who.clone(), self.engine_id, message); + network.write_notification(who.clone(), self.protocol.clone(), message); } } @@ -485,11 +499,11 @@ mod tests { unimplemented!(); } - fn write_notification(&self, _: PeerId, _: ConsensusEngineId, _: Vec) { + fn write_notification(&self, _: PeerId, _: Cow<'static, str>, _: Vec) { unimplemented!(); } - fn register_notifications_protocol(&self, _: ConsensusEngineId, _: Cow<'static, str>) {} + fn register_notifications_protocol(&self, _: Cow<'static, str>) {} fn announce(&self, _: B::Hash, _: Vec) { unimplemented!(); @@ -520,7 +534,7 @@ mod tests { let prev_hash = H256::random(); let best_hash = H256::random(); - let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), [0, 0, 0, 0]); + let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), "/foo".into()); let m1_hash = H256::random(); let m2_hash = H256::random(); let m1 = vec![1, 2, 3]; @@ -547,7 +561,7 @@ mod tests { #[test] fn message_stream_include_those_sent_before_asking() { - let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), [0, 0, 0, 0]); + let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), "/foo".into()); // Register message. let message = vec![4, 5, 6]; @@ -562,7 +576,7 @@ mod tests { #[test] fn can_keep_multiple_messages_per_topic() { - let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), [0, 0, 0, 0]); + let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), "/foo".into()); let topic = [1; 32].into(); let msg_a = vec![1, 2, 3]; @@ -576,7 +590,7 @@ mod tests { #[test] fn peer_is_removed_on_disconnect() { - let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), [0, 0, 0, 0]); + let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), "/foo".into()); let mut network = NoOpNetwork::default(); @@ -592,7 +606,7 @@ mod tests { fn on_incoming_ignores_discarded_messages() { let to_forward = ConsensusGossip::::new( Arc::new(DiscardAll), - [0, 0, 0, 0], + "/foo".into(), ).on_incoming( &mut NoOpNetwork::default(), PeerId::random(), @@ -612,7 +626,7 @@ mod tests { let to_forward = ConsensusGossip::::new( Arc::new(AllowAll), - [0, 0, 0, 0], + "/foo".into(), ).on_incoming( &mut network, // Unregistered peer. diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index c8684eba625c5..41723d9068c2e 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -30,7 +30,7 @@ use libp2p::kad::record; use libp2p::swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess, PollParameters}; use log::debug; use sp_consensus::{BlockOrigin, import_queue::{IncomingBlock, Origin}}; -use sp_runtime::{traits::{Block as BlockT, NumberFor}, ConsensusEngineId, Justification}; +use sp_runtime::{traits::{Block as BlockT, NumberFor}, Justification}; use std::{ borrow::Cow, collections::{HashSet, VecDeque}, @@ -131,7 +131,7 @@ pub enum BehaviourOut { /// Node we opened the substream with. remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. - engine_id: ConsensusEngineId, + protocol: Cow<'static, str>, /// Object that permits sending notifications to the peer. notifications_sink: NotificationsSink, /// Role of the remote. @@ -147,7 +147,7 @@ pub enum BehaviourOut { /// Id of the peer we are connected to. remote: PeerId, /// The concerned protocol. 
Each protocol uses a different substream. - engine_id: ConsensusEngineId, + protocol: Cow<'static, str>, /// Replacement for the previous [`NotificationsSink`]. notifications_sink: NotificationsSink, }, @@ -158,7 +158,7 @@ pub enum BehaviourOut { /// Node we closed the substream with. remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. - engine_id: ConsensusEngineId, + protocol: Cow<'static, str>, }, /// Received one or more messages from the given node using the given protocol. @@ -166,7 +166,7 @@ pub enum BehaviourOut { /// Node we received the message from. remote: PeerId, /// Concerned protocol and associated message. - messages: Vec<(ConsensusEngineId, Bytes)>, + messages: Vec<(Cow<'static, str>, Bytes)>, }, /// Events generated by a DHT as a response to get_value or put_value requests as well as the @@ -257,19 +257,20 @@ impl Behaviour { /// will retain the protocols that were registered then, and not any new one. pub fn register_notifications_protocol( &mut self, - engine_id: ConsensusEngineId, - protocol_name: impl Into>, + protocol: impl Into>, ) { + let protocol = protocol.into(); + // This is the message that we will send to the remote as part of the initial handshake. // At the moment, we force this to be an encoded `Roles`. let handshake_message = Roles::from(&self.role).encode(); - let list = self.substrate.register_notifications_protocol(engine_id, protocol_name, handshake_message); + let list = self.substrate.register_notifications_protocol(protocol.clone(), handshake_message); for (remote, roles, notifications_sink) in list { let role = reported_roles_to_observed_role(&self.role, remote, roles); self.events.push_back(BehaviourOut::NotificationStreamOpened { remote: remote.clone(), - engine_id, + protocol: protocol.clone(), role, notifications_sink: notifications_sink.clone(), }); @@ -363,28 +364,28 @@ Behaviour { }, CustomMessageOutcome::NotificationStreamOpened { remote, protocols, roles, notifications_sink } => { let role = reported_roles_to_observed_role(&self.role, &remote, roles); - for engine_id in protocols { + for protocol in protocols { self.events.push_back(BehaviourOut::NotificationStreamOpened { remote: remote.clone(), - engine_id, + protocol, role: role.clone(), notifications_sink: notifications_sink.clone(), }); } }, CustomMessageOutcome::NotificationStreamReplaced { remote, protocols, notifications_sink } => - for engine_id in protocols { + for protocol in protocols { self.events.push_back(BehaviourOut::NotificationStreamReplaced { remote: remote.clone(), - engine_id, + protocol, notifications_sink: notifications_sink.clone(), }); }, CustomMessageOutcome::NotificationStreamClosed { remote, protocols } => - for engine_id in protocols { + for protocol in protocols { self.events.push_back(BehaviourOut::NotificationStreamClosed { remote: remote.clone(), - engine_id, + protocol, }); }, CustomMessageOutcome::NotificationsReceived { remote, messages } => { diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 86450dc6e79bf..db33623a2e330 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -41,7 +41,7 @@ use libp2p::{ }; use prometheus_endpoint::Registry; use sp_consensus::{block_validation::BlockAnnounceValidator, import_queue::ImportQueue}; -use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; +use sp_runtime::traits::Block as BlockT; use std::{borrow::Cow, convert::TryFrom, future::Future, pin::Pin, str::FromStr}; use std::{ collections::HashMap, @@ -400,9 +400,8 @@ 
pub struct NetworkConfiguration { pub boot_nodes: Vec, /// The node key configuration, which determines the node's network identity keypair. pub node_key: NodeKeyConfig, - /// List of notifications protocols that the node supports. Must also include a - /// `ConsensusEngineId` for backwards-compatibility. - pub notifications_protocols: Vec<(ConsensusEngineId, Cow<'static, str>)>, + /// List of names of notifications protocols that the node supports. + pub notifications_protocols: Vec>, /// List of request-response protocols that the node supports. pub request_response_protocols: Vec, /// Maximum allowed number of incoming connections. diff --git a/client/network/src/gossip.rs b/client/network/src/gossip.rs index 9d20229288a42..ac3f92e9d37aa 100644 --- a/client/network/src/gossip.rs +++ b/client/network/src/gossip.rs @@ -53,8 +53,9 @@ use async_std::sync::{Mutex, MutexGuard}; use futures::prelude::*; use futures::channel::mpsc::{channel, Receiver, Sender}; use libp2p::PeerId; -use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; +use sp_runtime::traits::Block as BlockT; use std::{ + borrow::Cow, collections::VecDeque, fmt, sync::Arc, @@ -82,7 +83,7 @@ impl QueuedSender { pub fn new( service: Arc>, peer_id: PeerId, - protocol: ConsensusEngineId, + protocol: Cow<'static, str>, queue_size_limit: usize, messages_encode: F ) -> (Self, impl Future + Send + 'static) @@ -193,7 +194,7 @@ async fn create_background_future Vec> mut wait_for_sender: Receiver<()>, service: Arc>, peer_id: PeerId, - protocol: ConsensusEngineId, + protocol: Cow<'static, str>, shared_message_queue: SharedMessageQueue, messages_encode: F, ) { @@ -212,7 +213,7 @@ async fn create_background_future Vec> // Starting from below, we try to send the message. If an error happens when sending, // the only sane option we have is to silently discard the message. - let sender = match service.notification_sender(peer_id.clone(), protocol) { + let sender = match service.notification_sender(peer_id.clone(), protocol.clone()) { Ok(s) => s, Err(_) => continue, }; diff --git a/client/network/src/gossip/tests.rs b/client/network/src/gossip/tests.rs index 0f01ed81bffcb..e94052c0e4d29 100644 --- a/client/network/src/gossip/tests.rs +++ b/client/network/src/gossip/tests.rs @@ -20,7 +20,7 @@ use crate::{config, gossip::QueuedSender, Event, NetworkService, NetworkWorker}; use futures::prelude::*; use sp_runtime::traits::{Block as BlockT, Header as _}; -use std::{sync::Arc, time::Duration}; +use std::{borrow::Cow, sync::Arc, time::Duration}; use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; type TestNetworkService = NetworkService< @@ -120,24 +120,24 @@ fn build_test_full_node(config: config::NetworkConfiguration) (service, event_stream) } -const ENGINE_ID: sp_runtime::ConsensusEngineId = *b"foo\0"; +const PROTOCOL_NAME: Cow<'static, str> = Cow::Borrowed("/foo"); /// Builds two nodes and their associated events stream. -/// The nodes are connected together and have the `ENGINE_ID` protocol registered. +/// The nodes are connected together and have the `PROTOCOL_NAME` protocol registered. 
fn build_nodes_one_proto() -> (Arc, impl Stream, Arc, impl Stream) { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; let (node1, events_stream1) = build_test_full_node(config::NetworkConfiguration { - notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], + notifications_protocols: vec![PROTOCOL_NAME], listen_addresses: vec![listen_addr.clone()], transport: config::TransportConfig::MemoryOnly, .. config::NetworkConfiguration::new_local() }); let (node2, events_stream2) = build_test_full_node(config::NetworkConfiguration { - notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], + notifications_protocols: vec![PROTOCOL_NAME], listen_addresses: vec![], reserved_nodes: vec![config::MultiaddrWithPeerId { multiaddr: listen_addr, @@ -165,7 +165,7 @@ fn basic_works() { Event::NotificationStreamClosed { .. } => panic!(), Event::NotificationsReceived { messages, .. } => { for message in messages { - assert_eq!(message.0, ENGINE_ID); + assert_eq!(message.0, PROTOCOL_NAME); assert_eq!(message.1, &b"message"[..]); received_notifications += 1; } @@ -181,7 +181,7 @@ fn basic_works() { async_std::task::block_on(async move { let (mut sender, bg_future) = - QueuedSender::new(node1, node2_id, ENGINE_ID, NUM_NOTIFS, |msg| msg); + QueuedSender::new(node1, node2_id, PROTOCOL_NAME, NUM_NOTIFS, |msg| msg); async_std::task::spawn(bg_future); // Wait for the `NotificationStreamOpened`. diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 9403e471b0f27..d0b6b2823a2c8 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -37,7 +37,7 @@ use sp_consensus::{ import_queue::{BlockImportResult, BlockImportError, IncomingBlock, Origin} }; use codec::{Decode, DecodeAll, Encode}; -use sp_runtime::{generic::BlockId, ConsensusEngineId, Justification}; +use sp_runtime::{generic::BlockId, Justification}; use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, NumberFor, Zero, CheckedSub }; @@ -231,8 +231,8 @@ pub struct Protocol { transaction_pool: Arc>, /// Handles opening the unique substream and sending and receiving raw messages. behaviour: GenericProto, - /// For each legacy gossiping engine ID, the corresponding new protocol name. - protocol_name_by_engine: HashMap>, + /// List of notifications protocols that have been registered. + notification_protocols: Vec>, /// For each protocol name, the legacy equivalent. legacy_equiv_by_name: HashMap, Fallback>, /// Name of the protocol used for transactions. @@ -252,6 +252,7 @@ struct PacketStats { count_in: u64, count_out: u64, } + /// Peer information #[derive(Debug, Clone)] struct Peer { @@ -349,8 +350,8 @@ fn build_status_message(protocol_config: &ProtocolConfig, chain: &Arc /// Fallback mechanism to use to send a notification if no substream is open. #[derive(Debug, Clone, PartialEq, Eq)] enum Fallback { - /// Use a `Message::Consensus` with the given engine ID. - Consensus(ConsensusEngineId), + /// Formerly-known as `Consensus` messages. Now regular notifications. + Consensus, /// The message is the bytes encoding of a `Transactions` (which is itself defined as a `Vec`). Transactions, /// The message is the bytes encoding of a `BlockAnnounce`. 
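For illustration only (not part of the patch), a minimal sketch of consuming the network event stream now that notification events are keyed by protocol name rather than `ConsensusEngineId`; it mirrors the pattern in the updated tests above, with `events` assumed to be the stream obtained from the service's event-stream API, `futures::StreamExt` in scope for `.next()`, and `PROTOCOL_NAME` the name registered earlier:

while let Some(event) = events.next().await {
    match event {
        sc_network::Event::NotificationStreamOpened { remote, protocol, .. }
            if protocol == PROTOCOL_NAME =>
        {
            // A notifications substream for our protocol is now open with `remote`.
        }
        sc_network::Event::NotificationsReceived { remote, messages } => {
            for (protocol, payload) in messages {
                if protocol == PROTOCOL_NAME {
                    // Handle `payload` received from `remote` on our protocol.
                }
            }
        }
        _ => {}
    }
}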
@@ -446,7 +447,7 @@ impl Protocol { transaction_pool, peerset_handle: peerset_handle.clone(), behaviour, - protocol_name_by_engine: HashMap::new(), + notification_protocols: Vec::new(), legacy_equiv_by_name, transactions_protocol, block_announces_protocol, @@ -621,7 +622,9 @@ impl Protocol { GenericMessage::RemoteCallRequest(_) | GenericMessage::RemoteReadRequest(_) | GenericMessage::RemoteHeaderRequest(_) | - GenericMessage::RemoteChangesRequest(_) => { + GenericMessage::RemoteChangesRequest(_) | + GenericMessage::Consensus(_) | + GenericMessage::ConsensusBatch(_) => { debug!( target: "sub-libp2p", "Received no longer supported legacy request from {:?}", @@ -630,38 +633,6 @@ impl Protocol { self.disconnect_peer(&who); self.peerset_handle.report_peer(who, rep::BAD_PROTOCOL); }, - GenericMessage::Consensus(msg) => - return if self.protocol_name_by_engine.contains_key(&msg.engine_id) { - CustomMessageOutcome::NotificationsReceived { - remote: who, - messages: vec![(msg.engine_id, From::from(msg.data))], - } - } else { - debug!(target: "sync", "Received message on non-registered protocol: {:?}", msg.engine_id); - CustomMessageOutcome::None - }, - GenericMessage::ConsensusBatch(messages) => { - let messages = messages - .into_iter() - .filter_map(|msg| { - if self.protocol_name_by_engine.contains_key(&msg.engine_id) { - Some((msg.engine_id, From::from(msg.data))) - } else { - debug!(target: "sync", "Received message on non-registered protocol: {:?}", msg.engine_id); - None - } - }) - .collect::>(); - - return if !messages.is_empty() { - CustomMessageOutcome::NotificationsReceived { - remote: who, - messages, - } - } else { - CustomMessageOutcome::None - }; - }, } CustomMessageOutcome::None @@ -685,7 +656,7 @@ impl Protocol { // Notify all the notification protocols as closed. CustomMessageOutcome::NotificationStreamClosed { remote: peer, - protocols: self.protocol_name_by_engine.keys().cloned().collect(), + protocols: self.notification_protocols.clone(), } } else { CustomMessageOutcome::None @@ -939,7 +910,7 @@ impl Protocol { // Notify all the notification protocols as open. CustomMessageOutcome::NotificationStreamOpened { remote: who, - protocols: self.protocol_name_by_engine.keys().cloned().collect(), + protocols: self.notification_protocols.clone(), roles: info.roles, notifications_sink, } @@ -952,16 +923,17 @@ impl Protocol { /// returns a list of substreams to open as a result. 
pub fn register_notifications_protocol<'a>( &'a mut self, - engine_id: ConsensusEngineId, - protocol_name: impl Into>, + protocol: impl Into>, handshake_message: Vec, ) -> impl Iterator + 'a { - let protocol_name = protocol_name.into(); - if self.protocol_name_by_engine.insert(engine_id, protocol_name.clone()).is_some() { - error!(target: "sub-libp2p", "Notifications protocol already registered: {:?}", protocol_name); + let protocol = protocol.into(); + + if self.notification_protocols.iter().any(|p| *p == protocol) { + error!(target: "sub-libp2p", "Notifications protocol already registered: {:?}", protocol); } else { - self.behaviour.register_notif_protocol(protocol_name.clone(), handshake_message); - self.legacy_equiv_by_name.insert(protocol_name, Fallback::Consensus(engine_id)); + self.notification_protocols.push(protocol.clone()); + self.behaviour.register_notif_protocol(protocol.clone(), handshake_message); + self.legacy_equiv_by_name.insert(protocol, Fallback::Consensus); } let behaviour = &self.behaviour; @@ -1450,20 +1422,20 @@ pub enum CustomMessageOutcome { /// Notification protocols have been opened with a remote. NotificationStreamOpened { remote: PeerId, - protocols: Vec, + protocols: Vec>, roles: Roles, notifications_sink: NotificationsSink }, /// The [`NotificationsSink`] of some notification protocols need an update. NotificationStreamReplaced { remote: PeerId, - protocols: Vec, + protocols: Vec>, notifications_sink: NotificationsSink, }, /// Notification protocols have been closed with a remote. - NotificationStreamClosed { remote: PeerId, protocols: Vec }, + NotificationStreamClosed { remote: PeerId, protocols: Vec> }, /// Messages have been received on one or more notifications protocols. - NotificationsReceived { remote: PeerId, messages: Vec<(ConsensusEngineId, Bytes)> }, + NotificationsReceived { remote: PeerId, messages: Vec<(Cow<'static, str>, Bytes)> }, /// A new block request must be emitted. /// You must later call either [`Protocol::on_block_response`] or /// [`Protocol::on_block_request_failed`]. @@ -1664,7 +1636,7 @@ impl NetworkBehaviour for Protocol { GenericProtoOut::CustomProtocolReplaced { peer_id, notifications_sink, .. } => { CustomMessageOutcome::NotificationStreamReplaced { remote: peer_id, - protocols: self.protocol_name_by_engine.keys().cloned().collect(), + protocols: self.notification_protocols.clone(), notifications_sink, } }, @@ -1675,10 +1647,10 @@ impl NetworkBehaviour for Protocol { self.on_custom_message(peer_id, message), GenericProtoOut::Notification { peer_id, protocol_name, message } => match self.legacy_equiv_by_name.get(&protocol_name) { - Some(Fallback::Consensus(engine_id)) => { + Some(Fallback::Consensus) => { CustomMessageOutcome::NotificationsReceived { remote: peer_id, - messages: vec![(*engine_id, message.freeze())], + messages: vec![(protocol_name, message.freeze())], } } Some(Fallback::Transactions) => { diff --git a/client/network/src/protocol/event.rs b/client/network/src/protocol/event.rs index 637bf805b5024..86cb93bef26dd 100644 --- a/client/network/src/protocol/event.rs +++ b/client/network/src/protocol/event.rs @@ -20,7 +20,7 @@ use bytes::Bytes; use libp2p::core::PeerId; use libp2p::kad::record::Key; -use sp_runtime::ConsensusEngineId; +use std::borrow::Cow; /// Events generated by DHT as a response to get_value and put_value requests. #[derive(Debug, Clone)] @@ -53,7 +53,7 @@ pub enum Event { /// Node we opened the substream with. remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. 
- engine_id: ConsensusEngineId, + protocol: Cow<'static, str>, /// Role of the remote. role: ObservedRole, }, @@ -64,7 +64,7 @@ pub enum Event { /// Node we closed the substream with. remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. - engine_id: ConsensusEngineId, + protocol: Cow<'static, str>, }, /// Received one or more messages from the given node using the given protocol. @@ -72,7 +72,7 @@ pub enum Event { /// Node we received the message from. remote: PeerId, /// Concerned protocol and associated message. - messages: Vec<(ConsensusEngineId, Bytes)>, + messages: Vec<(Cow<'static, str>, Bytes)>, }, } diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index 1cd78c0ed1dda..dae7b86db8771 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -216,7 +216,7 @@ pub mod generic { #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] pub struct ConsensusMessage { /// Identifies consensus engine. - pub engine_id: ConsensusEngineId, + pub protocol: ConsensusEngineId, /// Message payload. pub data: Vec, } diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 5fc8485947ff5..3296a97d71bbc 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -53,10 +53,7 @@ use metrics::{Metrics, MetricSources, Histogram, HistogramVec}; use parking_lot::Mutex; use sc_peerset::PeersetHandle; use sp_consensus::import_queue::{BlockImportError, BlockImportResult, ImportQueue, Link}; -use sp_runtime::{ - traits::{Block as BlockT, NumberFor}, - ConsensusEngineId, -}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use std::{ borrow::Cow, @@ -100,9 +97,7 @@ pub struct NetworkService { to_worker: TracingUnboundedSender>, /// For each peer and protocol combination, an object that allows sending notifications to /// that peer. Updated by the [`NetworkWorker`]. - peers_notifications_sinks: Arc>>, - /// For each legacy gossiping engine ID, the corresponding new protocol name. - protocol_name_by_engine: Mutex>>, + peers_notifications_sinks: Arc), NotificationsSink>>>, /// Field extracted from the [`Metrics`] struct and necessary to report the /// notifications-related metrics. 
notifications_sizes_metric: Option, @@ -331,8 +326,8 @@ impl NetworkWorker { } }; - for (engine_id, protocol_name) in ¶ms.network_config.notifications_protocols { - behaviour.register_notifications_protocol(*engine_id, protocol_name.clone()); + for protocol in ¶ms.network_config.notifications_protocols { + behaviour.register_notifications_protocol(protocol.clone()); } let (transport, bandwidth) = { let (config_mem, config_wasm) = match params.network_config.transport { @@ -384,9 +379,6 @@ impl NetworkWorker { let external_addresses = Arc::new(Mutex::new(Vec::new())); let peers_notifications_sinks = Arc::new(Mutex::new(HashMap::new())); - let protocol_name_by_engine = Mutex::new({ - params.network_config.notifications_protocols.iter().cloned().collect() - }); let service = Arc::new(NetworkService { bandwidth, @@ -397,7 +389,6 @@ impl NetworkWorker { local_peer_id, to_worker, peers_notifications_sinks: peers_notifications_sinks.clone(), - protocol_name_by_engine, notifications_sizes_metric: metrics.as_ref().map(|metrics| metrics.notifications_sizes.clone()), _marker: PhantomData, @@ -640,40 +631,32 @@ impl NetworkService { /// The protocol must have been registered with `register_notifications_protocol` or /// [`NetworkConfiguration::notifications_protocols`](crate::config::NetworkConfiguration::notifications_protocols). /// - pub fn write_notification(&self, target: PeerId, engine_id: ConsensusEngineId, message: Vec) { + pub fn write_notification(&self, target: PeerId, protocol: Cow<'static, str>, message: Vec) { // We clone the `NotificationsSink` in order to be able to unlock the network-wide // `peers_notifications_sinks` mutex as soon as possible. let sink = { let peers_notifications_sinks = self.peers_notifications_sinks.lock(); - if let Some(sink) = peers_notifications_sinks.get(&(target, engine_id)) { + if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) { sink.clone() } else { // Notification silently discarded, as documented. + log::error!( + target: "sub-libp2p", + "Attempted to send notification on unknown protocol: {:?}", + protocol, + ); return; } }; - // Used later for the metrics report. - let message_len = message.len(); - - // Determine the wire protocol name corresponding to this `engine_id`. - let protocol_name = self.protocol_name_by_engine.lock().get(&engine_id).cloned(); - if let Some(protocol_name) = protocol_name { - sink.send_sync_notification(protocol_name, message); - } else { - log::error!( - target: "sub-libp2p", - "Attempted to send notification on unknown protocol: {:?}", - engine_id, - ); - return; - } - if let Some(notifications_sizes_metric) = self.notifications_sizes_metric.as_ref() { notifications_sizes_metric - .with_label_values(&["out", &maybe_utf8_bytes_to_string(&engine_id)]) - .observe(message_len as f64); + .with_label_values(&["out", &protocol]) + .observe(message.len() as f64); } + + // Sending is communicated to the `NotificationsSink`. + sink.send_sync_notification(protocol, message); } /// Obtains a [`NotificationSender`] for a connected peer, if it exists. @@ -746,31 +729,27 @@ impl NetworkService { pub fn notification_sender( &self, target: PeerId, - engine_id: ConsensusEngineId, + protocol: Cow<'static, str>, ) -> Result { // We clone the `NotificationsSink` in order to be able to unlock the network-wide // `peers_notifications_sinks` mutex as soon as possible. 
let sink = { let peers_notifications_sinks = self.peers_notifications_sinks.lock(); - if let Some(sink) = peers_notifications_sinks.get(&(target, engine_id)) { + if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) { sink.clone() } else { return Err(NotificationSenderError::Closed); } }; - // Determine the wire protocol name corresponding to this `engine_id`. - let protocol_name = match self.protocol_name_by_engine.lock().get(&engine_id).cloned() { - Some(p) => p, - None => return Err(NotificationSenderError::BadProtocol), - }; + let notification_size_metric = self.notifications_sizes_metric.as_ref().map(|histogram| { + histogram.with_label_values(&["out", &protocol]) + }); Ok(NotificationSender { sink, - protocol_name, - notification_size_metric: self.notifications_sizes_metric.as_ref().map(|histogram| { - histogram.with_label_values(&["out", &maybe_utf8_bytes_to_string(&engine_id)]) - }), + protocol_name: protocol, + notification_size_metric, }) } @@ -841,17 +820,13 @@ impl NetworkService { /// /// Please call `event_stream` before registering a protocol, otherwise you may miss events /// about the protocol that you have registered. - // TODO: remove this method after https://github.com/paritytech/substrate/issues/4587 + // TODO: remove this method after https://github.com/paritytech/substrate/issues/6827 pub fn register_notifications_protocol( &self, - engine_id: ConsensusEngineId, protocol_name: impl Into>, ) { - let protocol_name = protocol_name.into(); - self.protocol_name_by_engine.lock().insert(engine_id, protocol_name.clone()); let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::RegisterNotifProtocol { - engine_id, - protocol_name, + protocol_name: protocol_name.into(), }); } @@ -1209,7 +1184,6 @@ enum ServiceToWorkerMsg { pending_response: oneshot::Sender, RequestFailure>>, }, RegisterNotifProtocol { - engine_id: ConsensusEngineId, protocol_name: Cow<'static, str>, }, DisconnectPeer(PeerId), @@ -1253,7 +1227,7 @@ pub struct NetworkWorker { >, /// For each peer and protocol combination, an object that allows sending notifications to /// that peer. Shared with the [`NetworkService`]. 
- peers_notifications_sinks: Arc>>, + peers_notifications_sinks: Arc), NotificationsSink>>>, } impl Future for NetworkWorker { @@ -1347,10 +1321,8 @@ impl Future for NetworkWorker { }, } }, - ServiceToWorkerMsg::RegisterNotifProtocol { engine_id, protocol_name } => { - this.network_service - .register_notifications_protocol(engine_id, protocol_name); - }, + ServiceToWorkerMsg::RegisterNotifProtocol { protocol_name } => + this.network_service.register_notifications_protocol(protocol_name), ServiceToWorkerMsg::DisconnectPeer(who) => this.network_service.user_protocol_mut().disconnect_peer(&who), ServiceToWorkerMsg::UpdateChain => @@ -1474,24 +1446,28 @@ impl Future for NetworkWorker { .inc(); } }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamOpened { remote, engine_id, notifications_sink, role })) => { + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamOpened { + remote, protocol, notifications_sink, role + })) => { if let Some(metrics) = this.metrics.as_ref() { metrics.notifications_streams_opened_total - .with_label_values(&[&maybe_utf8_bytes_to_string(&engine_id)]).inc(); + .with_label_values(&[&protocol]).inc(); } { let mut peers_notifications_sinks = this.peers_notifications_sinks.lock(); - peers_notifications_sinks.insert((remote.clone(), engine_id), notifications_sink); + peers_notifications_sinks.insert((remote.clone(), protocol.clone()), notifications_sink); } this.event_streams.send(Event::NotificationStreamOpened { remote, - engine_id, + protocol, role, }); }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamReplaced { remote, engine_id, notifications_sink })) => { + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamReplaced { + remote, protocol, notifications_sink + })) => { let mut peers_notifications_sinks = this.peers_notifications_sinks.lock(); - if let Some(s) = peers_notifications_sinks.get_mut(&(remote, engine_id)) { + if let Some(s) = peers_notifications_sinks.get_mut(&(remote, protocol)) { *s = notifications_sink; } else { log::error!( @@ -1513,33 +1489,33 @@ impl Future for NetworkWorker { // https://github.com/paritytech/substrate/issues/6403. 
/*this.event_streams.send(Event::NotificationStreamClosed { remote, - engine_id, + protocol, }); this.event_streams.send(Event::NotificationStreamOpened { remote, - engine_id, + protocol, role, });*/ }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamClosed { remote, engine_id })) => { + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamClosed { remote, protocol })) => { if let Some(metrics) = this.metrics.as_ref() { metrics.notifications_streams_closed_total - .with_label_values(&[&maybe_utf8_bytes_to_string(&engine_id[..])]).inc(); + .with_label_values(&[&protocol[..]]).inc(); } this.event_streams.send(Event::NotificationStreamClosed { remote: remote.clone(), - engine_id, + protocol: protocol.clone(), }); { let mut peers_notifications_sinks = this.peers_notifications_sinks.lock(); - peers_notifications_sinks.remove(&(remote.clone(), engine_id)); + peers_notifications_sinks.remove(&(remote.clone(), protocol)); } }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationsReceived { remote, messages })) => { if let Some(metrics) = this.metrics.as_ref() { - for (engine_id, message) in &messages { + for (protocol, message) in &messages { metrics.notifications_sizes - .with_label_values(&["in", &maybe_utf8_bytes_to_string(engine_id)]) + .with_label_values(&["in", protocol]) .observe(message.len() as f64); } } @@ -1748,17 +1724,6 @@ impl Future for NetworkWorker { impl Unpin for NetworkWorker { } -/// Turns bytes that are potentially UTF-8 into a reasonable representable string. -/// -/// Meant to be used only for debugging or metrics-reporting purposes. -pub(crate) fn maybe_utf8_bytes_to_string(id: &[u8]) -> Cow { - if let Ok(s) = std::str::from_utf8(&id[..]) { - Cow::Borrowed(s) - } else { - Cow::Owned(format!("{:?}", id)) - } -} - /// The libp2p swarm, customized for our needs. type Swarm = libp2p::swarm::Swarm>; diff --git a/client/network/src/service/out_events.rs b/client/network/src/service/out_events.rs index 1b86a5fa4317d..976548f6ed440 100644 --- a/client/network/src/service/out_events.rs +++ b/client/network/src/service/out_events.rs @@ -33,7 +33,6 @@ //! use crate::Event; -use super::maybe_utf8_bytes_to_string; use futures::{prelude::*, channel::mpsc, ready, stream::FusedStream}; use parking_lot::Mutex; @@ -228,23 +227,23 @@ impl Metrics { .with_label_values(&["dht", "sent", name]) .inc_by(num); } - Event::NotificationStreamOpened { engine_id, .. } => { + Event::NotificationStreamOpened { protocol, .. } => { self.events_total - .with_label_values(&[&format!("notif-open-{:?}", engine_id), "sent", name]) + .with_label_values(&[&format!("notif-open-{:?}", protocol), "sent", name]) .inc_by(num); }, - Event::NotificationStreamClosed { engine_id, .. } => { + Event::NotificationStreamClosed { protocol, .. } => { self.events_total - .with_label_values(&[&format!("notif-closed-{:?}", engine_id), "sent", name]) + .with_label_values(&[&format!("notif-closed-{:?}", protocol), "sent", name]) .inc_by(num); }, Event::NotificationsReceived { messages, .. 
} => { - for (engine_id, message) in messages { + for (protocol, message) in messages { self.events_total - .with_label_values(&[&format!("notif-{:?}", engine_id), "sent", name]) + .with_label_values(&[&format!("notif-{:?}", protocol), "sent", name]) .inc_by(num); self.notifications_sizes - .with_label_values(&[&maybe_utf8_bytes_to_string(engine_id), "sent", name]) + .with_label_values(&[protocol, "sent", name]) .inc_by(num.saturating_mul(u64::try_from(message.len()).unwrap_or(u64::max_value()))); } }, @@ -258,23 +257,23 @@ impl Metrics { .with_label_values(&["dht", "received", name]) .inc(); } - Event::NotificationStreamOpened { engine_id, .. } => { + Event::NotificationStreamOpened { protocol, .. } => { self.events_total - .with_label_values(&[&format!("notif-open-{:?}", engine_id), "received", name]) + .with_label_values(&[&format!("notif-open-{:?}", protocol), "received", name]) .inc(); }, - Event::NotificationStreamClosed { engine_id, .. } => { + Event::NotificationStreamClosed { protocol, .. } => { self.events_total - .with_label_values(&[&format!("notif-closed-{:?}", engine_id), "received", name]) + .with_label_values(&[&format!("notif-closed-{:?}", protocol), "received", name]) .inc(); }, Event::NotificationsReceived { messages, .. } => { - for (engine_id, message) in messages { + for (protocol, message) in messages { self.events_total - .with_label_values(&[&format!("notif-{:?}", engine_id), "received", name]) + .with_label_values(&[&format!("notif-{:?}", protocol), "received", name]) .inc(); self.notifications_sizes - .with_label_values(&[&maybe_utf8_bytes_to_string(engine_id), "received", name]) + .with_label_values(&[&protocol, "received", name]) .inc_by(u64::try_from(message.len()).unwrap_or(u64::max_value())); } }, diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index 4b6f9dd156482..76a924748ad2a 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -21,7 +21,7 @@ use crate::{config, Event, NetworkService, NetworkWorker}; use libp2p::PeerId; use futures::prelude::*; use sp_runtime::traits::{Block as BlockT, Header as _}; -use std::{sync::Arc, time::Duration}; +use std::{borrow::Cow, sync::Arc, time::Duration}; use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; type TestNetworkService = NetworkService< @@ -121,24 +121,24 @@ fn build_test_full_node(config: config::NetworkConfiguration) (service, event_stream) } -const ENGINE_ID: sp_runtime::ConsensusEngineId = *b"foo\0"; +const PROTOCOL_NAME: Cow<'static, str> = Cow::Borrowed("/foo"); /// Builds two nodes and their associated events stream. -/// The nodes are connected together and have the `ENGINE_ID` protocol registered. +/// The nodes are connected together and have the `PROTOCOL_NAME` protocol registered. fn build_nodes_one_proto() -> (Arc, impl Stream, Arc, impl Stream) { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; let (node1, events_stream1) = build_test_full_node(config::NetworkConfiguration { - notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], + notifications_protocols: vec![PROTOCOL_NAME], listen_addresses: vec![listen_addr.clone()], transport: config::TransportConfig::MemoryOnly, .. 
config::NetworkConfiguration::new_local() }); let (node2, events_stream2) = build_test_full_node(config::NetworkConfiguration { - notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], + notifications_protocols: vec![PROTOCOL_NAME], listen_addresses: vec![], reserved_nodes: vec![config::MultiaddrWithPeerId { multiaddr: listen_addr, @@ -161,10 +161,10 @@ fn notifications_state_consistent() { // Write some initial notifications that shouldn't get through. for _ in 0..(rand::random::() % 5) { - node1.write_notification(node2.local_peer_id().clone(), ENGINE_ID, b"hello world".to_vec()); + node1.write_notification(node2.local_peer_id().clone(), PROTOCOL_NAME, b"hello world".to_vec()); } for _ in 0..(rand::random::() % 5) { - node2.write_notification(node1.local_peer_id().clone(), ENGINE_ID, b"hello world".to_vec()); + node2.write_notification(node1.local_peer_id().clone(), PROTOCOL_NAME, b"hello world".to_vec()); } async_std::task::block_on(async move { @@ -187,10 +187,10 @@ fn notifications_state_consistent() { // Start by sending a notification from node1 to node2 and vice-versa. Part of the // test consists in ensuring that notifications get ignored if the stream isn't open. if rand::random::() % 5 >= 3 { - node1.write_notification(node2.local_peer_id().clone(), ENGINE_ID, b"hello world".to_vec()); + node1.write_notification(node2.local_peer_id().clone(), PROTOCOL_NAME, b"hello world".to_vec()); } if rand::random::() % 5 >= 3 { - node2.write_notification(node1.local_peer_id().clone(), ENGINE_ID, b"hello world".to_vec()); + node2.write_notification(node1.local_peer_id().clone(), PROTOCOL_NAME, b"hello world".to_vec()); } // Also randomly disconnect the two nodes from time to time. @@ -219,31 +219,31 @@ fn notifications_state_consistent() { }; match next_event { - future::Either::Left(Event::NotificationStreamOpened { remote, engine_id, .. }) => { + future::Either::Left(Event::NotificationStreamOpened { remote, protocol, .. }) => { something_happened = true; assert!(!node1_to_node2_open); node1_to_node2_open = true; assert_eq!(remote, *node2.local_peer_id()); - assert_eq!(engine_id, ENGINE_ID); + assert_eq!(protocol, PROTOCOL_NAME); } - future::Either::Right(Event::NotificationStreamOpened { remote, engine_id, .. }) => { + future::Either::Right(Event::NotificationStreamOpened { remote, protocol, .. }) => { something_happened = true; assert!(!node2_to_node1_open); node2_to_node1_open = true; assert_eq!(remote, *node1.local_peer_id()); - assert_eq!(engine_id, ENGINE_ID); + assert_eq!(protocol, PROTOCOL_NAME); } - future::Either::Left(Event::NotificationStreamClosed { remote, engine_id, .. }) => { + future::Either::Left(Event::NotificationStreamClosed { remote, protocol, .. }) => { assert!(node1_to_node2_open); node1_to_node2_open = false; assert_eq!(remote, *node2.local_peer_id()); - assert_eq!(engine_id, ENGINE_ID); + assert_eq!(protocol, PROTOCOL_NAME); } - future::Either::Right(Event::NotificationStreamClosed { remote, engine_id, .. }) => { + future::Either::Right(Event::NotificationStreamClosed { remote, protocol, .. }) => { assert!(node2_to_node1_open); node2_to_node1_open = false; assert_eq!(remote, *node1.local_peer_id()); - assert_eq!(engine_id, ENGINE_ID); + assert_eq!(protocol, PROTOCOL_NAME); } future::Either::Left(Event::NotificationsReceived { remote, .. 
}) => { assert!(node1_to_node2_open); @@ -251,7 +251,7 @@ fn notifications_state_consistent() { if rand::random::() % 5 >= 4 { node1.write_notification( node2.local_peer_id().clone(), - ENGINE_ID, + PROTOCOL_NAME, b"hello world".to_vec() ); } @@ -262,7 +262,7 @@ fn notifications_state_consistent() { if rand::random::() % 5 >= 4 { node2.write_notification( node1.local_peer_id().clone(), - ENGINE_ID, + PROTOCOL_NAME, b"hello world".to_vec() ); } @@ -281,7 +281,7 @@ fn lots_of_incoming_peers_works() { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; let (main_node, _) = build_test_full_node(config::NetworkConfiguration { - notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], + notifications_protocols: vec![PROTOCOL_NAME], listen_addresses: vec![listen_addr.clone()], in_peers: u32::max_value(), transport: config::TransportConfig::MemoryOnly, @@ -298,7 +298,7 @@ fn lots_of_incoming_peers_works() { let main_node_peer_id = main_node_peer_id.clone(); let (_dialing_node, event_stream) = build_test_full_node(config::NetworkConfiguration { - notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], + notifications_protocols: vec![PROTOCOL_NAME], listen_addresses: vec![], reserved_nodes: vec![config::MultiaddrWithPeerId { multiaddr: listen_addr.clone(), @@ -364,7 +364,7 @@ fn notifications_back_pressure() { Event::NotificationStreamClosed { .. } => panic!(), Event::NotificationsReceived { messages, .. } => { for message in messages { - assert_eq!(message.0, ENGINE_ID); + assert_eq!(message.0, PROTOCOL_NAME); assert_eq!(message.1, format!("hello #{}", received_notifications)); received_notifications += 1; } @@ -389,7 +389,7 @@ fn notifications_back_pressure() { // Sending! for num in 0..TOTAL_NOTIFS { - let notif = node1.notification_sender(node2_id.clone(), ENGINE_ID).unwrap(); + let notif = node1.notification_sender(node2_id.clone(), PROTOCOL_NAME).unwrap(); notif.ready().await.unwrap().send(format!("hello #{}", num)).unwrap(); } diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 587feebe55c14..1aec3dae22b92 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -58,7 +58,7 @@ use sp_core::H256; use sc_network::config::ProtocolConfig; use sp_runtime::generic::{BlockId, OpaqueDigestItemId}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use sp_runtime::{ConsensusEngineId, Justification}; +use sp_runtime::Justification; use substrate_test_runtime_client::{self, AccountKeyring}; use sc_service::client::Client; pub use sc_network::config::EmptyTransactionPool; @@ -557,7 +557,7 @@ pub struct FullPeerConfig { /// Block announce validator. pub block_announce_validator: Option + Send + Sync>>, /// List of notification protocols that the network must support. 
- pub notifications_protocols: Vec<(ConsensusEngineId, Cow<'static, str>)>, + pub notifications_protocols: Vec>, } pub trait TestNetFactory: Sized { From 9d65d983a08c59f4239965f4fb65bd7d0099c788 Mon Sep 17 00:00:00 2001 From: Qinxuan Chen Date: Thu, 19 Nov 2020 03:43:41 +0800 Subject: [PATCH 21/34] sc-cli: replace bip39 with tiny-bip39 (#7551) Signed-off-by: koushiro --- Cargo.lock | 99 ++++++------------------------------------- client/cli/Cargo.toml | 2 +- 2 files changed, 14 insertions(+), 87 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3a87dfec66403..43af7d1a31674 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -239,7 +239,7 @@ dependencies = [ "concurrent-queue", "fastrand", "futures-lite", - "once_cell 1.4.1", + "once_cell", "vec-arena", ] @@ -253,7 +253,7 @@ dependencies = [ "async-io", "futures-lite", "num_cpus", - "once_cell 1.4.1", + "once_cell", ] [[package]] @@ -268,7 +268,7 @@ dependencies = [ "futures-lite", "libc", "log", - "once_cell 1.4.1", + "once_cell", "parking", "polling", "socket2", @@ -307,7 +307,7 @@ dependencies = [ "log", "memchr", "num_cpus", - "once_cell 1.4.1", + "once_cell", "pin-project-lite", "pin-utils", "slab", @@ -454,21 +454,6 @@ dependencies = [ "which", ] -[[package]] -name = "bip39" -version = "0.6.0-beta.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7059804e226b3ac116519a252d7f5fb985a5ccc0e93255e036a5f7e7283323f4" -dependencies = [ - "failure", - "hashbrown 0.1.8", - "hmac 0.7.1", - "once_cell 0.1.8", - "pbkdf2 0.3.0", - "rand 0.6.5", - "sha2 0.8.2", -] - [[package]] name = "bitflags" version = "1.2.1" @@ -596,7 +581,7 @@ dependencies = [ "atomic-waker", "fastrand", "futures-lite", - "once_cell 1.4.1", + "once_cell", ] [[package]] @@ -1061,7 +1046,7 @@ dependencies = [ "lazy_static", "maybe-uninit", "memoffset", - "scopeguard 1.1.0", + "scopeguard", ] [[package]] @@ -1688,7 +1673,7 @@ dependencies = [ "frame-system", "impl-trait-for-tuples", "log", - "once_cell 1.4.1", + "once_cell", "parity-scale-codec", "parity-util-mem", "paste 0.1.18", @@ -1967,7 +1952,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdb66b5f09e22019b1ab0830f7785bcea8e7a42148683f99214f73f8ec21a626" dependencies = [ - "once_cell 1.4.1", + "once_cell", ] [[package]] @@ -2197,16 +2182,6 @@ dependencies = [ "crunchy", ] -[[package]] -name = "hashbrown" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bae29b6653b3412c2e71e9d486db9f9df5d701941d86683005efb9f2d28e3da" -dependencies = [ - "byteorder", - "scopeguard 0.3.3", -] - [[package]] name = "hashbrown" version = "0.6.3" @@ -3363,22 +3338,13 @@ dependencies = [ "paste 0.1.18", ] -[[package]] -name = "lock_api" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62ebf1391f6acad60e5c8b43706dde4582df75c06698ab44511d15016bc2442c" -dependencies = [ - "scopeguard 0.3.3", -] - [[package]] name = "lock_api" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" dependencies = [ - "scopeguard 1.1.0", + "scopeguard", ] [[package]] @@ -3387,7 +3353,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28247cc5a5be2f05fbcd76dd0cf2c7d3b5400cb978a28042abcd4fa0b3f8261c" dependencies = [ - "scopeguard 1.1.0", + "scopeguard", ] [[package]] @@ -4250,15 +4216,6 @@ dependencies = [ "wasmparser 0.57.0", ] -[[package]] 
-name = "once_cell" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "532c29a261168a45ce28948f9537ddd7a5dd272cc513b3017b1e82a88f962c37" -dependencies = [ - "parking_lot 0.7.1", -] - [[package]] name = "once_cell" version = "1.4.1" @@ -5334,16 +5291,6 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" -[[package]] -name = "parking_lot" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab41b4aed082705d1056416ae4468b6ea99d52599ecf3169b00088d43113e337" -dependencies = [ - "lock_api 0.1.5", - "parking_lot_core 0.4.0", -] - [[package]] name = "parking_lot" version = "0.9.0" @@ -5376,19 +5323,6 @@ dependencies = [ "parking_lot_core 0.8.0", ] -[[package]] -name = "parking_lot_core" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94c8c7923936b28d546dfd14d4472eaf34c99b14e1c973a32b3e6d4eb04298c9" -dependencies = [ - "libc", - "rand 0.6.5", - "rustc_version", - "smallvec 0.6.13", - "winapi 0.3.9", -] - [[package]] name = "parking_lot_core" version = "0.6.2" @@ -5466,7 +5400,6 @@ checksum = "006c038a43a45995a9670da19e67600114740e8511d4333bf97a56e66a7542d9" dependencies = [ "byteorder", "crypto-mac 0.7.0", - "rayon", ] [[package]] @@ -6269,7 +6202,7 @@ checksum = "952cd6b98c85bbc30efa1ba5783b8abf12fec8b3287ffa52605b9432313e34e4" dependencies = [ "cc", "libc", - "once_cell 1.4.1", + "once_cell", "spin", "untrusted", "web-sys", @@ -6547,7 +6480,6 @@ version = "0.8.0" dependencies = [ "ansi_term 0.12.1", "atty", - "bip39", "chrono", "fdlimit", "futures 0.3.5", @@ -6580,6 +6512,7 @@ dependencies = [ "structopt", "tempfile", "thiserror", + "tiny-bip39", "tokio 0.2.22", "tracing", "tracing-log", @@ -7647,12 +7580,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" -[[package]] -name = "scopeguard" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94258f53601af11e6a49f722422f6e3425c52b06245a5cf9bc09908b174f5e27" - [[package]] name = "scopeguard" version = "1.1.0" @@ -9329,7 +9256,7 @@ checksum = "d9e44c4759bae7f1032e286a7ef990bd9ed23fe831b7eeba0beb97484c2e59b8" dependencies = [ "anyhow", "hmac 0.8.1", - "once_cell 1.4.1", + "once_cell", "pbkdf2 0.4.0", "rand 0.7.3", "rustc-hash", diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 942d30e90db5b..b0662c5eddf7a 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -25,7 +25,7 @@ libp2p = "0.30.1" parity-scale-codec = "1.3.0" hex = "0.4.2" rand = "0.7.3" -bip39 = "0.6.0-beta.1" +tiny-bip39 = "0.8.0" serde_json = "1.0.41" sc-keystore = { version = "2.0.0", path = "../keystore" } sp-panic-handler = { version = "2.0.0", path = "../../primitives/panic-handler" } From 69198d1e5735798c8baa1921c6e2091c93260752 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Wed, 18 Nov 2020 21:10:00 +0100 Subject: [PATCH 22/34] Add extra docs to on_initialize (#7552) * Add some extra on_initialize docs. * Address review comments. 
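For context, a minimal sketch of a pallet using these two hooks is included below; it is illustrative only and not part of this patch. The pallet itself, its empty `Trait`, and the `pallet_timestamp` bound are assumptions made for the example, written against the `decl_module!` style used elsewhere in this series.

use frame_support::{decl_module, weights::Weight};

// Hypothetical marker trait for the example pallet (assumption, not from the patch).
pub trait Trait: frame_system::Trait + pallet_timestamp::Trait {}

decl_module! {
	pub struct Module<T: Trait> for enum Call where origin: T::Origin {
		// Runs BEFORE any extrinsic of the block is applied, inherents included,
		// so the timestamp inherent has not executed yet and
		// `<pallet_timestamp::Module<T>>::now()` still returns the previous
		// block's timestamp at this point.
		fn on_initialize(_n: T::BlockNumber) -> Weight {
			0
		}

		// Runs AFTER all extrinsics of the block, inherents included, have been applied.
		fn on_finalize(_n: T::BlockNumber) {}
	}
}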
--- frame/support/src/traits.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index bc1700a43c3ee..b40ebe3dba67c 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -1265,7 +1265,7 @@ pub trait ChangeMembers { Self::change_members_sorted(&incoming[..], &outgoing[..], &new_members); } - /// Compute diff between new and old members; they **must already be sorted**. + /// Compute diff between new and old members; they **must already be sorted**. /// /// Returns incoming and outgoing members. fn compute_members_diff( @@ -1427,6 +1427,9 @@ pub trait GetCallMetadata { #[impl_for_tuples(30)] pub trait OnFinalize { /// The block is being finalized. Implement to have something happen. + /// + /// NOTE: This function is called AFTER ALL extrinsics in a block are applied, + /// including inherent extrinsics. fn on_finalize(_n: BlockNumber) {} } @@ -1438,6 +1441,10 @@ pub trait OnInitialize { /// The block is being initialized. Implement to have something happen. /// /// Return the non-negotiable weight consumed in the block. + /// + /// NOTE: This function is called BEFORE ANY extrinsic in a block is applied, + /// including inherent extrinsics. Hence for instance, if you runtime includes + /// `pallet_timestamp`, the `timestamp` is not yet up to date at this point. fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { 0 } } @@ -1569,7 +1576,7 @@ pub mod schedule { /// Reschedule a task. For one-off tasks, this dispatch is guaranteed to succeed /// only if it is executed *before* the currently scheduled block. For periodic tasks, /// this dispatch is guaranteed to succeed only before the *initial* execution; for - /// others, use `reschedule_named`. + /// others, use `reschedule_named`. /// /// Will return an error if the `address` is invalid. fn reschedule( From f74de63d23214b530ac24ac3ea4ead1180e22e83 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Thu, 19 Nov 2020 12:40:12 +0100 Subject: [PATCH 23/34] More Extensible Multiaddress Format (#7380) * More extensible multiaddress format * update name * Don't depend on indices to define multiaddress type * Use MultiAddress in Node Template too! 
* reduce traits, fix build * support multiple `StaticLookup` * bump tx version * feedback --- bin/node-template/runtime/src/lib.rs | 6 +- bin/node/executor/tests/basic.rs | 2 +- bin/node/runtime/src/lib.rs | 4 +- bin/node/testing/src/bench.rs | 6 +- bin/node/testing/src/keyring.rs | 2 +- frame/indices/src/address.rs | 159 ------------------------- frame/indices/src/lib.rs | 15 ++- primitives/runtime/src/lib.rs | 4 + primitives/runtime/src/multiaddress.rs | 66 ++++++++++ primitives/runtime/src/traits.rs | 38 ++++++ 10 files changed, 125 insertions(+), 177 deletions(-) delete mode 100644 frame/indices/src/address.rs create mode 100644 primitives/runtime/src/multiaddress.rs diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index d67a5bde96457..aadfd931cdb50 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -13,7 +13,7 @@ use sp_runtime::{ transaction_validity::{TransactionValidity, TransactionSource}, }; use sp_runtime::traits::{ - BlakeTwo256, Block as BlockT, IdentityLookup, Verify, IdentifyAccount, NumberFor, Saturating, + BlakeTwo256, Block as BlockT, AccountIdLookup, Verify, IdentifyAccount, NumberFor, Saturating, }; use sp_api::impl_runtime_apis; use sp_consensus_aura::sr25519::AuthorityId as AuraId; @@ -148,7 +148,7 @@ impl frame_system::Trait for Runtime { /// The aggregated dispatch type that is available for extrinsics. type Call = Call; /// The lookup mechanism to get account ID from whatever is passed in dispatchers. - type Lookup = IdentityLookup; + type Lookup = AccountIdLookup; /// The index type for storing how many extrinsics an account has signed. type Index = Index; /// The index type for blocks. @@ -293,7 +293,7 @@ construct_runtime!( ); /// The address format for describing accounts. -pub type Address = AccountId; +pub type Address = sp_runtime::MultiAddress; /// Block header type as expected by this runtime. pub type Header = generic::Header; /// Block type as expected by this runtime. diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index 723e3a7e4ba62..a48efaea2d695 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -621,7 +621,7 @@ fn deploying_wasm_contract_should_work() { signed: Some((charlie(), signed_extra(2, 0))), function: Call::Contracts( pallet_contracts::Call::call::( - pallet_indices::address::Address::Id(addr.clone()), + sp_runtime::MultiAddress::Id(addr.clone()), 10, 500_000_000, vec![0x00, 0x01, 0x02, 0x03] diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index bfa412e882031..3e08b2cf8a6f8 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -111,7 +111,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_version: 260, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 1, + transaction_version: 2, }; /// Native version. @@ -931,7 +931,7 @@ construct_runtime!( ); /// The address format for describing accounts. -pub type Address = ::Source; +pub type Address = sp_runtime::MultiAddress; /// Block header type as expected by this runtime. pub type Header = generic::Header; /// Block type as expected by this runtime. 
diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index 153a52375c2a9..32e4bab9773a5 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -317,7 +317,7 @@ impl<'a> Iterator for BlockContentIterator<'a> { BlockType::RandomTransfersKeepAlive => { Call::Balances( BalancesCall::transfer_keep_alive( - pallet_indices::address::Address::Id(receiver), + sp_runtime::MultiAddress::Id(receiver), node_runtime::ExistentialDeposit::get() + 1, ) ) @@ -325,7 +325,7 @@ impl<'a> Iterator for BlockContentIterator<'a> { BlockType::RandomTransfersReaping => { Call::Balances( BalancesCall::transfer( - pallet_indices::address::Address::Id(receiver), + sp_runtime::MultiAddress::Id(receiver), // Transfer so that ending balance would be 1 less than existential deposit // so that we kill the sender account. 100*DOLLARS - (node_runtime::ExistentialDeposit::get() - 1), @@ -591,7 +591,7 @@ impl BenchKeyring { } }).into(); UncheckedExtrinsic { - signature: Some((pallet_indices::address::Address::Id(signed), signature, extra)), + signature: Some((sp_runtime::MultiAddress::Id(signed), signature, extra)), function: payload.0, } } diff --git a/bin/node/testing/src/keyring.rs b/bin/node/testing/src/keyring.rs index 3413748563633..f0b8ff707294e 100644 --- a/bin/node/testing/src/keyring.rs +++ b/bin/node/testing/src/keyring.rs @@ -94,7 +94,7 @@ pub fn sign(xt: CheckedExtrinsic, spec_version: u32, tx_version: u32, genesis_ha } }).into(); UncheckedExtrinsic { - signature: Some((pallet_indices::address::Address::Id(signed), signature, extra)), + signature: Some((sp_runtime::MultiAddress::Id(signed), signature, extra)), function: payload.0, } } diff --git a/frame/indices/src/address.rs b/frame/indices/src/address.rs deleted file mode 100644 index 0fd8933381328..0000000000000 --- a/frame/indices/src/address.rs +++ /dev/null @@ -1,159 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Address type that is union of index and id for an account. - -#[cfg(feature = "std")] -use std::fmt; -use sp_std::convert::TryInto; -use crate::Member; -use codec::{Encode, Decode, Input, Output, Error}; - -/// An indices-aware address, which can be either a direct `AccountId` or -/// an index. -#[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Hash))] -pub enum Address where - AccountId: Member, - AccountIndex: Member, -{ - /// It's an account ID (pubkey). - Id(AccountId), - /// It's an account index. 
- Index(AccountIndex), -} - -#[cfg(feature = "std")] -impl fmt::Display for Address where - AccountId: Member, - AccountIndex: Member, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -impl From for Address where - AccountId: Member, - AccountIndex: Member, -{ - fn from(a: AccountId) -> Self { - Address::Id(a) - } -} - -fn need_more_than(a: T, b: T) -> Result { - if a < b { Ok(b) } else { Err("Invalid range".into()) } -} - -impl Decode for Address where - AccountId: Member + Decode, - AccountIndex: Member + Decode + PartialOrd + Ord + From + Copy, -{ - fn decode(input: &mut I) -> Result { - Ok(match input.read_byte()? { - x @ 0x00..=0xef => Address::Index(AccountIndex::from(x as u32)), - 0xfc => Address::Index(AccountIndex::from( - need_more_than(0xef, u16::decode(input)?)? as u32 - )), - 0xfd => Address::Index(AccountIndex::from( - need_more_than(0xffff, u32::decode(input)?)? - )), - 0xfe => Address::Index( - need_more_than(0xffffffffu32.into(), Decode::decode(input)?)? - ), - 0xff => Address::Id(Decode::decode(input)?), - _ => return Err("Invalid address variant".into()), - }) - } -} - -impl Encode for Address where - AccountId: Member + Encode, - AccountIndex: Member + Encode + PartialOrd + Ord + Copy + From + TryInto, -{ - fn encode_to(&self, dest: &mut T) { - match *self { - Address::Id(ref i) => { - dest.push_byte(255); - dest.push(i); - } - Address::Index(i) => { - let maybe_u32: Result = i.try_into(); - if let Ok(x) = maybe_u32 { - if x > 0xffff { - dest.push_byte(253); - dest.push(&x); - } - else if x >= 0xf0 { - dest.push_byte(252); - dest.push(&(x as u16)); - } - else { - dest.push_byte(x as u8); - } - - } else { - dest.push_byte(254); - dest.push(&i); - } - }, - } - } -} - -impl codec::EncodeLike for Address where - AccountId: Member + Encode, - AccountIndex: Member + Encode + PartialOrd + Ord + Copy + From + TryInto, -{} - -impl Default for Address where - AccountId: Member + Default, - AccountIndex: Member, -{ - fn default() -> Self { - Address::Id(Default::default()) - } -} - -#[cfg(test)] -mod tests { - use codec::{Encode, Decode}; - - type Address = super::Address<[u8; 8], u32>; - fn index(i: u32) -> Address { super::Address::Index(i) } - fn id(i: [u8; 8]) -> Address { super::Address::Id(i) } - - fn compare(a: Option
<Address>
, d: &[u8]) { - if let Some(ref a) = a { - assert_eq!(d, &a.encode()[..]); - } - assert_eq!(Address::decode(&mut &d[..]).ok(), a); - } - - #[test] - fn it_should_work() { - compare(Some(index(2)), &[2][..]); - compare(None, &[240][..]); - compare(None, &[252, 239, 0][..]); - compare(Some(index(240)), &[252, 240, 0][..]); - compare(Some(index(304)), &[252, 48, 1][..]); - compare(None, &[253, 255, 255, 0, 0][..]); - compare(Some(index(0x10000)), &[253, 0, 0, 1, 0][..]); - compare(Some(id([42, 69, 42, 69, 42, 69, 42, 69])), &[255, 42, 69, 42, 69, 42, 69, 42, 69][..]); - } -} diff --git a/frame/indices/src/lib.rs b/frame/indices/src/lib.rs index fd2eb956f9231..6d467aa673445 100644 --- a/frame/indices/src/lib.rs +++ b/frame/indices/src/lib.rs @@ -21,13 +21,13 @@ #![cfg_attr(not(feature = "std"), no_std)] mod mock; -pub mod address; mod tests; mod benchmarking; pub mod weights; use sp_std::prelude::*; use codec::Codec; +use sp_runtime::MultiAddress; use sp_runtime::traits::{ StaticLookup, Member, LookupError, Zero, Saturating, AtLeast32Bit }; @@ -35,10 +35,8 @@ use frame_support::{Parameter, decl_module, decl_error, decl_event, decl_storage use frame_support::dispatch::DispatchResult; use frame_support::traits::{Currency, ReservableCurrency, Get, BalanceStatus::Reserved}; use frame_system::{ensure_signed, ensure_root}; -use self::address::Address as RawAddress; pub use weights::WeightInfo; -pub type Address = RawAddress<::AccountId, ::AccountIndex>; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; /// The module's config trait. @@ -287,17 +285,18 @@ impl Module { /// Lookup an address to get an Id, if there's one there. pub fn lookup_address( - a: address::Address + a: MultiAddress ) -> Option { match a { - address::Address::Id(i) => Some(i), - address::Address::Index(i) => Self::lookup_index(i), + MultiAddress::Id(i) => Some(i), + MultiAddress::Index(i) => Self::lookup_index(i), + _ => None, } } } impl StaticLookup for Module { - type Source = address::Address; + type Source = MultiAddress; type Target = T::AccountId; fn lookup(a: Self::Source) -> Result { @@ -305,6 +304,6 @@ impl StaticLookup for Module { } fn unlookup(a: Self::Target) -> Self::Source { - address::Address::Id(a) + MultiAddress::Id(a) } } diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index e6c707e906edc..ccd50334af660 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -56,9 +56,13 @@ pub mod traits; pub mod transaction_validity; pub mod random_number_generator; mod runtime_string; +mod multiaddress; pub use crate::runtime_string::*; +// Re-export Multiaddress +pub use multiaddress::MultiAddress; + /// Re-export these since they're only "kind of" generic. pub use generic::{DigestItem, Digest}; diff --git a/primitives/runtime/src/multiaddress.rs b/primitives/runtime/src/multiaddress.rs new file mode 100644 index 0000000000000..bb352f7eb5f8e --- /dev/null +++ b/primitives/runtime/src/multiaddress.rs @@ -0,0 +1,66 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! MultiAddress type is a wrapper for multiple downstream account formats. + +use codec::{Encode, Decode}; +use sp_std::vec::Vec; + +/// A multi-format address wrapper for on-chain accounts. +#[derive(Encode, Decode, PartialEq, Eq, Clone, crate::RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Hash))] +pub enum MultiAddress { + /// It's an account ID (pubkey). + Id(AccountId), + /// It's an account index. + Index(#[codec(compact)] AccountIndex), + /// It's some arbitrary raw bytes. + Raw(Vec), + /// It's a 32 byte representation. + Address32([u8; 32]), + /// Its a 20 byte representation. + Address20([u8; 20]), +} + +#[cfg(feature = "std")] +impl std::fmt::Display for MultiAddress +where + AccountId: std::fmt::Debug, + AccountIndex: std::fmt::Debug, +{ + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + use sp_core::hexdisplay::HexDisplay; + match self { + MultiAddress::Raw(inner) => write!(f, "MultiAddress::Raw({})", HexDisplay::from(inner)), + MultiAddress::Address32(inner) => write!(f, "MultiAddress::Address32({})", HexDisplay::from(inner)), + MultiAddress::Address20(inner) => write!(f, "MultiAddress::Address20({})", HexDisplay::from(inner)), + _ => write!(f, "{:?}", self), + } + } +} + +impl From for MultiAddress { + fn from(a: AccountId) -> Self { + MultiAddress::Id(a) + } +} + +impl Default for MultiAddress { + fn default() -> Self { + MultiAddress::Id(Default::default()) + } +} diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index 4d2b1f062f716..4ce9ac0afa9a3 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -209,6 +209,44 @@ impl Lookup for IdentityLookup { fn lookup(&self, x: T) -> Result { Ok(x) } } +/// A lookup implementation returning the `AccountId` from a `MultiAddress`. +pub struct AccountIdLookup(PhantomData<(AccountId, AccountIndex)>); +impl StaticLookup for AccountIdLookup +where + AccountId: Codec + Clone + PartialEq + Debug, + AccountIndex: Codec + Clone + PartialEq + Debug, + crate::MultiAddress: Codec, +{ + type Source = crate::MultiAddress; + type Target = AccountId; + fn lookup(x: Self::Source) -> Result { + match x { + crate::MultiAddress::Id(i) => Ok(i), + _ => Err(LookupError), + } + } + fn unlookup(x: Self::Target) -> Self::Source { + crate::MultiAddress::Id(x) + } +} + +/// Perform a StaticLookup where there are multiple lookup sources of the same type. +impl StaticLookup for (A, B) +where + A: StaticLookup, + B: StaticLookup, +{ + type Source = A::Source; + type Target = A::Target; + + fn lookup(x: Self::Source) -> Result { + A::lookup(x.clone()).or_else(|_| B::lookup(x)) + } + fn unlookup(x: Self::Target) -> Self::Source { + A::unlookup(x) + } +} + /// Extensible conversion trait. Generic over both source and destination types. pub trait Convert { /// Make conversion. 
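As a quick illustration of the behaviour introduced above, the following standalone sketch resolves a `MultiAddress` through `AccountIdLookup`. The concrete `AccountId`/`AccountIndex` aliases are assumptions chosen only to keep the example self-contained (a real runtime would use `AccountId32` and its configured index type); the sketch is not part of the patch.

use sp_runtime::{MultiAddress, traits::{AccountIdLookup, StaticLookup}};

// Assumed concrete types for the example.
type AccountId = [u8; 32];
type AccountIndex = u32;
type Lookup = AccountIdLookup<AccountId, AccountIndex>;

fn main() {
	let alice: AccountId = [1u8; 32];

	// The `Id` variant resolves directly to the wrapped account id.
	let by_id = MultiAddress::<AccountId, AccountIndex>::Id(alice);
	assert_eq!(Lookup::lookup(by_id).unwrap(), alice);

	// Every other variant is rejected by `AccountIdLookup`; index-based
	// resolution remains the job of the indices pallet's `lookup_address`.
	let by_index = MultiAddress::<AccountId, AccountIndex>::Index(42);
	assert!(Lookup::lookup(by_index).is_err());
}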
From 1c122c6d138f7f58f1ed7ce1414cf3dd283cd4ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Fri, 20 Nov 2020 13:24:02 +0100 Subject: [PATCH 24/34] Fix weight template to remove ugliness in rust doc (#7565) fixed weight template --- .maintain/frame-weight-template.hbs | 3 ++- utils/frame/benchmarking-cli/src/template.hbs | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.maintain/frame-weight-template.hbs b/.maintain/frame-weight-template.hbs index 146cc4cfcbdb5..aac37f0833c7d 100644 --- a/.maintain/frame-weight-template.hbs +++ b/.maintain/frame-weight-template.hbs @@ -15,7 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Weights for {{pallet}} +//! Autogenerated weights for {{pallet}} +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}} //! DATE: {{date}}, STEPS: {{cmd.steps}}, REPEAT: {{cmd.repeat}}, LOW RANGE: {{cmd.lowest_range_values}}, HIGH RANGE: {{cmd.highest_range_values}} //! EXECUTION: {{cmd.execution}}, WASM-EXECUTION: {{cmd.wasm_execution}}, CHAIN: {{cmd.chain}}, DB CACHE: {{cmd.db_cache}} diff --git a/utils/frame/benchmarking-cli/src/template.hbs b/utils/frame/benchmarking-cli/src/template.hbs index 7f7e2d6dcb995..fd066b1a3a8ae 100644 --- a/utils/frame/benchmarking-cli/src/template.hbs +++ b/utils/frame/benchmarking-cli/src/template.hbs @@ -1,5 +1,6 @@ {{header}} -//! Weights for {{pallet}} +//! Autogenerated weights for {{pallet}} +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}} //! DATE: {{date}}, STEPS: {{cmd.steps}}, REPEAT: {{cmd.repeat}}, LOW RANGE: {{cmd.lowest_range_values}}, HIGH RANGE: {{cmd.highest_range_values}} //! EXECUTION: {{cmd.execution}}, WASM-EXECUTION: {{cmd.wasm_execution}}, CHAIN: {{cmd.chain}}, DB CACHE: {{cmd.db_cache}} From a0e8b7ecd1d1d44bfdad1e67ef4028121f7d9d82 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Fri, 20 Nov 2020 13:59:02 +0100 Subject: [PATCH 25/34] Cargo.lock: Run cargo update (#7553) * Cargo.lock: Run cargo update * Cargo.lock: Downgrade cc to v1.0.62 * Cargo.lock: Revert wasm-* updates --- Cargo.lock | 1420 ++++++++++++++++++--------------- bin/node/bench/src/tempdb.rs | 4 +- bin/node/testing/src/bench.rs | 2 +- 3 files changed, 778 insertions(+), 648 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 43af7d1a31674..c2e9cda8f31c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,11 +12,11 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b6a2d3371669ab3ca9797670853d61402b03d0b4b9ebf33d677dfa720203072" +checksum = "7c0929d69e78dd9bf5408269919fcbcaeb2e35e5d43e5815517cdc6a8e11a423" dependencies = [ - "gimli 0.22.0", + "gimli 0.23.0", ] [[package]] @@ -31,52 +31,52 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" dependencies = [ - "generic-array 0.14.3", + "generic-array 0.14.4", ] [[package]] name = "aes" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7001367fde4c768a19d1029f0a8be5abd9308e1119846d5bd9ad26297b8faf5" +checksum = "dd2bc6d3f370b5666245ff421e231cba4353df936e26986d2918e61a8fd6aef6" dependencies = [ "aes-soft", "aesni", - "block-cipher 0.7.1", + "block-cipher", ] [[package]] name = "aes-gcm" -version = "0.6.0" +version = 
"0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86f5007801316299f922a6198d1d09a0bae95786815d066d5880d13f7c45ead1" +checksum = "0301c9e9c443494d970a07885e8cf3e587bae8356a1d5abd0999068413f7205f" dependencies = [ "aead", "aes", - "block-cipher 0.7.1", + "block-cipher", "ghash", - "subtle 2.2.3", + "subtle 2.3.0", ] [[package]] name = "aes-soft" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4925647ee64e5056cf231608957ce7c81e12d6d6e316b9ce1404778cc1d35fa7" +checksum = "63dd91889c49327ad7ef3b500fd1109dbd3c509a03db0d4a9ce413b79f575cb6" dependencies = [ - "block-cipher 0.7.1", + "block-cipher", "byteorder", - "opaque-debug 0.2.3", + "opaque-debug 0.3.0", ] [[package]] name = "aesni" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d050d39b0b7688b3a3254394c3e30a9d66c41dcf9b05b0e2dbdc623f6505d264" +checksum = "0a6fe808308bb07d393e2ea47780043ec47683fcf19cf5efc8ca51c50cc8c68a" dependencies = [ - "block-cipher 0.7.1", - "opaque-debug 0.2.3", + "block-cipher", + "opaque-debug 0.3.0", ] [[package]] @@ -94,11 +94,17 @@ version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8fd72866655d1904d6b0997d0b07ba561047d070fbe29de039031c641b61217" +[[package]] +name = "ahash" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6789e291be47ace86a60303502173d84af8327e3627ecf334356ee0f87a164c" + [[package]] name = "aho-corasick" -version = "0.7.13" +version = "0.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043164d8ba5c4c3035fec9bbee8647c0261d788f3474306f93bb65901cae0e86" +checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" dependencies = [ "memchr", ] @@ -149,9 +155,9 @@ dependencies = [ [[package]] name = "arbitrary" -version = "0.4.5" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cb544f1057eaaff4b34f8c4dcf56fc3cd04debd291998405d135017a7c3c0f4" +checksum = "db55d72333851e17d572bec876e390cd3b11eb1ef53ae821dd9f3b653d2b4569" [[package]] name = "arc-swap" @@ -176,9 +182,9 @@ dependencies = [ [[package]] name = "arrayvec" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" +checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] name = "asn1_der" @@ -214,9 +220,9 @@ dependencies = [ [[package]] name = "assert_matches" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7deb0a829ca7bcfaf5da70b073a8d128619259a7be8216a355e23f00763059e5" +checksum = "695579f0f2520f3774bb40461e5adb066459d4e0af4d59d20175484fb8e9edf1" [[package]] name = "async-channel" @@ -231,9 +237,9 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d373d78ded7d0b3fa8039375718cde0aace493f2e34fb60f51cbf567562ca801" +checksum = "eb877970c7b440ead138f6321a3b5395d6061183af779340b65e20c0fede9146" dependencies = [ "async-task", "concurrent-queue", @@ -245,9 +251,9 @@ dependencies = [ [[package]] name = "async-global-executor" -version = "1.3.0" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fefeb39da249f4c33af940b779a56723ce45809ef5c54dad84bb538d4ffb6d9e" +checksum = "73079b49cd26b8fd5a15f68fc7707fc78698dc2a3d61430f2a7a9430230dfa04" dependencies = [ "async-executor", "async-io", @@ -258,23 +264,21 @@ dependencies = [ [[package]] name = "async-io" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38628c78a34f111c5a6b98fc87dfc056cd1590b61afe748b145be4623c56d194" +checksum = "40a0b2bb8ae20fede194e779150fe283f65a4a08461b496de546ec366b174ad9" dependencies = [ - "cfg-if 0.1.10", "concurrent-queue", "fastrand", "futures-lite", "libc", "log", + "nb-connect", "once_cell", "parking", "polling", - "socket2", "vec-arena", "waker-fn", - "wepoll-sys-stjepang", "winapi 0.3.9", ] @@ -289,15 +293,15 @@ dependencies = [ [[package]] name = "async-std" -version = "1.6.5" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9fa76751505e8df1c7a77762f60486f60c71bbd9b8557f4da6ad47d083732ed" +checksum = "a7e82538bc65a25dbdff70e4c5439d52f068048ab97cdea0acd73f131594caa1" dependencies = [ "async-global-executor", "async-io", "async-mutex", "blocking", - "crossbeam-utils", + "crossbeam-utils 0.8.0", "futures-channel", "futures-core", "futures-io", @@ -316,9 +320,9 @@ dependencies = [ [[package]] name = "async-task" -version = "4.0.2" +version = "4.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ab27c1aa62945039e44edaeee1dc23c74cc0c303dd5fe0fb462a184f1c3a518" +checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0" [[package]] name = "async-tls" @@ -335,9 +339,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.37" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caae68055714ff28740f310927e04f2eba76ff580b16fb18ed90073ee71646f7" +checksum = "b246867b8b3b6ae56035f1eb1ed557c1d8eae97f0d53696138a50fa0e3a3b8c0" dependencies = [ "proc-macro2", "quote", @@ -350,7 +354,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3410529e8288c463bedb5930f82833bc0c90e5d2fe639a56582a4d09220b281" dependencies = [ - "autocfg 1.0.0", + "autocfg 1.0.1", ] [[package]] @@ -378,21 +382,21 @@ checksum = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" [[package]] name = "autocfg" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.50" +version = "0.3.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46254cf2fdcdf1badb5934448c1bcbe046a56537b3987d96c51a7afc5d03f293" +checksum = "2baad346b2d4e94a24347adeee9c7a93f412ee94b9cc26e5b59dea23848e9f28" dependencies = [ "addr2line", - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", "miniz_oxide", - "object 0.20.0", + "object 0.22.0", "rustc-demangle", ] @@ -402,12 +406,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5024ee8015f02155eee35c711107ddd9a9bf3cb689cf2a9089c97e79b6e1ae83" -[[package]] -name = "base64" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" - [[package]] name = "base64" version = "0.12.3" @@ -472,15 +470,13 @@ dependencies = [ 
[[package]] name = "blake2" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84ce5b6108f8e154604bd4eb76a2f726066c3464d5a552a4229262a18c9bb471" +checksum = "10a5720225ef5daecf08657f23791354e1685a8c91a4c60c7f3d3b2892f978f4" dependencies = [ - "byte-tools", - "byteorder", "crypto-mac 0.8.0", "digest 0.9.0", - "opaque-debug 0.2.3", + "opaque-debug 0.3.0", ] [[package]] @@ -495,23 +491,23 @@ dependencies = [ [[package]] name = "blake2b_simd" -version = "0.5.10" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8fb2d74254a3a0b5cac33ac9f8ed0e44aa50378d9dbb2e5d83bd21ed1dc2c8a" +checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" dependencies = [ "arrayref", - "arrayvec 0.5.1", + "arrayvec 0.5.2", "constant_time_eq", ] [[package]] name = "blake2s_simd" -version = "0.5.10" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab9e07352b829279624ceb7c64adb4f585dacdb81d35cafae81139ccd617cf44" +checksum = "9e461a7034e85b211a4acb57ee2e6730b32912b06c08cc242243c39fc21ae6a2" dependencies = [ "arrayref", - "arrayvec 0.5.1", + "arrayvec 0.5.2", "constant_time_eq", ] @@ -534,16 +530,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ "block-padding 0.2.1", - "generic-array 0.14.3", -] - -[[package]] -name = "block-cipher" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa136449e765dc7faa244561ccae839c394048667929af599b5d931ebe7b7f10" -dependencies = [ - "generic-array 0.14.3", + "generic-array 0.14.4", ] [[package]] @@ -552,7 +539,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f337a3e6da609650eb74e02bc9fac7b735049f7623ab12f2e4c719316fcc7e80" dependencies = [ - "generic-array 0.14.3", + "generic-array 0.14.4", ] [[package]] @@ -598,9 +585,9 @@ checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" [[package]] name = "bstr" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31accafdb70df7871592c058eca3985b71104e15ac32f64706022c58867da931" +checksum = "473fc6b38233f9af7baa94fb5852dca389e3d95b8e21c8e3719301462c5d9faf" dependencies = [ "lazy_static", "memchr", @@ -666,11 +653,11 @@ checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" [[package]] name = "cargo_metadata" -version = "0.10.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "052dbdd9db69a339d5fa9ac87bfe2e1319f709119f0345988a597af82bb1011c" +checksum = "b8de60b887edf6d74370fc8eb177040da4847d971d6234c7b13a6da324ef0caf" dependencies = [ - "semver 0.10.0", + "semver 0.9.0", "serde", "serde_derive", "serde_json", @@ -687,9 +674,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.58" +version = "1.0.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a06fb2e53271d7c279ec1efea6ab691c35a2ae67ec0d91d7acec0caf13b518" +checksum = "f1770ced377336a88a67c473594ccc14eca6f4559217c34f64aac8f83d641b40" dependencies = [ "jobserver", ] @@ -717,24 +704,24 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chacha20" -version = "0.4.3" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"086c0f07ac275808b7bf9a39f2fd013aae1498be83632814c8c4e0bd53f2dc58" +checksum = "244fbce0d47e97e8ef2f63b81d5e05882cb518c68531eb33194990d7b7e85845" dependencies = [ - "stream-cipher 0.4.1", + "stream-cipher", "zeroize", ] [[package]] name = "chacha20poly1305" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18b0c90556d8e3fec7cf18d84a2f53d27b21288f2fe481b830fadcf809e48205" +checksum = "9bf18d374d66df0c05cdddd528a7db98f78c28e2519b120855c4f84c5027b1f5" dependencies = [ "aead", "chacha20", "poly1305", - "stream-cipher 0.4.1", + "stream-cipher", "zeroize", ] @@ -754,15 +741,17 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.13" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c74d84029116787153e02106bf53e66828452a4b325cc8652b788b5967c0a0b6" +checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" dependencies = [ "js-sys", + "libc", "num-integer", "num-traits", "time", "wasm-bindgen", + "winapi 0.3.9", ] [[package]] @@ -778,9 +767,9 @@ dependencies = [ [[package]] name = "clap" -version = "2.33.1" +version = "2.33.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdfa80d47f954d53a35a64987ca1422f495b8d6483c0fe9f7117b36c2a792129" +checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" dependencies = [ "ansi_term 0.11.0", "atty", @@ -858,6 +847,12 @@ dependencies = [ "proc-macro-hack", ] +[[package]] +name = "const_fn" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c478836e029dcef17fb47c89023448c64f781a046e0300e257ad8225ae59afab" + [[package]] name = "constant_time_eq" version = "0.1.5" @@ -910,7 +905,7 @@ dependencies = [ "log", "regalloc", "serde", - "smallvec 1.4.1", + "smallvec 1.5.0", "target-lexicon", "thiserror", ] @@ -948,7 +943,7 @@ checksum = "2ef419efb4f94ecc02e5d9fbcc910d2bb7f0040e2de570e63a454f883bc891d6" dependencies = [ "cranelift-codegen", "log", - "smallvec 1.4.1", + "smallvec 1.5.0", "target-lexicon", ] @@ -980,11 +975,11 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" +checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", ] [[package]] @@ -1023,32 +1018,67 @@ dependencies = [ "itertools 0.9.0", ] +[[package]] +name = "crossbeam-channel" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dca26ee1f8d361640700bde38b2c37d8c22b3ce2d360e1fc1c74ea4b0aa7d775" +dependencies = [ + "cfg-if 1.0.0", + "crossbeam-utils 0.8.0", +] + [[package]] name = "crossbeam-deque" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285" dependencies = [ - "crossbeam-epoch", - "crossbeam-utils", + "crossbeam-epoch 0.8.2", + "crossbeam-utils 0.7.2", "maybe-uninit", ] +[[package]] +name = "crossbeam-deque" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9" +dependencies = [ + "cfg-if 1.0.0", + "crossbeam-epoch 0.9.0", + "crossbeam-utils 0.8.0", +] + [[package]] name = "crossbeam-epoch" version = "0.8.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" dependencies = [ - "autocfg 1.0.0", + "autocfg 1.0.1", "cfg-if 0.1.10", - "crossbeam-utils", + "crossbeam-utils 0.7.2", "lazy_static", "maybe-uninit", "memoffset", "scopeguard", ] +[[package]] +name = "crossbeam-epoch" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0f606a85340376eef0d6d8fec399e6d4a544d648386c6645eb6d0653b27d9f" +dependencies = [ + "cfg-if 1.0.0", + "const_fn", + "crossbeam-utils 0.8.0", + "lazy_static", + "memoffset", + "scopeguard", +] + [[package]] name = "crossbeam-queue" version = "0.2.3" @@ -1056,7 +1086,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" dependencies = [ "cfg-if 0.1.10", - "crossbeam-utils", + "crossbeam-utils 0.7.2", "maybe-uninit", ] @@ -1066,11 +1096,23 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" dependencies = [ - "autocfg 1.0.0", + "autocfg 1.0.1", "cfg-if 0.1.10", "lazy_static", ] +[[package]] +name = "crossbeam-utils" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec91540d98355f690a86367e566ecad2e9e579f230230eb7c21398372be73ea5" +dependencies = [ + "autocfg 1.0.1", + "cfg-if 1.0.0", + "const_fn", + "lazy_static", +] + [[package]] name = "crunchy" version = "0.2.2" @@ -1093,15 +1135,15 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ - "generic-array 0.14.3", - "subtle 2.2.3", + "generic-array 0.14.4", + "subtle 2.3.0", ] [[package]] name = "csv" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00affe7f6ab566df61b4be3ce8cf16bc2576bca0963ceb0955e45d514bf9a279" +checksum = "fc4666154fd004af3fd6f1da2e81a96fd5a81927fe8ddb6ecc79e2aa6e138b54" dependencies = [ "bstr", "csv-core", @@ -1130,9 +1172,9 @@ dependencies = [ [[package]] name = "ctor" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39858aa5bac06462d4dd4b9164848eb81ffc4aa5c479746393598fd193afa227" +checksum = "7fbaabec2c953050352311293be5c6aba8e141ba19d6811862b232d6fd020484" dependencies = [ "quote", "syn", @@ -1158,7 +1200,7 @@ dependencies = [ "byteorder", "digest 0.8.1", "rand_core 0.5.1", - "subtle 2.2.3", + "subtle 2.3.0", "zeroize", ] @@ -1171,21 +1213,21 @@ dependencies = [ "byteorder", "digest 0.9.0", "rand_core 0.5.1", - "subtle 2.2.3", + "subtle 2.3.0", "zeroize", ] [[package]] name = "data-encoding" -version = "2.2.1" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72aa14c04dfae8dd7d8a2b1cb7ca2152618cd01336dbfe704b8dcbf8d41dbd69" +checksum = "993a608597367c6377b258c25d7120740f00ed23a2252b729b1932dd7866f908" [[package]] name = "derive_more" -version = "0.99.9" +version = "0.99.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "298998b1cf6b5b2c8a7b023dfd45821825ce3ba8a8af55c921a0e734e4653f76" +checksum = "41cb0e6161ad61ed084a36ba71fbba9e3ac5aee3606fb607fe08da6acbcf3d8c" dependencies = [ "proc-macro2", "quote", @@ -1213,7 +1255,7 @@ version = "0.9.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.14.3", + "generic-array 0.14.4", ] [[package]] @@ -1282,15 +1324,15 @@ dependencies = [ [[package]] name = "dyn-clone" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c53dc3a653e0f64081026e4bf048d48fec9fce90c66e8326ca7292df0ff2d82" +checksum = "d55796afa1b20c2945ca8eabfc421839f2b766619209f1ede813cf2484f31804" [[package]] name = "ed25519" -version = "1.0.1" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf038a7b6fd7ef78ad3348b63f3a17550877b0e28f8d68bcc94894d1412158bc" +checksum = "37c66a534cbb46ab4ea03477eae19d5c22c01da8258030280b7bd9d8433fb6ef" dependencies = [ "signature", ] @@ -1305,15 +1347,15 @@ dependencies = [ "ed25519", "rand 0.7.3", "serde", - "sha2 0.9.1", + "sha2 0.9.2", "zeroize", ] [[package]] name = "either" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56b59865bce947ac5958779cfa508f6c3b9497cc762b7e24a12d11ccde2c4f" +checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "enumflags2" @@ -1365,9 +1407,9 @@ dependencies = [ [[package]] name = "errno" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eab5ee3df98a279d9b316b1af6ac95422127b1290317e6d18c1743c99418b01" +checksum = "fa68f2fb9cae9d37c9b2b3584aba698a2e97f72d7aef7b9f7aa71d8b54ce46fe" dependencies = [ "errno-dragonfly", "libc", @@ -1397,6 +1439,23 @@ dependencies = [ "tiny-keccak", ] +[[package]] +name = "ethereum" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df706418ff7d3874b9506424b04ea0bef569a2b39412b43a27ea86e679be108e" +dependencies = [ + "ethereum-types", + "hash-db", + "hash256-std-hasher", + "parity-scale-codec", + "rlp", + "rlp-derive", + "serde", + "sha3 0.9.1", + "triehash", +] + [[package]] name = "ethereum-types" version = "0.9.2" @@ -1419,13 +1478,16 @@ checksum = "f7531096570974c3a9dcf9e4b8e1cede1ec26cf5046219fb3b9d897503b9be59" [[package]] name = "evm" -version = "0.17.0" +version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68224b0aa788720ef0c8a23030a4412a021ed73df069a922bee8f0db9ed617e2" +checksum = "16c8deca0ec3efa361b03d9cae6fe94321a1d2d0a523437edd720b3d140e3c08" dependencies = [ + "ethereum", "evm-core", "evm-gasometer", "evm-runtime", + "log", + "parity-scale-codec", "primitive-types", "rlp", "serde", @@ -1434,18 +1496,19 @@ dependencies = [ [[package]] name = "evm-core" -version = "0.17.0" +version = "0.17.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a040378759577447945c89da1b07d6e33fda32a97a104afe0ec3fa1c382949d" +checksum = "cf2d732b3c36df36833761cf67df8f65866be1d368d20508bc3e13e6f256c8c5" dependencies = [ + "log", "primitive-types", ] [[package]] name = "evm-gasometer" -version = "0.17.0" +version = "0.17.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bb5bc051afad6bb0735c82b46656bbdfac41917861307a608b1404a546fec42" +checksum = "46de1b91ccd744627484183729f1b5af484b3bf15505007fc28cc54264cb9ea1" dependencies = [ "evm-core", "evm-runtime", @@ -1454,9 +1517,9 @@ dependencies = [ [[package]] name = "evm-runtime" -version = "0.17.0" +version = "0.17.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7410f5677a52203d3fca02b0eb8f96f9799f3a45cff82946a8ed28379e6b1b04" +checksum = "f2c1d1ffe96f833788512c890d702457d790dba4917ac6f64f8f60fbd9bc40b8" dependencies = [ "evm-core", "primitive-types", @@ -1469,7 +1532,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e43f2f1833d64e33f15592464d6fdd70f349dda7b1a53088eb83cd94014008c5" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", ] [[package]] @@ -1526,9 +1589,9 @@ dependencies = [ [[package]] name = "file-per-thread-logger" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b3937f028664bd0e13df401ba49a4567ccda587420365823242977f06609ed1" +checksum = "4fdbe0d94371f9ce939b555dd342d0686cc4c0cadbcd4b61d70af5ff97eb4126" dependencies = [ "env_logger", "log", @@ -1541,7 +1604,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8feb87a63249689640ac9c011742c33139204e3c134293d3054022276869133b" dependencies = [ "either", - "futures 0.3.5", + "futures 0.3.8", "futures-timer 2.0.2", "log", "num-traits", @@ -1570,11 +1633,11 @@ checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" [[package]] name = "flate2" -version = "1.0.16" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68c90b0fc46cf89d227cc78b40e494ff81287a92dd07631e5af0d06fe3cf885e" +checksum = "7411863d55df97a419aa64cb4d2f167103ea9d767e2c54a1868b7ac3f6b47129" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "crc32fast", "libc", "libz-sys", @@ -1594,6 +1657,16 @@ dependencies = [ "parity-scale-codec", ] +[[package]] +name = "form_urlencoded" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ece68d15c92e84fa4f19d3780f1294e5ca82a78a6d515f1efaabcc144688be00" +dependencies = [ + "matches", + "percent-encoding 2.1.0", +] + [[package]] name = "frame-benchmarking" version = "2.0.0" @@ -1679,7 +1752,7 @@ dependencies = [ "paste 0.1.18", "pretty_assertions", "serde", - "smallvec 1.4.1", + "smallvec 1.5.0", "sp-api", "sp-arithmetic", "sp-core", @@ -1806,9 +1879,9 @@ dependencies = [ [[package]] name = "fs_extra" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f2a4a2034423744d2cc7ca2068453168dcdb82c438419e639a26bd87839c674" +checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" [[package]] name = "fuchsia-cprng" @@ -1834,15 +1907,15 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" [[package]] name = "futures" -version = "0.1.29" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b980f2816d6ee8673b6517b52cb0e808a180efc92e5c19d02cdda79066703ef" +checksum = "4c7e4c2612746b0df8fed4ce0c69156021b704c9aefa360311c04e6e9e002eed" [[package]] name = "futures" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e05b85ec287aac0dc34db7d4a569323df697f9c55b99b15d6b4ef8cde49f613" +checksum = "9b3b0c040a1fe6529d30b3c5944b280c7f0dcb2930d2c3062bca967b602583d0" dependencies = [ "futures-channel", "futures-core", @@ -1855,9 +1928,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f366ad74c28cca6ba456d95e6422883cfb4b252a83bed929c83abfdbbf2967d5" +checksum = "4b7109687aa4e177ef6fe84553af6280ef2778bdb7783ba44c9dc3399110fe64" dependencies = [ "futures-core", "futures-sink", @@ -1865,9 +1938,9 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59f5fff90fd5d971f936ad674802482ba441b6f09ba5e15fd8b39145582ca399" +checksum = "847ce131b72ffb13b6109a221da9ad97a64cbe48feb1028356b836b47b8f1748" [[package]] name = "futures-cpupool" @@ -1875,7 +1948,7 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4" dependencies = [ - "futures 0.1.29", + "futures 0.1.30", "num_cpus", ] @@ -1885,21 +1958,21 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdcef58a173af8148b182684c9f2d5250875adbcaff7b5794073894f9d8634a9" dependencies = [ - "futures 0.1.29", - "futures 0.3.5", + "futures 0.1.30", + "futures 0.3.8", "lazy_static", "log", "parking_lot 0.9.0", - "pin-project 0.4.22", + "pin-project 0.4.27", "serde", "serde_json", ] [[package]] name = "futures-executor" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10d6bb888be1153d3abeb9006b11b02cf5e9b209fda28693c31ae1e4e012e314" +checksum = "4caa2b2b68b880003057c1dd49f1ed937e38f22fcf6c212188a121f08cf40a65" dependencies = [ "futures-core", "futures-task", @@ -1909,15 +1982,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de27142b013a8e869c14957e6d2edeef89e97c289e69d042ee3a49acd8b51789" +checksum = "611834ce18aaa1bd13c4b374f5d653e1027cf99b6b502584ff8c9a64413b30bb" [[package]] name = "futures-lite" -version = "1.11.1" +version = "1.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "381a7ad57b1bad34693f63f6f377e1abded7a9c85c9d3eb6771e11c60aaadab9" +checksum = "5e6c079abfac3ab269e2927ec048dabc89d009ebfdda6b8ee86624f30c689658" dependencies = [ "fastrand", "futures-core", @@ -1930,9 +2003,9 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0b5a30a4328ab5473878237c447333c093297bded83a4983d10f4deea240d39" +checksum = "77408a692f1f97bcc61dc001d752e00643408fbc922e4d634c655df50d595556" dependencies = [ "proc-macro-hack", "proc-macro2", @@ -1942,15 +2015,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f2032893cb734c7a05d85ce0cc8b8c4075278e93b24b66f9de99d6eb0fa8acc" +checksum = "f878195a49cee50e006b02b93cf7e0a95a38ac7b776b4c4d9cc1207cd20fcb3d" [[package]] name = "futures-task" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdb66b5f09e22019b1ab0830f7785bcea8e7a42148683f99214f73f8ec21a626" +checksum = "7c554eb5bf48b2426c4771ab68c6b14468b6e76cc90996f528c3338d761a4d0d" dependencies = [ "once_cell", ] @@ -1973,11 +2046,11 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8764574ff08b701a084482c3c7031349104b07ac897393010494beaa18ce32c6" 
+checksum = "d304cff4a7b99cfb7986f7d43fbe93d175e72e704a8860787cc95e9ffd85cbd2" dependencies = [ - "futures 0.1.29", + "futures 0.1.30", "futures-channel", "futures-core", "futures-io", @@ -1985,7 +2058,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project 0.4.22", + "pin-project 1.0.2", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -1999,9 +2072,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce54d63f8b0c75023ed920d46fd71d0cbbb830b0ee012726b5b4f506fb6dea5b" dependencies = [ "bytes 0.5.6", - "futures 0.3.5", + "futures 0.3.8", "memchr", - "pin-project 0.4.22", + "pin-project 0.4.27", ] [[package]] @@ -2010,6 +2083,19 @@ version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" +[[package]] +name = "generator" +version = "0.6.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cdc09201b2e8ca1b19290cf7e65de2246b8e91fb6874279722189c4de7b94dc" +dependencies = [ + "cc", + "libc", + "log", + "rustc_version", + "winapi 0.3.9", +] + [[package]] name = "generic-array" version = "0.12.3" @@ -2021,9 +2107,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60fb4bb6bba52f78a471264d9a3b7d026cc0af47b22cd2cffbc0b787ca003e63" +checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" dependencies = [ "typenum", "version_check", @@ -2031,13 +2117,13 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" +checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6" dependencies = [ "cfg-if 0.1.10", "libc", - "wasi", + "wasi 0.9.0+wasi-snapshot-preview1", "wasm-bindgen", ] @@ -2049,7 +2135,7 @@ checksum = "ee8025cf36f917e6a52cce185b7c7177689b838b7ec138364e50cc2277a56cf4" dependencies = [ "cfg-if 0.1.10", "libc", - "wasi", + "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] @@ -2074,9 +2160,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaf91faf136cb47367fa430cd46e37a788775e7fa104f8b4bcb3861dc389b724" +checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" [[package]] name = "glob" @@ -2086,9 +2172,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "globset" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ad1da430bd7281dde2576f44c84cc3f0f7b475e7202cd503042dff01a8c8120" +checksum = "c152169ef1e421390738366d2f796655fec62621dabbd0fd476f905934061e4a" dependencies = [ "aho-corasick", "bstr", @@ -2119,7 +2205,7 @@ dependencies = [ "byteorder", "bytes 0.4.12", "fnv", - "futures 0.1.29", + "futures 0.1.30", "http 0.1.21", "indexmap", "log", @@ -2130,9 +2216,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "993f9e0baeed60001cf565546b0d3dbe6a6ad23f2bd31644a133c641eccf6d53" +checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" dependencies = [ "bytes 0.5.6", "fnv", @@ -2142,9 +2228,10 @@ 
dependencies = [ "http 0.2.1", "indexmap", "slab", - "tokio 0.2.22", + "tokio 0.2.23", "tokio-util", "tracing", + "tracing-futures", ] [[package]] @@ -2155,9 +2242,9 @@ checksum = "d36fab90f82edc3c747f9d438e06cf0a491055896f2a279638bb5beed6c40177" [[package]] name = "handlebars" -version = "3.5.0" +version = "3.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcd1b5399b9884f9ae18b5d4105d180720c8f602aeb73d3ceae9d6b1d13a5fa7" +checksum = "2764f9796c0ddca4b82c07f25dd2cb3db30b9a8f47940e78e1c883d9e95c3db9" dependencies = [ "log", "pest", @@ -2194,12 +2281,21 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34f595585f103464d8d2f6e9864682d74c1601fed5e07d62b1c9058dba8246fb" +checksum = "e91b62f79061a0bc2e046024cb7ba44b08419ed238ecbd9adbd787434b9e8c25" dependencies = [ "ahash 0.3.8", - "autocfg 1.0.0", + "autocfg 1.0.1", +] + +[[package]] +name = "hashbrown" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" +dependencies = [ + "ahash 0.4.6", ] [[package]] @@ -2213,9 +2309,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.15" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3deed196b6e7f9e44a2ae8d94225d80302d81208b1bb673fd21fe634645c85a9" +checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8" dependencies = [ "libc", ] @@ -2271,9 +2367,9 @@ dependencies = [ [[package]] name = "honggfuzz" -version = "0.5.49" +version = "0.5.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "832bac18a82ec7d6c21887daa8616b238fe90d5d5e762d0d4b9372cdaa9e097f" +checksum = "6f085725a5828d7e959f014f624773094dfe20acc91be310ef106923c30594bc" dependencies = [ "arbitrary", "lazy_static", @@ -2309,7 +2405,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6741c859c1b2463a423a1dbce98d418e6c3c3fc720fb0d45528657320920292d" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.30", "http 0.1.21", "tokio-buf", ] @@ -2330,6 +2426,12 @@ version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" +[[package]] +name = "httpdate" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" + [[package]] name = "humantime" version = "1.3.0" @@ -2346,7 +2448,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dbe6ed1438e1f8ad955a4701e9a944938e9519f6888d12d8558b645e247d5f6" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.30", "futures-cpupool", "h2 0.1.26", "http 0.1.21", @@ -2371,23 +2473,23 @@ dependencies = [ [[package]] name = "hyper" -version = "0.13.7" +version = "0.13.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e68a8dd9716185d9e64ea473ea6ef63529252e3e27623295a0378a19665d5eb" +checksum = "f6ad767baac13b44d4529fcf58ba2cd0995e36e7b435bc5b039de6f47e880dbf" dependencies = [ "bytes 0.5.6", "futures-channel", "futures-core", "futures-util", - "h2 0.2.6", + "h2 0.2.7", "http 0.2.1", "http-body 0.3.1", "httparse", + "httpdate", "itoa", - "pin-project 0.4.22", + "pin-project 1.0.2", "socket2", - "time", - "tokio 0.2.22", + "tokio 
0.2.23", "tower-service", "tracing", "want 0.3.0", @@ -2402,11 +2504,11 @@ dependencies = [ "bytes 0.5.6", "ct-logs", "futures-util", - "hyper 0.13.7", + "hyper 0.13.9", "log", "rustls", "rustls-native-certs", - "tokio 0.2.22", + "tokio 0.2.23", "tokio-rustls", "webpki", ] @@ -2435,9 +2537,9 @@ dependencies = [ [[package]] name = "if-addrs" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f12906406f12abf5569643c46b29aec78313dc1537b17dd5c5250169790c4db9" +checksum = "28538916eb3f3976311f5dfbe67b5362d0add1293d0a9cad17debf86f8e3aa48" dependencies = [ "if-addrs-sys", "libc", @@ -2446,9 +2548,9 @@ dependencies = [ [[package]] name = "if-addrs-sys" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e2556f16544202bcfe0aa5d20a01a6b815f736b136b3ad76dc547ee6b5bb1df" +checksum = "de74b9dd780476e837e5eb5ab7c88b49ed304126e412030a0adba99c8efe79ea" dependencies = [ "cc", "libc", @@ -2494,26 +2596,32 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b88cd59ee5f71fea89a62248fc8f387d44400cefe05ef548466d61ced9029a7" +checksum = "55e2e4c765aa53a0424761bf9f41aa7a6ac1efa87238f59560640e27fca028f2" dependencies = [ - "autocfg 1.0.0", - "hashbrown 0.8.1", + "autocfg 1.0.1", + "hashbrown 0.9.1", "serde", ] [[package]] name = "instant" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b141fdc7836c525d4d594027d318c84161ca17aaf8113ab1f81ab93ae897485" +checksum = "cb1fc4429a33e1f80d41dc9fea4d108a88bec1de8053878898ae448a0b52f613" +dependencies = [ + "cfg-if 1.0.0", +] [[package]] name = "integer-sqrt" -version = "0.1.3" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f65877bf7d44897a473350b1046277941cee20b263397e90869c50b6e766088b" +checksum = "276ec31bcb4a9ee45f58bec6f9ec700ae4cf4f4f8f2fa7e06cb406bd5ffdd770" +dependencies = [ + "num-traits", +] [[package]] name = "intervalier" @@ -2521,7 +2629,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64fa110ec7b8f493f416eed552740d10e7030ad5f63b2308f82c9608ec2df275" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "futures-timer 2.0.2", ] @@ -2581,9 +2689,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.39" +version = "0.3.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa5a448de267e7358beaf4a5d849518fe9a0c13fce7afd44b06e68550e5562a7" +checksum = "85a7e2c92a4804dd459b86c339278d0fe87cf93757fae222c3fa3ae75458bc73" dependencies = [ "wasm-bindgen", ] @@ -2595,7 +2703,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "489b9c612e60c766f751ab40fcb43cbb55a1e10bb44a9b4307ed510ca598cbd7" dependencies = [ "failure", - "futures 0.1.29", + "futures 0.1.30", "hyper 0.12.35", "jsonrpc-core", "jsonrpc-pubsub", @@ -2611,7 +2719,7 @@ version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0745a6379e3edc893c84ec203589790774e4247420033e71a76d3ab4687991fa" dependencies = [ - "futures 0.1.29", + "futures 0.1.30", "log", "serde", "serde_derive", @@ -2754,7 +2862,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0315ef2f688e33844400b31f11c263f2b3dc21d8b9355c6891c5f185fae43f9a" dependencies = [ "parity-util-mem", - "smallvec 1.4.1", + 
"smallvec 1.5.0", ] [[package]] @@ -2783,7 +2891,7 @@ dependencies = [ "parking_lot 0.10.2", "regex", "rocksdb", - "smallvec 1.4.1", + "smallvec 1.5.0", ] [[package]] @@ -2792,7 +2900,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2701a1369d6ea4f1b9f606db46e5e2a4a8e47f22530a07823d653f85ab1f6c34" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "js-sys", "kvdb", "kvdb-memorydb", @@ -2811,9 +2919,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "lazycell" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b294d6fa9ee409a054354afc4352b0b9ef7ca222c69b8812cbea9e7d2bf3783f" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "leb128" @@ -2823,9 +2931,9 @@ checksum = "3576a87f2ba00f6f106fdfcd16db1d698d648a26ad8e0573cad8537c3c362d2a" [[package]] name = "libc" -version = "0.2.79" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2448f6066e80e3bfc792e9c98bf705b4b0fc6e8ef5b43e5889aff0eaa9c58743" +checksum = "4d58d1b70b004888f764dfbf6a26a3b0342a1632d33968e4a179d8011c760614" [[package]] name = "libloading" @@ -2851,7 +2959,7 @@ checksum = "e3c2b4c99f8798be90746fc226acf95d3e6cff0655883634cc30dab1f64f438b" dependencies = [ "atomic", "bytes 0.5.6", - "futures 0.3.5", + "futures 0.3.8", "lazy_static", "libp2p-core", "libp2p-core-derive", @@ -2876,9 +2984,9 @@ dependencies = [ "libp2p-yamux", "multihash", "parity-multiaddr", - "parking_lot 0.11.0", - "pin-project 1.0.1", - "smallvec 1.4.1", + "parking_lot 0.11.1", + "pin-project 1.0.2", + "smallvec 1.5.0", "wasm-timer", ] @@ -2893,7 +3001,7 @@ dependencies = [ "ed25519-dalek", "either", "fnv", - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", "lazy_static", "libsecp256k1", @@ -2901,15 +3009,15 @@ dependencies = [ "multihash", "multistream-select", "parity-multiaddr", - "parking_lot 0.11.0", - "pin-project 1.0.1", + "parking_lot 0.11.1", + "pin-project 1.0.2", "prost", "prost-build", "rand 0.7.3", "ring", "rw-stream-sink", - "sha2 0.9.1", - "smallvec 1.4.1", + "sha2 0.9.2", + "smallvec 1.5.0", "thiserror", "unsigned-varint 0.5.1", "void", @@ -2933,7 +3041,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34aea69349e70a58ef9ecd21ac12c5eaa36255ac6986828079d26393f9e618cb" dependencies = [ "flate2", - "futures 0.3.5", + "futures 0.3.8", "libp2p-core", ] @@ -2943,7 +3051,7 @@ version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0baeff71fb5cb1fe1604f74a712a44b66a8c5900f4022411a1d550f09d6bb776" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "libp2p-core", "log", ] @@ -2956,14 +3064,14 @@ checksum = "db0f925a45f310b678e70faf71a10023b829d02eb9cc2628a63de928936f3ade" dependencies = [ "cuckoofilter", "fnv", - "futures 0.3.5", + "futures 0.3.8", "libp2p-core", "libp2p-swarm", "log", "prost", "prost-build", "rand 0.7.3", - "smallvec 1.4.1", + "smallvec 1.5.0", ] [[package]] @@ -2976,7 +3084,7 @@ dependencies = [ "byteorder", "bytes 0.5.6", "fnv", - "futures 0.3.5", + "futures 0.3.8", "futures_codec", "hex_fmt", "libp2p-core", @@ -2986,8 +3094,8 @@ dependencies = [ "prost", "prost-build", "rand 0.7.3", - "sha2 0.9.1", - "smallvec 1.4.1", + "sha2 0.9.2", + "smallvec 1.5.0", "unsigned-varint 0.5.1", "wasm-timer", ] @@ -2998,13 +3106,13 @@ version = "0.24.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e074124669840484de564901d47f2d0892e73f6d8ee7c37e9c2644af1b217bf4" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "libp2p-core", "libp2p-swarm", "log", "prost", "prost-build", - "smallvec 1.4.1", + "smallvec 1.5.0", "wasm-timer", ] @@ -3014,11 +3122,11 @@ version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78a2653b2e3254a3bbeb66bfc3f0dca7d6cba6aa2a96791db114003dec1b5394" dependencies = [ - "arrayvec 0.5.1", + "arrayvec 0.5.2", "bytes 0.5.6", "either", "fnv", - "futures 0.3.5", + "futures 0.3.8", "futures_codec", "libp2p-core", "libp2p-swarm", @@ -3027,8 +3135,8 @@ dependencies = [ "prost", "prost-build", "rand 0.7.3", - "sha2 0.9.1", - "smallvec 1.4.1", + "sha2 0.9.2", + "smallvec 1.5.0", "uint", "unsigned-varint 0.5.1", "void", @@ -3045,14 +3153,14 @@ dependencies = [ "data-encoding", "dns-parser", "either", - "futures 0.3.5", + "futures 0.3.8", "lazy_static", "libp2p-core", "libp2p-swarm", "log", "net2", "rand 0.7.3", - "smallvec 1.4.1", + "smallvec 1.5.0", "void", "wasm-timer", ] @@ -3064,14 +3172,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed764eab613a8fb6b7dcf6c796f55a06fef2270e528329903e25cd3311b99663" dependencies = [ "bytes 0.5.6", - "futures 0.3.5", + "futures 0.3.8", "futures_codec", "libp2p-core", "log", "nohash-hasher", - "parking_lot 0.11.0", + "parking_lot 0.11.1", "rand 0.7.3", - "smallvec 1.4.1", + "smallvec 1.5.0", "unsigned-varint 0.5.1", ] @@ -3083,17 +3191,17 @@ checksum = "fb441fb015ec16690099c5d910fcba271d357763b3dcb784db7b27bbb0b68372" dependencies = [ "bytes 0.5.6", "curve25519-dalek 3.0.0", - "futures 0.3.5", + "futures 0.3.8", "lazy_static", "libp2p-core", "log", "prost", "prost-build", "rand 0.7.3", - "sha2 0.9.1", + "sha2 0.9.2", "snow", "static_assertions", - "x25519-dalek 1.1.0", + "x25519-dalek", "zeroize", ] @@ -3103,7 +3211,7 @@ version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82e5c50936cfdbe96a514e8992f304fa44cd3a681b6f779505f1ae62b3474705" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "libp2p-core", "libp2p-swarm", "log", @@ -3119,7 +3227,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21026557c335d3639591f247b19b7536195772034ec7e9c463137227f95eaaa1" dependencies = [ "bytes 0.5.6", - "futures 0.3.5", + "futures 0.3.8", "futures_codec", "libp2p-core", "log", @@ -3135,9 +3243,9 @@ version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96b3c2d5d26a9500e959a0e19743897239a6c4be78dadf99b70414301a70c006" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "log", - "pin-project 0.4.22", + "pin-project 0.4.27", "rand 0.7.3", "salsa20", "sha3 0.9.1", @@ -3151,14 +3259,14 @@ checksum = "2dd9a1e0e6563dec1c9e702f7e68bdaa43da62a84536aa06372d3fed3e25d4ca" dependencies = [ "async-trait", "bytes 0.5.6", - "futures 0.3.5", + "futures 0.3.8", "libp2p-core", "libp2p-swarm", "log", - "lru 0.6.0", + "lru 0.6.1", "minicbor", "rand 0.7.3", - "smallvec 1.4.1", + "smallvec 1.5.0", "unsigned-varint 0.5.1", "wasm-timer", ] @@ -3170,11 +3278,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "565f0e06674b4033c978471e4083d5aaa8e03cef0719a0ec0905aaeaad39a919" dependencies = [ "either", - "futures 0.3.5", + "futures 0.3.8", "libp2p-core", "log", "rand 0.7.3", - "smallvec 1.4.1", + "smallvec 1.5.0", "void", "wasm-timer", ] @@ -3186,7 +3294,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "33f3dce259c0d3127af5167f45c275b6c047320efdd0e40fde947482487af0a3" dependencies = [ "async-std", - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", "if-addrs", "ipnet", @@ -3202,7 +3310,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e0aba04370a00d8d0236e350bc862926c1b42542a169aa6a481e660e5b990fe" dependencies = [ "async-std", - "futures 0.3.5", + "futures 0.3.8", "libp2p-core", "log", ] @@ -3213,7 +3321,7 @@ version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c703816f4170477a375b49c56d349e535ce68388f81ba1d9a3c8e2517effa82" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "js-sys", "libp2p-core", "parity-send-wrapper", @@ -3229,14 +3337,14 @@ checksum = "8d5e7268a959748040a0cf7456ad655be55b87f0ceda03bdb5b53674726b28f7" dependencies = [ "async-tls", "either", - "futures 0.3.5", + "futures 0.3.8", "libp2p-core", "log", "quicksink", "rustls", "rw-stream-sink", "soketto", - "url 2.1.1", + "url 2.2.0", "webpki", "webpki-roots", ] @@ -3247,9 +3355,9 @@ version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a0798cbb58535162c40858493d09af06eac42a26e4966e58de0df701f559348" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "libp2p-core", - "parking_lot 0.11.0", + "parking_lot 0.11.1", "thiserror", "yamux", ] @@ -3278,18 +3386,17 @@ dependencies = [ "hmac-drbg", "rand 0.7.3", "sha2 0.8.2", - "subtle 2.2.3", + "subtle 2.3.0", "typenum", ] [[package]] name = "libz-sys" -version = "1.0.25" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb5e43362e38e2bca2fd5f5134c4d4564a23a5c28e9b95411652021a8675ebe" +checksum = "602113192b08db8f38796c4e85c39e960c145965140e918018bcde1952429655" dependencies = [ "cc", - "libc", "pkg-config", "vcpkg", ] @@ -3322,9 +3429,9 @@ dependencies = [ [[package]] name = "lite-json" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c73e713a23ac6e12074c9e96ef2dfb770921e0cb9244c093bd38424209e0e523" +checksum = "0460d985423a026b4d9b828a7c6eed1bcf606f476322f3f9b507529686a61715" dependencies = [ "lite-parser", ] @@ -3349,9 +3456,9 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28247cc5a5be2f05fbcd76dd0cf2c7d3b5400cb978a28042abcd4fa0b3f8261c" +checksum = "dd96ffd135b2fd7b973ac026d28085defbe8983df057ced3eb4f2130b0831312" dependencies = [ "scopeguard", ] @@ -3365,6 +3472,19 @@ dependencies = [ "cfg-if 0.1.10", ] +[[package]] +name = "loom" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0e8460f2f2121162705187214720353c517b97bdfb3494c0b1e33d83ebe4bed" +dependencies = [ + "cfg-if 0.1.10", + "generator", + "scoped-tls", + "serde", + "serde_json", +] + [[package]] name = "lru" version = "0.4.3" @@ -3385,11 +3505,11 @@ dependencies = [ [[package]] name = "lru" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "111b945ac72ec09eb7bc62a0fbdc3cc6e80555a7245f52a69d3921a75b53b153" +checksum = "be716eb6878ca2263eb5d00a781aa13264a794f519fe6af4fbb2668b2d5441c0" dependencies = [ - "hashbrown 0.8.1", + "hashbrown 0.9.1", ] [[package]] @@ -3445,9 +3565,9 @@ checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" [[package]] name = 
"memchr" -version = "2.3.3" +version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" +checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" [[package]] name = "memmap" @@ -3461,11 +3581,11 @@ dependencies = [ [[package]] name = "memoffset" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c198b026e1bbf08a937e94c6c60f9ec4a2267f5b0d2eec9c1b21b061ce2be55f" +checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" dependencies = [ - "autocfg 1.0.0", + "autocfg 1.0.1", ] [[package]] @@ -3475,7 +3595,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36f36ddb0b2cdc25d38babba472108798e3477f02be5165f038c5e393e50c57a" dependencies = [ "hash-db", - "hashbrown 0.8.1", + "hashbrown 0.8.2", "parity-util-mem", ] @@ -3519,11 +3639,12 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.4.0" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be0f75932c1f6cfae3c04000e40114adf955636e19040f9c0a2c380702aa1c7f" +checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d" dependencies = [ "adler", + "autocfg 1.0.1", ] [[package]] @@ -3565,7 +3686,7 @@ checksum = "0840c1c50fd55e521b247f949c241c9997709f23bd7f023b9762cd561e935656" dependencies = [ "log", "mio", - "miow 0.3.5", + "miow 0.3.6", "winapi 0.3.9", ] @@ -3594,9 +3715,9 @@ dependencies = [ [[package]] name = "miow" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07b88fb9795d4d36d62a012dfbf49a8f5cf12751f36d31a9dbe66d528e58979e" +checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" dependencies = [ "socket2", "winapi 0.3.9", @@ -3618,16 +3739,16 @@ dependencies = [ "blake2s_simd", "digest 0.9.0", "sha-1 0.9.2", - "sha2 0.9.1", + "sha2 0.9.2", "sha3 0.9.1", "unsigned-varint 0.5.1", ] [[package]] name = "multimap" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8883adfde9756c1d30b0f519c9b8c502a94b41ac62f696453c37c7fc0a958ce" +checksum = "1255076139a83bb467426e7f8d0134968a8118844faa755985e077cf31850333" [[package]] name = "multistream-select" @@ -3636,10 +3757,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93faf2e41f9ee62fb01680ed48f3cc26652352327aa2e59869070358f6b7dd75" dependencies = [ "bytes 0.5.6", - "futures 0.3.5", + "futures 0.3.8", "log", - "pin-project 1.0.1", - "smallvec 1.4.1", + "pin-project 1.0.2", + "smallvec 1.5.0", "unsigned-varint 0.5.1", ] @@ -3669,11 +3790,21 @@ dependencies = [ "rand 0.3.23", ] +[[package]] +name = "nb-connect" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8123a81538e457d44b933a02faf885d3fe8408806b23fa700e8f01c6c3a98998" +dependencies = [ + "libc", + "winapi 0.3.9", +] + [[package]] name = "net2" -version = "0.2.34" +version = "0.2.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ba7c918ac76704fb42afcbbb43891e72731f3dcca3bef2a19786297baf14af7" +checksum = "3ebc3ec692ed7c9a255596c67808dee269f64655d8baf7b4f0638e51ba1d6853" dependencies = [ "cfg-if 0.1.10", "libc", @@ -3699,7 +3830,7 @@ version = "0.8.0" dependencies = [ "derive_more", "fs_extra", - "futures 0.3.5", + "futures 0.3.8", "hash-db", "hex", "kvdb", @@ -3735,7 +3866,7 
@@ dependencies = [ name = "node-browser-testing" version = "2.0.0" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", "jsonrpc-core", "libp2p", @@ -3756,7 +3887,7 @@ dependencies = [ "frame-benchmarking-cli", "frame-support", "frame-system", - "futures 0.3.5", + "futures 0.3.8", "hex-literal", "log", "nix", @@ -3923,7 +4054,7 @@ dependencies = [ name = "node-rpc-client" version = "2.0.0" dependencies = [ - "futures 0.1.29", + "futures 0.1.30", "hyper 0.12.35", "jsonrpc-core-client", "log", @@ -4078,7 +4209,7 @@ dependencies = [ "frame-support", "frame-system", "fs_extra", - "futures 0.3.5", + "futures 0.3.8", "log", "node-executor", "node-primitives", @@ -4142,7 +4273,7 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" dependencies = [ - "autocfg 1.0.0", + "autocfg 1.0.1", "num-integer", "num-traits", ] @@ -4153,17 +4284,17 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6b19411a9719e753aff12e5187b74d60d3dc449ec3f4dc21e3989c3f554bc95" dependencies = [ - "autocfg 1.0.0", + "autocfg 1.0.1", "num-traits", ] [[package]] name = "num-integer" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d59457e662d541ba17869cf51cf177c0b5f0cbf476c66bdc90bf1edac4f875b" +checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" dependencies = [ - "autocfg 1.0.0", + "autocfg 1.0.1", "num-traits", ] @@ -4173,7 +4304,7 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" dependencies = [ - "autocfg 1.0.0", + "autocfg 1.0.1", "num-bigint", "num-integer", "num-traits", @@ -4181,11 +4312,11 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611" +checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" dependencies = [ - "autocfg 1.0.0", + "autocfg 1.0.1", "libm", ] @@ -4216,13 +4347,19 @@ dependencies = [ "wasmparser 0.57.0", ] +[[package]] +name = "object" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397" + [[package]] name = "once_cell" -version = "1.4.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "260e51e7efe62b592207e9e13a68e43692a7a279171d6ba57abd208bf23645ad" +checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" dependencies = [ - "parking_lot 0.11.0", + "parking_lot 0.11.1", ] [[package]] @@ -4430,7 +4567,7 @@ dependencies = [ "pallet-timestamp", "parity-scale-codec", "parity-wasm 0.41.0", - "paste 1.0.0", + "paste 1.0.3", "pretty_assertions", "pwasm-utils 0.16.0", "rand 0.7.3", @@ -5051,7 +5188,7 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "parity-scale-codec", "serde", - "smallvec 1.4.1", + "smallvec 1.5.0", "sp-core", "sp-io", "sp-runtime", @@ -5157,9 +5294,9 @@ dependencies = [ [[package]] name = "parity-multiaddr" -version = "0.9.4" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22fe99b938abd57507e37f8d4ef30cd74b33c71face2809b37b8beb71bab15ab" +checksum = 
"43244a26dc1ddd3097216bb12eaa6cf8a07b060c72718d9ebd60fd297d6401df" dependencies = [ "arrayref", "bs58 0.4.0", @@ -5170,16 +5307,16 @@ dependencies = [ "serde", "static_assertions", "unsigned-varint 0.5.1", - "url 2.1.1", + "url 2.2.0", ] [[package]] name = "parity-scale-codec" -version = "1.3.4" +version = "1.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34d38aeaffc032ec69faa476b3caaca8d4dd7f3f798137ff30359e5c7869ceb6" +checksum = "7c740e5fbcb6847058b40ac7e5574766c6388f585e184d769910fe0d3a2ca861" dependencies = [ - "arrayvec 0.5.1", + "arrayvec 0.5.2", "bitvec", "byte-slice-cast", "parity-scale-codec-derive", @@ -5188,9 +5325,9 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd20ff7e0399b274a5f5bb37b712fccb5b3a64b9128200d1c3cc40fe709cb073" +checksum = "198db82bb1c18fc00176004462dd809b2a6d851669550aa17af6dacd21ae0c14" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -5211,11 +5348,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e57fea504fea33f9fbb5f49f378359030e7e026a6ab849bb9e8f0787376f1bf" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.30", "libc", "log", "mio-named-pipes", - "miow 0.3.5", + "miow 0.3.6", "rand 0.7.3", "tokio 0.1.22", "tokio-named-pipes", @@ -5231,13 +5368,13 @@ checksum = "297ff91fa36aec49ce183484b102f6b75b46776822bd81525bfc4cc9b0dd0f5c" dependencies = [ "cfg-if 0.1.10", "ethereum-types", - "hashbrown 0.8.1", + "hashbrown 0.8.2", "impl-trait-for-tuples", "lru 0.5.3", "parity-util-mem-derive", "parking_lot 0.10.2", "primitive-types", - "smallvec 1.4.1", + "smallvec 1.5.0", "winapi 0.3.9", ] @@ -5282,7 +5419,7 @@ dependencies = [ "rand 0.7.3", "sha-1 0.8.2", "slab", - "url 2.1.1", + "url 2.2.0", ] [[package]] @@ -5314,12 +5451,12 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4893845fa2ca272e647da5d0e46660a314ead9c2fdd9a883aabc32e481a8733" +checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" dependencies = [ "instant", - "lock_api 0.4.1", + "lock_api 0.4.2", "parking_lot_core 0.8.0", ] @@ -5348,7 +5485,7 @@ dependencies = [ "cloudabi 0.0.3", "libc", "redox_syscall", - "smallvec 1.4.1", + "smallvec 1.5.0", "winapi 0.3.9", ] @@ -5363,7 +5500,7 @@ dependencies = [ "instant", "libc", "redox_syscall", - "smallvec 1.4.1", + "smallvec 1.5.0", "winapi 0.3.9", ] @@ -5379,9 +5516,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ddc8e145de01d9180ac7b78b9676f95a9c2447f6a88b2c2a04702211bc5d71" +checksum = "7151b083b0664ed58ed669fcdd92f01c3d2fdbf10af4931a301474950b52bfa9" [[package]] name = "paste-impl" @@ -5490,27 +5627,27 @@ dependencies = [ [[package]] name = "pin-project" -version = "0.4.22" +version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12e3a6cdbfe94a5e4572812a0201f8c0ed98c1c452c7b8563ce2276988ef9c17" +checksum = "2ffbc8e94b38ea3d2d8ba92aea2983b503cd75d0888d75b86bb37970b5698e15" dependencies = [ - "pin-project-internal 0.4.22", + "pin-project-internal 0.4.27", ] [[package]] name = "pin-project" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ee41d838744f60d959d7074e3afb6b35c7456d0f61cad38a24e35e6553f73841" +checksum = "9ccc2237c2c489783abd8c4c80e5450fc0e98644555b1364da68cc29aa151ca7" dependencies = [ - "pin-project-internal 1.0.1", + "pin-project-internal 1.0.2", ] [[package]] name = "pin-project-internal" -version = "0.4.22" +version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a0ffd45cf79d88737d7cc85bfd5d2894bee1139b356e616fe85dc389c61aaf7" +checksum = "65ad2ae56b6abe3a1ee25f15ee605bacadb9a764edaba9c2bf4103800d4a1895" dependencies = [ "proc-macro2", "quote", @@ -5519,9 +5656,9 @@ dependencies = [ [[package]] name = "pin-project-internal" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81a4ffa594b66bff340084d4081df649a7dc049ac8d7fc458d8e628bfbbb2f86" +checksum = "f8e8d2bf0b23038a4424865103a4df472855692821aab4e4f5c3312d461d9e5f" dependencies = [ "proc-macro2", "quote", @@ -5530,9 +5667,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.1.7" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282adbf10f2698a7a77f8e983a74b2d18176c19a7fd32a45446139ae7b02b715" +checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" [[package]] name = "pin-utils" @@ -5542,9 +5679,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d36492546b6af1463394d46f0c834346f31548646f6ba10849802c9c9a27ac33" +checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" [[package]] name = "platforms" @@ -5566,31 +5703,31 @@ dependencies = [ [[package]] name = "polling" -version = "1.1.0" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0720e0b9ea9d52451cf29d3413ba8a9303f8815d9d9653ef70e03ff73e65566" +checksum = "a2a7bc6b2a29e632e45451c941832803a18cce6781db04de8a04696cdca8bde4" dependencies = [ "cfg-if 0.1.10", "libc", "log", - "wepoll-sys-stjepang", + "wepoll-sys", "winapi 0.3.9", ] [[package]] name = "poly1305" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b42192ab143ed7619bf888a7f9c6733a9a2153b218e2cd557cfdb52fbf9bb1" +checksum = "22ce46de8e53ee414ca4d02bfefac75d8c12fba948b76622a40b4be34dfce980" dependencies = [ "universal-hash", ] [[package]] name = "polyval" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9a50142b55ab3ed0e9f68dfb3709f1d90d29da24e91033f28b96330643107dc" +checksum = "a5884790f1ce3553ad55fec37b5aaac5882e0e845a2612df744d6c85c9bf046c" dependencies = [ "cfg-if 0.1.10", "universal-hash", @@ -5598,9 +5735,9 @@ dependencies = [ [[package]] name = "ppv-lite86" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" +checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" [[package]] name = "predicates" @@ -5642,9 +5779,9 @@ dependencies = [ [[package]] name = "primitive-types" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c55c21c64d0eaa4d7ed885d959ef2d62d9e488c27c0e02d9aa5ce6c877b7d5f8" +checksum = 
"7dd39dcacf71411ba488570da7bbc89b717225e46478b30ba99b92db6b149809" dependencies = [ "fixed-hash", "impl-codec", @@ -5664,9 +5801,9 @@ dependencies = [ [[package]] name = "proc-macro-error" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc175e9777c3116627248584e8f8b3e2987405cabe1c0adf7d1dd28f09dc7880" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", "proc-macro2", @@ -5677,22 +5814,20 @@ dependencies = [ [[package]] name = "proc-macro-error-attr" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cc9795ca17eb581285ec44936da7fc2335a3f34f2ddd13118b6f4d515435c50" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ "proc-macro2", "quote", - "syn", - "syn-mid", "version_check", ] [[package]] name = "proc-macro-hack" -version = "0.5.16" +version = "0.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e0456befd48169b9f13ef0f0ad46d492cf9d2dbb918bcf38e01eed4ce3ec5e4" +checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro-nested" @@ -5718,7 +5853,7 @@ dependencies = [ "cfg-if 0.1.10", "fnv", "lazy_static", - "parking_lot 0.11.0", + "parking_lot 0.11.1", "regex", "thiserror", ] @@ -5907,7 +6042,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom 0.1.14", + "getrandom 0.1.15", "libc", "rand_chacha 0.2.2", "rand_core 0.5.1", @@ -5956,7 +6091,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom 0.1.14", + "getrandom 0.1.15", ] [[package]] @@ -6059,25 +6194,25 @@ checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" [[package]] name = "rayon" -version = "1.3.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f02856753d04e03e26929f820d0a0a337ebe71f849801eea335d464b349080" +checksum = "8b0d8e0819fadc20c74ea8373106ead0600e3a67ef1fe8da56e39b9ae7275674" dependencies = [ - "autocfg 1.0.0", - "crossbeam-deque", + "autocfg 1.0.1", + "crossbeam-deque 0.8.0", "either", "rayon-core", ] [[package]] name = "rayon-core" -version = "1.7.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e92e15d89083484e11353891f1af602cc661426deb9564c298b270c726973280" +checksum = "9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a" dependencies = [ - "crossbeam-deque", - "crossbeam-queue", - "crossbeam-utils", + "crossbeam-channel", + "crossbeam-deque 0.8.0", + "crossbeam-utils 0.8.0", "lazy_static", "num_cpus", ] @@ -6099,29 +6234,29 @@ checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" [[package]] name = "redox_users" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09b23093265f8d200fa7b4c2c76297f47e681c655f6f1285a8780d6a022f7431" +checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ - "getrandom 0.1.14", + "getrandom 0.1.15", "redox_syscall", "rust-argon2", ] [[package]] name = "ref-cast" -version = "1.0.2" +version = "1.0.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "745c1787167ddae5569661d5ffb8b25ae5fedbf46717eaa92d652221cec72623" +checksum = "e17626b2f4bcf35b84bf379072a66e28cfe5c3c6ae58b38e4914bb8891dabece" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d21b475ab879ef0e315ad99067fa25778c3b0377f57f1b00207448dac1a3144" +checksum = "0c523ccaed8ac4b0288948849a350b37d3035827413c458b6a40ddb614bb4f72" dependencies = [ "proc-macro2", "quote", @@ -6136,7 +6271,7 @@ checksum = "b9ba8aaf5fe7cf307c6dbdaeed85478961d29e25e3bee5169e11b92fa9f027a8" dependencies = [ "log", "rustc-hash", - "smallvec 1.4.1", + "smallvec 1.5.0", ] [[package]] @@ -6196,9 +6331,9 @@ checksum = "e005d658ad26eacc2b6c506dfde519f4e277e328d0eb3379ca61647d70a8f531" [[package]] name = "ring" -version = "0.16.15" +version = "0.16.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "952cd6b98c85bbc30efa1ba5783b8abf12fec8b3287ffa52605b9432313e34e4" +checksum = "b72b84d47e8ec5a4f2872e8262b8f8256c5be1c938a7d6d3a867a3ba8f722f74" dependencies = [ "cc", "libc", @@ -6222,13 +6357,24 @@ dependencies = [ [[package]] name = "rlp" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a7d3f9bed94764eac15b8f14af59fac420c236adaff743b7bcc88e265cb4345" +checksum = "1190dcc8c3a512f1eef5d09bb8c84c7f39e1054e174d1795482e18f5272f2e73" dependencies = [ "rustc-hex", ] +[[package]] +name = "rlp-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "rocksdb" version = "0.15.0" @@ -6251,21 +6397,21 @@ dependencies = [ [[package]] name = "rust-argon2" -version = "0.7.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bc8af4bda8e1ff4932523b94d3dd20ee30a87232323eda55903ffd71d2fb017" +checksum = "9dab61250775933275e84053ac235621dfb739556d5c54a2f2e9313b7cf43a19" dependencies = [ - "base64 0.11.0", + "base64 0.12.3", "blake2b_simd", "constant_time_eq", - "crossbeam-utils", + "crossbeam-utils 0.7.2", ] [[package]] name = "rustc-demangle" -version = "0.1.16" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" +checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" [[package]] name = "rustc-hash" @@ -6290,9 +6436,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.18.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cac94b333ee2aac3284c5b8a1b7fb4dd11cba88c244e3fe33cdbd047af0eb693" +checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" dependencies = [ "base64 0.12.3", "log", @@ -6315,14 +6461,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9bdc5e856e51e685846fb6c13a1f5e5432946c2c90501bdc76a1319f19e29da" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] +checksum = "cb5d2a036dc6d2d8fd16fde3498b04306e29bd193bf306a57427019b823d5acd" [[package]] name = "rw-stream-sink" @@ -6330,8 +6471,8 @@ version = "0.2.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020" dependencies = [ - "futures 0.3.5", - "pin-project 0.4.22", + "futures 0.3.8", + "pin-project 0.4.27", "static_assertions", ] @@ -6356,7 +6497,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7f47b10fa80f6969bbbd9c8e7cc998f082979d402a9e10579e2303a87955395" dependencies = [ - "stream-cipher 0.7.1", + "stream-cipher", ] [[package]] @@ -6376,7 +6517,7 @@ dependencies = [ "bytes 0.5.6", "derive_more", "either", - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", "libp2p", "log", @@ -6405,7 +6546,7 @@ dependencies = [ name = "sc-basic-authorship" version = "0.8.0" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", "log", "parity-scale-codec", @@ -6482,7 +6623,7 @@ dependencies = [ "atty", "chrono", "fdlimit", - "futures 0.3.5", + "futures 0.3.8", "hex", "lazy_static", "libp2p", @@ -6513,7 +6654,7 @@ dependencies = [ "tempfile", "thiserror", "tiny-bip39", - "tokio 0.2.22", + "tokio 0.2.23", "tracing", "tracing-log", "tracing-subscriber", @@ -6535,7 +6676,7 @@ version = "2.0.0" dependencies = [ "derive_more", "fnv", - "futures 0.3.5", + "futures 0.3.8", "hash-db", "hex-literal", "kvdb", @@ -6617,7 +6758,7 @@ name = "sc-consensus-aura" version = "0.8.0" dependencies = [ "derive_more", - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", "log", "parity-scale-codec", @@ -6657,7 +6798,7 @@ version = "0.8.0" dependencies = [ "derive_more", "fork-tree", - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", "log", "merlin", @@ -6710,7 +6851,7 @@ name = "sc-consensus-babe-rpc" version = "0.8.0" dependencies = [ "derive_more", - "futures 0.3.5", + "futures 0.3.8", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", @@ -6752,7 +6893,7 @@ version = "0.8.0" dependencies = [ "assert_matches", "derive_more", - "futures 0.3.5", + "futures 0.3.8", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", @@ -6780,7 +6921,7 @@ dependencies = [ "substrate-test-runtime-client", "substrate-test-runtime-transaction-pool", "tempfile", - "tokio 0.2.22", + "tokio 0.2.23", ] [[package]] @@ -6788,7 +6929,7 @@ name = "sc-consensus-pow" version = "0.8.0" dependencies = [ "derive_more", - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", "log", "parity-scale-codec", @@ -6810,7 +6951,7 @@ dependencies = [ name = "sc-consensus-slots" version = "0.8.0" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", "log", "parity-scale-codec", @@ -6940,12 +7081,12 @@ dependencies = [ "derive_more", "finality-grandpa", "fork-tree", - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", "log", "parity-scale-codec", "parking_lot 0.10.2", - "pin-project 0.4.22", + "pin-project 0.4.27", "rand 0.7.3", "sc-block-builder", "sc-client-api", @@ -6974,7 +7115,7 @@ dependencies = [ "substrate-prometheus-endpoint", "substrate-test-runtime-client", "tempfile", - "tokio 0.2.22", + "tokio 0.2.23", ] [[package]] @@ -6983,7 +7124,7 @@ version = "0.8.0" dependencies = [ "derive_more", "finality-grandpa", - "futures 0.3.5", + "futures 0.3.8", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", @@ -7012,7 +7153,7 @@ name = "sc-informant" version = "0.8.0" dependencies = [ "ansi_term 0.12.1", - "futures 0.3.5", + "futures 0.3.8", "log", "parity-util-mem", "sc-client-api", @@ -7030,7 +7171,7 @@ version = "2.0.0" dependencies = [ "async-trait", "derive_more", - "futures 0.3.5", + 
"futures 0.3.8", "futures-util", "hex", "merlin", @@ -7040,7 +7181,7 @@ dependencies = [ "sp-application-crypto", "sp-core", "sp-keystore", - "subtle 2.2.3", + "subtle 2.3.0", "tempfile", ] @@ -7077,7 +7218,7 @@ dependencies = [ "erased-serde", "fnv", "fork-tree", - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", "futures_codec", "hex", @@ -7090,7 +7231,7 @@ dependencies = [ "nohash-hasher", "parity-scale-codec", "parking_lot 0.10.2", - "pin-project 0.4.22", + "pin-project 0.4.27", "prost", "prost-build", "quickcheck", @@ -7128,7 +7269,7 @@ name = "sc-network-gossip" version = "0.8.0" dependencies = [ "async-std", - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", "libp2p", "log", @@ -7145,7 +7286,7 @@ dependencies = [ name = "sc-network-test" version = "0.8.0" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", "libp2p", "log", @@ -7173,9 +7314,9 @@ version = "2.0.0" dependencies = [ "bytes 0.5.6", "fnv", - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", - "hyper 0.13.7", + "hyper 0.13.9", "hyper-rustls", "lazy_static", "log", @@ -7197,14 +7338,14 @@ dependencies = [ "sp-utils", "substrate-test-runtime-client", "threadpool", - "tokio 0.2.22", + "tokio 0.2.23", ] [[package]] name = "sc-peerset" version = "2.0.0" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "libp2p", "log", "rand 0.7.3", @@ -7226,8 +7367,8 @@ name = "sc-rpc" version = "2.0.0" dependencies = [ "assert_matches", - "futures 0.1.29", - "futures 0.3.5", + "futures 0.1.30", + "futures 0.3.8", "hash-db", "jsonrpc-core", "jsonrpc-pubsub", @@ -7266,7 +7407,7 @@ name = "sc-rpc-api" version = "0.8.0" dependencies = [ "derive_more", - "futures 0.3.5", + "futures 0.3.8", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", @@ -7288,7 +7429,7 @@ dependencies = [ name = "sc-rpc-server" version = "2.0.0" dependencies = [ - "futures 0.1.29", + "futures 0.1.30", "jsonrpc-core", "jsonrpc-http-server", "jsonrpc-ipc-server", @@ -7323,8 +7464,8 @@ dependencies = [ "derive_more", "directories", "exit-future", - "futures 0.1.29", - "futures 0.3.5", + "futures 0.1.30", + "futures 0.3.8", "futures-timer 3.0.2", "hash-db", "jsonrpc-core", @@ -7334,7 +7475,7 @@ dependencies = [ "parity-scale-codec", "parity-util-mem", "parking_lot 0.10.2", - "pin-project 0.4.22", + "pin-project 0.4.27", "rand 0.7.3", "sc-block-builder", "sc-chain-spec", @@ -7379,7 +7520,7 @@ dependencies = [ "substrate-test-runtime", "substrate-test-runtime-client", "tempfile", - "tokio 0.2.22", + "tokio 0.2.23", "tracing", "tracing-futures", "wasm-timer", @@ -7390,8 +7531,8 @@ name = "sc-service-test" version = "2.0.0" dependencies = [ "fdlimit", - "futures 0.1.29", - "futures 0.3.5", + "futures 0.1.30", + "futures 0.3.8", "hex-literal", "log", "parity-scale-codec", @@ -7456,12 +7597,12 @@ dependencies = [ name = "sc-telemetry" version = "2.0.0" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", "libp2p", "log", "parking_lot 0.10.2", - "pin-project 0.4.22", + "pin-project 0.4.27", "rand 0.7.3", "serde", "slog", @@ -7497,7 +7638,7 @@ dependencies = [ "assert_matches", "criterion", "derive_more", - "futures 0.3.5", + "futures 0.3.8", "linked-hash-map", "log", "parity-scale-codec", @@ -7520,7 +7661,7 @@ version = "2.0.0" dependencies = [ "assert_matches", "derive_more", - "futures 0.3.5", + "futures 0.3.8", "futures-diagnose", "hex", "intervalier", @@ -7563,14 +7704,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"021b403afe70d81eea68f6ea12f6b3c9588e5d536a94c3bf80f15e7faa267862" dependencies = [ "arrayref", - "arrayvec 0.5.1", + "arrayvec 0.5.2", "curve25519-dalek 2.1.0", - "getrandom 0.1.14", + "getrandom 0.1.15", "merlin", "rand 0.7.3", "rand_core 0.5.1", "sha2 0.8.2", - "subtle 2.2.3", + "subtle 2.3.0", "zeroize", ] @@ -7588,18 +7729,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "scroll" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb2332cb595d33f7edd5700f4cbf94892e680c7f0ae56adab58a35190b66cb1" +checksum = "fda28d4b4830b807a8b43f7b0e6b5df875311b3e7621d84577188c175b6ec1ec" dependencies = [ "scroll_derive", ] [[package]] name = "scroll_derive" -version = "0.10.2" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e367622f934864ffa1c704ba2b82280aab856e3d8213c84c5720257eb34b15b9" +checksum = "b12bd20b94c7cdfda8c7ba9b92ad0d9a56e3fa018c25fca83b51aa664c9b4c0d" dependencies = [ "proc-macro2", "quote", @@ -7662,15 +7803,6 @@ name = "semver" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser", -] - -[[package]] -name = "semver" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "394cec28fa623e00903caf7ba4fa6fb9a0e260280bb8cdbbba029611108a0190" dependencies = [ "semver-parser", "serde", @@ -7702,9 +7834,9 @@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "serde" -version = "1.0.116" +version = "1.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96fe57af81d28386a513cbc6858332abc6117cfdb5999647c6444b8f43a370a5" +checksum = "b88fa983de7720629c9387e9f517353ed404164b1e482c970a90c1a4aaf7dc1a" dependencies = [ "serde_derive", ] @@ -7721,9 +7853,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.116" +version = "1.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f630a6370fd8e457873b4bd2ffdae75408bc291ba72be773772a4c2a065d9ae8" +checksum = "cbd1ae72adb44aab48f325a02444a5fc079349a8d804c1fc922aed3f7454c74e" dependencies = [ "proc-macro2", "quote", @@ -7732,9 +7864,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a230ea9107ca2220eea9d46de97eddcb04cd00e92d13dda78e478dd33fa82bd4" +checksum = "dcac07dbffa1c65e7f816ab9eba78eb142c6d44410f4eeba1e26e4f5dfa56b95" dependencies = [ "itoa", "ryu", @@ -7780,12 +7912,12 @@ dependencies = [ [[package]] name = "sha2" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2933378ddfeda7ea26f48c555bdad8bb446bf8a3d17832dc83e380d444cfb8c1" +checksum = "6e7aab86fe2149bad8c507606bdb3f4ef5e7b2380eb92350f56122cca72a42a8" dependencies = [ "block-buffer 0.9.0", - "cfg-if 0.1.10", + "cfg-if 1.0.0", "cpuid-bool", "digest 0.9.0", "opaque-debug 0.3.0", @@ -7818,11 +7950,12 @@ dependencies = [ [[package]] name = "sharded-slab" -version = "0.0.9" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06d5a3f5166fb5b42a5439f2eee8b9de149e235961e3eb21c5808fc3ea17ff3e" +checksum = "7b4921be914e16899a80adefb821f8ddb7974e3f1250223575a44ed994882127" dependencies = [ 
"lazy_static", + "loom", ] [[package]] @@ -7833,19 +7966,18 @@ checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" [[package]] name = "signal-hook-registry" -version = "1.2.0" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94f478ede9f64724c5d173d7bb56099ec3e2d9fc2774aac65d34b8b890405f41" +checksum = "ce32ea0c6c56d5eacaeb814fbed9960547021d3edd010ded1425f180536b20ab" dependencies = [ - "arc-swap", "libc", ] [[package]] name = "signature" -version = "1.1.0" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65211b7b6fc3f14ff9fc7a2011a434e3e6880585bd2e9e9396315ae24cbf7852" +checksum = "29f060a7d147e33490ec10da418795238fd7545bba241504d6b31a409f2e6210" [[package]] name = "slab" @@ -7908,15 +8040,15 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3757cb9d89161a2f24e1cf78efa0c1fcff485d18e3f55e0aa3480824ddaa0f3f" +checksum = "7acad6f34eb9e8a259d3283d1e8c1d34d7415943d4895f65cc73813c7396fc85" [[package]] name = "snow" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32bf8474159a95551661246cda4976e89356999e3cbfef36f493dacc3fae1e8e" +checksum = "795dd7aeeee24468e5a32661f6d27f7b5cbed802031b2d7640c7b10f8fb2dd50" dependencies = [ "aes-gcm", "blake2", @@ -7925,16 +8057,16 @@ dependencies = [ "rand_core 0.5.1", "ring", "rustc_version", - "sha2 0.9.1", - "subtle 2.2.3", - "x25519-dalek 0.6.0", + "sha2 0.9.2", + "subtle 2.3.0", + "x25519-dalek", ] [[package]] name = "socket2" -version = "0.3.12" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03088793f677dce356f3ccc2edb1b314ad191ab702a5de3faf49304f7e104918" +checksum = "7fd8b795c389288baa5f355489c65e71fd48a02104600d15c4cfbc561e9e429d" dependencies = [ "cfg-if 0.1.10", "libc", @@ -7944,18 +8076,18 @@ dependencies = [ [[package]] name = "soketto" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85457366ae0c6ce56bf05a958aef14cd38513c236568618edbcd9a8c52cb80b0" +checksum = "b5c71ed3d54db0a699f4948e1bb3e45b450fa31fe602621dee6680361d569c88" dependencies = [ "base64 0.12.3", "bytes 0.5.6", "flate2", - "futures 0.3.5", + "futures 0.3.8", "httparse", "log", "rand 0.7.3", - "sha-1 0.8.2", + "sha-1 0.9.2", ] [[package]] @@ -8124,7 +8256,7 @@ dependencies = [ name = "sp-consensus" version = "0.8.0" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", "libp2p", "log", @@ -8218,7 +8350,7 @@ dependencies = [ "criterion", "dyn-clonable", "ed25519-dalek", - "futures 0.3.5", + "futures 0.3.8", "hash-db", "hash256-std-hasher", "hex", @@ -8315,7 +8447,7 @@ dependencies = [ name = "sp-io" version = "2.0.0" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "hash-db", "libsecp256k1", "log", @@ -8350,7 +8482,7 @@ version = "0.8.0" dependencies = [ "async-trait", "derive_more", - "futures 0.3.5", + "futures 0.3.8", "merlin", "parity-scale-codec", "parking_lot 0.10.2", @@ -8572,7 +8704,7 @@ dependencies = [ "parking_lot 0.10.2", "pretty_assertions", "rand 0.7.3", - "smallvec 1.4.1", + "smallvec 1.5.0", "sp-core", "sp-externalities", "sp-panic-handler", @@ -8655,7 +8787,7 @@ name = "sp-transaction-pool" version = "2.0.0" dependencies = [ "derive_more", - "futures 0.3.5", + "futures 0.3.8", "log", "parity-scale-codec", "serde", @@ -8686,7 
+8818,7 @@ dependencies = [ name = "sp-utils" version = "2.0.0" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "futures-core", "futures-timer 3.0.2", "lazy_static", @@ -8741,23 +8873,14 @@ dependencies = [ "rand 0.5.6", ] -[[package]] -name = "stream-cipher" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09f8ed9974042b8c3672ff3030a69fcc03b74c47c3d1ecb7755e8a3626011e88" -dependencies = [ - "generic-array 0.14.3", -] - [[package]] name = "stream-cipher" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c80e15f898d8d8f25db24c253ea615cc14acf418ff307822995814e7d42cfa89" dependencies = [ - "block-cipher 0.8.0", - "generic-array 0.14.3", + "block-cipher", + "generic-array 0.14.4", ] [[package]] @@ -8777,9 +8900,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" [[package]] name = "structopt" -version = "0.3.15" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de2f5e239ee807089b62adce73e48c625e0ed80df02c7ab3f068f5db5281065c" +checksum = "126d630294ec449fae0b16f964e35bf3c74f940da9dca17ee9b905f7b3112eb8" dependencies = [ "clap", "lazy_static", @@ -8788,9 +8911,9 @@ dependencies = [ [[package]] name = "structopt-derive" -version = "0.4.8" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "510413f9de616762a4fbeab62509bf15c729603b72d7cd71280fbca431b1c118" +checksum = "65e51c492f9e23a220534971ff5afc14037289de430e3c83f9daf6a1b6ae91e8" dependencies = [ "heck", "proc-macro-error", @@ -8853,8 +8976,8 @@ dependencies = [ "chrono", "console_error_panic_hook", "console_log", - "futures 0.1.29", - "futures 0.3.5", + "futures 0.1.30", + "futures 0.3.8", "futures-timer 3.0.2", "js-sys", "kvdb-web", @@ -8895,14 +9018,14 @@ version = "2.0.0" dependencies = [ "frame-support", "frame-system", - "futures 0.3.5", + "futures 0.3.8", "jsonrpc-client-transports", "jsonrpc-core", "parity-scale-codec", "sc-rpc-api", "serde", "sp-storage", - "tokio 0.2.22", + "tokio 0.2.23", ] [[package]] @@ -8910,7 +9033,7 @@ name = "substrate-frame-rpc-system" version = "2.0.0" dependencies = [ "frame-system-rpc-runtime-api", - "futures 0.3.5", + "futures 0.3.8", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", @@ -8937,18 +9060,18 @@ dependencies = [ "async-std", "derive_more", "futures-util", - "hyper 0.13.7", + "hyper 0.13.9", "log", "prometheus", - "tokio 0.2.22", + "tokio 0.2.23", ] [[package]] name = "substrate-test-client" version = "2.0.0" dependencies = [ - "futures 0.1.29", - "futures 0.3.5", + "futures 0.1.30", + "futures 0.3.8", "hash-db", "hex", "parity-scale-codec", @@ -9017,7 +9140,7 @@ dependencies = [ name = "substrate-test-runtime-client" version = "2.0.0" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "parity-scale-codec", "sc-block-builder", "sc-client-api", @@ -9038,7 +9161,7 @@ name = "substrate-test-runtime-transaction-pool" version = "2.0.0" dependencies = [ "derive_more", - "futures 0.3.5", + "futures 0.3.8", "parity-scale-codec", "parking_lot 0.10.2", "sc-transaction-graph", @@ -9052,10 +9175,10 @@ dependencies = [ name = "substrate-test-utils" version = "2.0.0" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "sc-service", "substrate-test-utils-derive", - "tokio 0.2.22", + "tokio 0.2.23", "trybuild", ] @@ -9074,7 +9197,7 @@ version = "0.1.0" dependencies = [ "sc-service", "substrate-test-utils", - "tokio 0.2.22", + "tokio 0.2.23", ] [[package]] @@ -9105,9 +9228,9 
@@ checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" [[package]] name = "subtle" -version = "2.2.3" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "502d53007c02d7605a05df1c1a73ee436952781653da5d0bf57ad608f66932c1" +checksum = "343f3f510c2915908f155e94f17220b19ccfacf2a64a2a5d8004f2c3e311e7fd" [[package]] name = "syn" @@ -9120,17 +9243,6 @@ dependencies = [ "unicode-xid", ] -[[package]] -name = "syn-mid" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7be3539f6c128a931cf19dcee741c1af532c7fd387baa739c03dd2e96479338a" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "synstructure" version = "0.12.4" @@ -9171,9 +9283,9 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f" +checksum = "bf11676eb135389f21fcda654382c4859bbfc1d2f36e4425a2f829bb41b1e20e" dependencies = [ "winapi-util", ] @@ -9240,11 +9352,12 @@ dependencies = [ [[package]] name = "time" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" dependencies = [ "libc", + "wasi 0.10.0+wasi-snapshot-preview1", "winapi 0.3.9", ] @@ -9260,7 +9373,7 @@ dependencies = [ "pbkdf2 0.4.0", "rand 0.7.3", "rustc-hash", - "sha2 0.9.1", + "sha2 0.9.2", "thiserror", "unicode-normalization", "zeroize", @@ -9287,9 +9400,18 @@ dependencies = [ [[package]] name = "tinyvec" -version = "0.3.3" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53953d2d3a5ad81d9f844a32f14ebb121f50b650cd59d0ee2a07cf13c617efed" +checksum = "b78a366903f506d2ad52ca8dc552102ffdd3e937ba8a227f024dc1d1eae28575" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" @@ -9298,7 +9420,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.30", "mio", "num_cpus", "tokio-codec", @@ -9317,9 +9439,9 @@ dependencies = [ [[package]] name = "tokio" -version = "0.2.22" +version = "0.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d34ca54d84bf2b5b4d7d31e901a8464f7b60ac145a284fba25ceb801f2ddccd" +checksum = "a6d7ad61edd59bfcc7e80dababf0f4aed2e6d5e0ba1659356ae889752dfc12ff" dependencies = [ "bytes 0.5.6", "fnv", @@ -9346,7 +9468,7 @@ checksum = "8fb220f46c53859a4b7ec083e41dec9778ff0b1851c0942b211edb89e0ccdc46" dependencies = [ "bytes 0.4.12", "either", - "futures 0.1.29", + "futures 0.1.30", ] [[package]] @@ -9356,7 +9478,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25b2998660ba0e70d18684de5d06b70b70a3a747469af9dea7618cc59e75976b" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.30", "tokio-io", ] @@ -9366,7 +9488,7 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"b1de0e32a83f131e002238d7ccde18211c0a5397f60cbfffcb112868c2e0e20e" dependencies = [ - "futures 0.1.29", + "futures 0.1.30", "tokio-executor", ] @@ -9376,8 +9498,8 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb2d1b8f4548dbf5e1f7818512e9c406860678f29c300cdf0ebac72d1a3a1671" dependencies = [ - "crossbeam-utils", - "futures 0.1.29", + "crossbeam-utils 0.7.2", + "futures 0.1.30", ] [[package]] @@ -9386,7 +9508,7 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "297a1206e0ca6302a0eed35b700d292b275256f596e2f3fea7729d5e629b6ff4" dependencies = [ - "futures 0.1.29", + "futures 0.1.30", "tokio-io", "tokio-threadpool", ] @@ -9398,15 +9520,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57fc868aae093479e3131e3d165c93b1c7474109d13c90ec0dda2a1bbfff0674" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.30", "log", ] [[package]] name = "tokio-macros" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" +checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" dependencies = [ "proc-macro2", "quote", @@ -9420,7 +9542,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d282d483052288b2308ba5ee795f5673b159c9bdf63c385a05609da782a5eae" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.30", "mio", "mio-named-pipes", "tokio 0.1.22", @@ -9432,8 +9554,8 @@ version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09bc590ec4ba8ba87652da2068d150dcada2cfa2e07faae270a5e0409aa51351" dependencies = [ - "crossbeam-utils", - "futures 0.1.29", + "crossbeam-utils 0.7.2", + "futures 0.1.30", "lazy_static", "log", "mio", @@ -9447,13 +9569,13 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "228139ddd4fea3fa345a29233009635235833e52807af7ea6448ead03890d6a9" +checksum = "e12831b255bcfa39dc0436b01e19fea231a37db570686c06ee72c423479f889a" dependencies = [ "futures-core", "rustls", - "tokio 0.2.22", + "tokio 0.2.23", "webpki", ] @@ -9463,7 +9585,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24da22d077e0f15f55162bdbdc661228c1581892f52074fb242678d015b45162" dependencies = [ - "futures 0.1.29", + "futures 0.1.30", ] [[package]] @@ -9473,7 +9595,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edfe50152bc8164fcc456dab7891fa9bf8beaf01c5ee7e1dd43a397c3cf87dee" dependencies = [ "fnv", - "futures 0.1.29", + "futures 0.1.30", ] [[package]] @@ -9483,7 +9605,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98df18ed66e3b72e742f185882a9e201892407957e45fbff8da17ae7a7c51f72" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.30", "iovec", "mio", "tokio-io", @@ -9496,10 +9618,10 @@ version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df720b6581784c118f0eb4310796b12b1d242a7eb95f716a8367855325c25f89" dependencies = [ - "crossbeam-deque", + "crossbeam-deque 0.7.3", "crossbeam-queue", - "crossbeam-utils", - "futures 0.1.29", + "crossbeam-utils 0.7.2", + "futures 0.1.30", "lazy_static", "log", "num_cpus", @@ -9513,8 +9635,8 @@ version = "0.2.13" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "93044f2d313c95ff1cb7809ce9a7a05735b012288a888b62d4434fd58c94f296" dependencies = [ - "crossbeam-utils", - "futures 0.1.29", + "crossbeam-utils 0.7.2", + "futures 0.1.30", "slab", "tokio-executor", ] @@ -9526,7 +9648,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2a0b10e610b39c38b031a2fcab08e4b82f16ece36504988dcbd81dbba650d82" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.30", "log", "mio", "tokio-codec", @@ -9541,7 +9663,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab57a4ac4111c8c9dbcf70779f6fc8bc35ae4b2454809febac840ad19bd7e4e0" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.30", "iovec", "libc", "log", @@ -9563,14 +9685,14 @@ dependencies = [ "futures-sink", "log", "pin-project-lite", - "tokio 0.2.22", + "tokio 0.2.23", ] [[package]] name = "toml" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc92d160b1eef40665be3a05630d003936a3bc7da7421277846c2613e92c71a" +checksum = "75cf45bb0bef80604d001caaec0d09da99611b3c0fd39d3080468875cdb65645" dependencies = [ "serde", ] @@ -9620,7 +9742,7 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab7bb6f14721aa00656086e9335d363c5c8747bae02ebe32ea2c7dece5689b4c" dependencies = [ - "pin-project 0.4.22", + "pin-project 0.4.27", "tracing", ] @@ -9647,9 +9769,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.2.13" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ef0a5e15477aa303afbfac3a44cba9b6430fdaad52423b1e6c0dbbe28c3eedd" +checksum = "a1fa8f0c8f4c594e4fc9debc1990deab13238077271ba84dd853d54902ee3401" dependencies = [ "ansi_term 0.12.1", "chrono", @@ -9659,7 +9781,7 @@ dependencies = [ "serde", "serde_json", "sharded-slab", - "smallvec 1.4.1", + "smallvec 1.5.0", "thread_local", "tracing", "tracing-core", @@ -9691,15 +9813,15 @@ dependencies = [ [[package]] name = "trie-db" -version = "0.22.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39f1a9a9252d38c5337cf0c5392988821a5cf1b2103245016968f2ab41de9e38" +checksum = "9e55f7ace33d6237e14137e386f4e1672e2a5c6bbc97fef9f438581a143971f0" dependencies = [ "hash-db", - "hashbrown 0.8.1", + "hashbrown 0.8.2", "log", "rustc-hex", - "smallvec 1.4.1", + "smallvec 1.5.0", ] [[package]] @@ -9721,6 +9843,16 @@ dependencies = [ "keccak-hasher", ] +[[package]] +name = "triehash" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f490aa7aa4e4d07edeba442c007e42e3e7f43aafb5112c5b047fff0b1aa5449c" +dependencies = [ + "hash-db", + "rlp", +] + [[package]] name = "try-lock" version = "0.2.3" @@ -9744,11 +9876,13 @@ dependencies = [ [[package]] name = "twox-hash" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bfd5b7557925ce778ff9b9ef90e3ade34c524b5ff10e239c69a42d546d2af56" +checksum = "04f8ab788026715fa63b31960869617cba39117e520eb415b0139543e325ab59" dependencies = [ + "cfg-if 0.1.10", "rand 0.7.3", + "static_assertions", ] [[package]] @@ -9765,9 +9899,9 @@ checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" [[package]] name = "uint" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"173cd16430c206dc1a430af8a89a0e9c076cf15cb42b4aedb10e8cc8fee73681" +checksum = "9db035e67dfaf7edd9aebfe8676afcd63eed53c8a4044fed514c8cccf1835177" dependencies = [ "byteorder", "crunchy", @@ -9795,18 +9929,18 @@ dependencies = [ [[package]] name = "unicode-normalization" -version = "0.1.13" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb19cf769fa8c6a80a162df694621ebeb4dafb606470b2b2fce0be40a98a977" +checksum = "a13e63ab62dbe32aeee58d1c5408d35c36c392bba5d9d3142287219721afe606" dependencies = [ "tinyvec", ] [[package]] name = "unicode-segmentation" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" +checksum = "db8716a166f290ff49dabc18b44aa407cb7c6dbe1aa0971b44b8a24b0ca35aae" [[package]] name = "unicode-width" @@ -9826,8 +9960,8 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" dependencies = [ - "generic-array 0.14.3", - "subtle 2.2.3", + "generic-array 0.14.4", + "subtle 2.3.0", ] [[package]] @@ -9873,10 +10007,11 @@ dependencies = [ [[package]] name = "url" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb" +checksum = "5909f2b0817350449ed73e8bcd81c8c3c8d9a7a5d8acba4b27db277f1868976e" dependencies = [ + "form_urlencoded", "idna 0.2.0", "matches", "percent-encoding 2.1.0", @@ -9923,9 +10058,9 @@ dependencies = [ [[package]] name = "waker-fn" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9571542c2ce85ce642e6b58b3364da2fb53526360dfb7c211add4f5c23105ff7" +checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" [[package]] name = "walkdir" @@ -9944,7 +10079,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6395efa4784b027708f7451087e647ec73cc74f5d9bc2e418404248d679a230" dependencies = [ - "futures 0.1.29", + "futures 0.1.30", "log", "try-lock", ] @@ -9965,6 +10100,12 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + [[package]] name = "wasm-bindgen" version = "0.2.67" @@ -10074,7 +10215,7 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "324c5e65a08699c9c4334ba136597ab22b85dccd4b65dd1e36ccf8f723a95b54" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "js-sys", "parking_lot 0.9.0", "pin-utils", @@ -10134,7 +10275,7 @@ dependencies = [ "log", "region", "rustc-demangle", - "smallvec 1.4.1", + "smallvec 1.5.0", "target-lexicon", "wasmparser 0.59.0", "wasmtime-environ", @@ -10277,27 +10418,27 @@ dependencies = [ [[package]] name = "wast" -version = "21.0.0" +version = "27.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b1844f66a2bc8526d71690104c0e78a8e59ffa1597b7245769d174ebb91deb5" +checksum = "c2c3ef5f6a72dffa44c24d5811123f704e18a1dbc83637d347b1852b41d3835c" dependencies = [ "leb128", ] [[package]] 
name = "wat" -version = "1.0.22" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce85d72b74242c340e9e3492cfb602652d7bb324c3172dd441b5577e39a2e18c" +checksum = "835cf59c907f67e2bbc20f50157e08f35006fe2a8444d8ec9f5683e22f937045" dependencies = [ "wast", ] [[package]] name = "web-sys" -version = "0.3.39" +version = "0.3.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bc359e5dd3b46cb9687a051d50a2fdd228e4ba7cf6fcf861a5365c3d671a642" +checksum = "dda38f4e5ca63eda02c059d243aa25b5f35ab98451e518c51612cd0f1bd19a47" dependencies = [ "js-sys", "wasm-bindgen", @@ -10323,10 +10464,10 @@ dependencies = [ ] [[package]] -name = "wepoll-sys-stjepang" -version = "1.0.6" +name = "wepoll-sys" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fd319e971980166b53e17b1026812ad66c6b54063be879eb182342b55284694" +checksum = "0fcb14dea929042224824779fbc82d9fab8d2e6d3cbc0ac404de8edf489e77ff" dependencies = [ "cc", ] @@ -10393,17 +10534,6 @@ dependencies = [ "winapi-build", ] -[[package]] -name = "x25519-dalek" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "637ff90c9540fa3073bb577e65033069e4bae7c79d49d74aa3ffdf5342a53217" -dependencies = [ - "curve25519-dalek 2.1.0", - "rand_core 0.5.1", - "zeroize", -] - [[package]] name = "x25519-dalek" version = "1.1.0" @@ -10421,10 +10551,10 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9aeb8c4043cac71c3c299dff107171c220d179492350ea198e109a414981b83c" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "log", "nohash-hasher", - "parking_lot 0.11.0", + "parking_lot 0.11.1", "rand 0.7.3", "static_assertions", ] @@ -10440,9 +10570,9 @@ dependencies = [ [[package]] name = "zeroize_derive" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de251eec69fc7c1bc3923403d18ececb929380e016afe103da75f396704f8ca2" +checksum = "c3f369ddb18862aba61aa49bf31e74d29f0f162dec753063200e1dc084345d16" dependencies = [ "proc-macro2", "quote", diff --git a/bin/node/bench/src/tempdb.rs b/bin/node/bench/src/tempdb.rs index 4020fd1029368..abce7daa518bf 100644 --- a/bin/node/bench/src/tempdb.rs +++ b/bin/node/bench/src/tempdb.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use std::{io, sync::Arc}; +use std::{io, path::PathBuf, sync::Arc}; use kvdb::{KeyValueDB, DBTransaction}; use kvdb_rocksdb::{DatabaseConfig, Database}; @@ -124,7 +124,7 @@ impl Clone for TempDatabase { .map(|f_result| f_result.expect("failed to read file in seed db") .path() - ).collect(); + ).collect::>(); fs_extra::copy_items( &self_db_files, new_dir.path(), diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index 32e4bab9773a5..a123da25301d0 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -172,7 +172,7 @@ impl Clone for BenchDb { .map(|f_result| f_result.expect("failed to read file in seed db") .path() - ).collect(); + ).collect::>(); fs_extra::copy_items( &seed_db_files, dir.path(), From 5f17393cdd9748993b8370d7f166d9538ccdb29c Mon Sep 17 00:00:00 2001 From: Max Inden Date: Fri, 20 Nov 2020 14:34:19 +0100 Subject: [PATCH 26/34] .github: Add dependabot config and thus enable dependabot (#7509) * .github: Add dependabot config and thus enable dependabot * Update .github/dependabot.yml Co-authored-by: Pierre Krieger Co-authored-by: Pierre Krieger --- .github/dependabot.yml | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000000..d782bb80f7539 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,7 @@ +version: 2 +updates: + - package-ecosystem: "cargo" + directory: "/" + labels: ["A2-insubstantial", "B0-silent", "C1-low"] + schedule: + interval: "daily" From c7ee055c7ba09a6db2d7f986c03fdf956118c20d Mon Sep 17 00:00:00 2001 From: Parity Benchmarking Bot Date: Fri, 20 Nov 2020 17:44:32 +0000 Subject: [PATCH 27/34] cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_treasury --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/treasury/src/weights.rs --template=./.maintain/frame-weight-template.hbs --- frame/treasury/src/weights.rs | 258 ++-------------------------------- 1 file changed, 13 insertions(+), 245 deletions(-) diff --git a/frame/treasury/src/weights.rs b/frame/treasury/src/weights.rs index 646b9869f47ef..a3f407523f1ca 100644 --- a/frame/treasury/src/weights.rs +++ b/frame/treasury/src/weights.rs @@ -15,9 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Weights for pallet_treasury +//! Autogenerated weights for pallet_treasury +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-10-27, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-11-20, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -46,293 +47,60 @@ pub trait WeightInfo { fn propose_spend() -> Weight; fn reject_proposal() -> Weight; fn approve_proposal() -> Weight; - fn report_awesome(r: u32, ) -> Weight; - fn retract_tip() -> Weight; - fn tip_new(r: u32, t: u32, ) -> Weight; - fn tip(t: u32, ) -> Weight; - fn close_tip(t: u32, ) -> Weight; - fn propose_bounty(d: u32, ) -> Weight; - fn approve_bounty() -> Weight; - fn propose_curator() -> Weight; - fn unassign_curator() -> Weight; - fn accept_curator() -> Weight; - fn award_bounty() -> Weight; - fn claim_bounty() -> Weight; - fn close_bounty_proposed() -> Weight; - fn close_bounty_active() -> Weight; - fn extend_bounty_expiry() -> Weight; fn on_initialize_proposals(p: u32, ) -> Weight; - fn on_initialize_bounties(b: u32, ) -> Weight; - } /// Weights for pallet_treasury using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn propose_spend() -> Weight { - (56_844_000 as Weight) + (55_957_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn reject_proposal() -> Weight { - (46_098_000 as Weight) + (45_616_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn approve_proposal() -> Weight { - (13_622_000 as Weight) + (13_362_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - - } - fn report_awesome(r: u32, ) -> Weight { - (71_823_000 as Weight) - .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn retract_tip() -> Weight { - (60_150_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn tip_new(r: u32, t: u32, ) -> Weight { - (46_522_000 as Weight) - .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((145_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn tip(t: u32, ) -> Weight { - (33_790_000 as Weight) - .saturating_add((713_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - - } - fn close_tip(t: u32, ) -> Weight { - (113_040_000 as Weight) - .saturating_add((375_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - - } - fn propose_bounty(d: u32, ) -> Weight { - (60_887_000 as Weight) - .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - - } - fn approve_bounty() -> Weight { - (17_337_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn propose_curator() -> Weight { - (14_068_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - - } - fn unassign_curator() -> Weight { - (49_717_000 as 
Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn accept_curator() -> Weight { - (50_596_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn award_bounty() -> Weight { - (36_030_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - - } - fn claim_bounty() -> Weight { - (167_088_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) - - } - fn close_bounty_proposed() -> Weight { - (48_977_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - - } - fn close_bounty_active() -> Weight { - (110_959_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - - } - fn extend_bounty_expiry() -> Weight { - (34_987_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn on_initialize_proposals(p: u32, ) -> Weight { - (76_596_000 as Weight) - .saturating_add((73_988_000 as Weight).saturating_mul(p as Weight)) + (74_689_000 as Weight) + .saturating_add((71_943_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(p as Weight))) } - fn on_initialize_bounties(b: u32, ) -> Weight { - (75_165_000 as Weight) - .saturating_add((73_634_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) - } - } // For backwards compatibility and tests impl WeightInfo for () { fn propose_spend() -> Weight { - (56_844_000 as Weight) + (55_957_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn reject_proposal() -> Weight { - (46_098_000 as Weight) + (45_616_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn approve_proposal() -> Weight { - (13_622_000 as Weight) + (13_362_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - - } - fn report_awesome(r: u32, ) -> Weight { - (71_823_000 as Weight) - .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn retract_tip() -> Weight { - (60_150_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn tip_new(r: u32, t: u32, ) -> Weight { - (46_522_000 as Weight) - .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((145_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as 
Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn tip(t: u32, ) -> Weight { - (33_790_000 as Weight) - .saturating_add((713_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - - } - fn close_tip(t: u32, ) -> Weight { - (113_040_000 as Weight) - .saturating_add((375_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - - } - fn propose_bounty(d: u32, ) -> Weight { - (60_887_000 as Weight) - .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - - } - fn approve_bounty() -> Weight { - (17_337_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn propose_curator() -> Weight { - (14_068_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - - } - fn unassign_curator() -> Weight { - (49_717_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn accept_curator() -> Weight { - (50_596_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn award_bounty() -> Weight { - (36_030_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - - } - fn claim_bounty() -> Weight { - (167_088_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) - - } - fn close_bounty_proposed() -> Weight { - (48_977_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - - } - fn close_bounty_active() -> Weight { - (110_959_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - - } - fn extend_bounty_expiry() -> Weight { - (34_987_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn on_initialize_proposals(p: u32, ) -> Weight { - (76_596_000 as Weight) - .saturating_add((73_988_000 as Weight).saturating_mul(p as Weight)) + (74_689_000 as Weight) + .saturating_add((71_943_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(p as Weight))) } - fn on_initialize_bounties(b: u32, ) -> Weight { - (75_165_000 as Weight) - .saturating_add((73_634_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) - } - } From da9418e2e9ee20b38ff174c6216df5ee69507fd5 Mon Sep 17 
00:00:00 2001 From: Shawn Tabrizi Date: Fri, 20 Nov 2020 18:54:05 +0100 Subject: [PATCH 28/34] Update lib.rs --- bin/node/runtime/src/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 5c974f9af9a8e..0ab77c592a372 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1208,6 +1208,7 @@ impl_runtime_apis! { add_benchmark!(params, batches, pallet_babe, Babe); add_benchmark!(params, batches, pallet_balances, Balances); + add_benchmark!(params, batches, pallet_bounties, Bounties); add_benchmark!(params, batches, pallet_collective, Council); add_benchmark!(params, batches, pallet_contracts, Contracts); add_benchmark!(params, batches, pallet_democracy, Democracy); @@ -1224,6 +1225,7 @@ impl_runtime_apis! { add_benchmark!(params, batches, pallet_staking, Staking); add_benchmark!(params, batches, frame_system, SystemBench::); add_benchmark!(params, batches, pallet_timestamp, Timestamp); + add_benchmark!(params, batches, pallet_tips, Tips); add_benchmark!(params, batches, pallet_treasury, Treasury); add_benchmark!(params, batches, pallet_utility, Utility); add_benchmark!(params, batches, pallet_vesting, Vesting); From faad534b30d78a7e3ff8979cb839635a34680c5b Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Fri, 20 Nov 2020 18:54:19 +0100 Subject: [PATCH 29/34] Thread-local parameter_types for testing. (#7542) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Thread-local parameter_types for testing. * Better docs. * Some minors * Merge'em * Update frame/support/src/lib.rs Co-authored-by: Bastian Köcher * Align more to basti's trick * Update frame/support/src/lib.rs * Update frame/support/src/lib.rs Co-authored-by: Bastian Köcher Co-authored-by: Bastian Köcher --- frame/balances/src/tests_composite.rs | 12 +--- frame/balances/src/tests_local.rs | 13 +--- frame/contracts/src/tests.rs | 13 +--- frame/democracy/src/tests.rs | 16 +---- frame/elections-phragmen/src/lib.rs | 38 ++--------- frame/elections/src/mock.rs | 46 ++++---------- frame/elections/src/tests.rs | 12 ++-- frame/membership/src/lib.rs | 8 +-- frame/staking/src/mock.rs | 62 +++--------------- frame/support/src/lib.rs | 92 ++++++++++++++++++++++++--- frame/transaction-payment/src/lib.rs | 23 +------ frame/vesting/src/lib.rs | 10 +-- 12 files changed, 128 insertions(+), 217 deletions(-) diff --git a/frame/balances/src/tests_composite.rs b/frame/balances/src/tests_composite.rs index 88b73b47273eb..fd4ba1fd3c305 100644 --- a/frame/balances/src/tests_composite.rs +++ b/frame/balances/src/tests_composite.rs @@ -27,10 +27,8 @@ use sp_runtime::{ use sp_core::H256; use sp_io; use frame_support::{impl_outer_origin, impl_outer_event, parameter_types}; -use frame_support::traits::Get; use frame_support::weights::{Weight, DispatchInfo, IdentityFee}; use pallet_transaction_payment::CurrencyAdapter; -use std::cell::RefCell; use crate::{GenesisConfig, Module, Trait, decl_tests, tests::CallWithDispatchInfo}; use frame_system as system; @@ -49,15 +47,6 @@ impl_outer_event! { } } -thread_local! { - static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); -} - -pub struct ExistentialDeposit; -impl Get for ExistentialDeposit { - fn get() -> u64 { EXISTENTIAL_DEPOSIT.with(|v| *v.borrow()) } -} - // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. #[derive(Clone, PartialEq, Eq, Debug)] pub struct Test; @@ -66,6 +55,7 @@ parameter_types! 
{ pub const MaximumBlockWeight: Weight = 1024; pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub static ExistentialDeposit: u64 = 0; } impl frame_system::Trait for Test { type BaseCallFilter = (); diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs index 319fb3640b4c7..c0a5d23ff1a41 100644 --- a/frame/balances/src/tests_local.rs +++ b/frame/balances/src/tests_local.rs @@ -27,9 +27,8 @@ use sp_runtime::{ use sp_core::H256; use sp_io; use frame_support::{impl_outer_origin, impl_outer_event, parameter_types}; -use frame_support::traits::{Get, StorageMapShim}; +use frame_support::traits::StorageMapShim; use frame_support::weights::{Weight, DispatchInfo, IdentityFee}; -use std::cell::RefCell; use crate::{GenesisConfig, Module, Trait, decl_tests, tests::CallWithDispatchInfo}; use pallet_transaction_payment::CurrencyAdapter; @@ -49,15 +48,6 @@ impl_outer_event! { } } -thread_local! { - static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); -} - -pub struct ExistentialDeposit; -impl Get for ExistentialDeposit { - fn get() -> u64 { EXISTENTIAL_DEPOSIT.with(|v| *v.borrow()) } -} - // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. #[derive(Clone, PartialEq, Eq, Debug)] pub struct Test; @@ -66,6 +56,7 @@ parameter_types! { pub const MaximumBlockWeight: Weight = 1024; pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub static ExistentialDeposit: u64 = 0; } impl frame_system::Trait for Test { type BaseCallFilter = (); diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index c2d9ed6642559..05e46a3ab1585 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -30,11 +30,10 @@ use sp_runtime::{ use frame_support::{ assert_ok, assert_err_ignore_postinfo, impl_outer_dispatch, impl_outer_event, impl_outer_origin, parameter_types, StorageMap, StorageValue, - traits::{Currency, Get, ReservableCurrency}, + traits::{Currency, ReservableCurrency}, weights::{Weight, PostDispatchInfo}, dispatch::DispatchErrorWithPostInfo, }; -use std::cell::RefCell; use frame_system::{self as system, EventRecord, Phase}; mod contracts { @@ -99,15 +98,6 @@ pub mod test_utils { } } -thread_local! { - static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); -} - -pub struct ExistentialDeposit; -impl Get for ExistentialDeposit { - fn get() -> u64 { EXISTENTIAL_DEPOSIT.with(|v| *v.borrow()) } -} - #[derive(Clone, Eq, PartialEq, Debug)] pub struct Test; parameter_types! { @@ -115,6 +105,7 @@ parameter_types! { pub const MaximumBlockWeight: Weight = 1024; pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub static ExistentialDeposit: u64 = 0; } impl frame_system::Trait for Test { type BaseCallFilter = (); diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs index bcc7099bb34a4..25209901109fa 100644 --- a/frame/democracy/src/tests.rs +++ b/frame/democracy/src/tests.rs @@ -18,7 +18,6 @@ //! The crate's tests. use super::*; -use std::cell::RefCell; use codec::Encode; use frame_support::{ impl_outer_origin, impl_outer_dispatch, assert_noop, assert_ok, parameter_types, @@ -154,6 +153,8 @@ parameter_types! { pub const CooloffPeriod: u64 = 2; pub const MaxVotes: u32 = 100; pub const MaxProposals: u32 = MAX_PROPOSALS; + pub static PreimageByteDeposit: u64 = 0; + pub static InstantAllowed: bool = false; } ord_parameter_types! 
{ pub const One: u64 = 1; @@ -171,18 +172,7 @@ impl Contains for OneToFive { #[cfg(feature = "runtime-benchmarks")] fn add(_m: &u64) {} } -thread_local! { - static PREIMAGE_BYTE_DEPOSIT: RefCell = RefCell::new(0); - static INSTANT_ALLOWED: RefCell = RefCell::new(false); -} -pub struct PreimageByteDeposit; -impl Get for PreimageByteDeposit { - fn get() -> u64 { PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow()) } -} -pub struct InstantAllowed; -impl Get for InstantAllowed { - fn get() -> bool { INSTANT_ALLOWED.with(|v| *v.borrow()) } -} + impl super::Trait for Test { type Proposal = Call; type Event = Event; diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index be47b5adcce5e..8279f9cf11f16 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -1058,7 +1058,6 @@ impl ContainsLengthBound for Module { #[cfg(test)] mod tests { use super::*; - use std::cell::RefCell; use frame_support::{assert_ok, assert_noop, assert_err_with_weight, parameter_types, weights::Weight, }; @@ -1123,36 +1122,13 @@ mod tests { pub const CandidacyBond: u64 = 3; } - thread_local! { - static VOTING_BOND: RefCell = RefCell::new(2); - static DESIRED_MEMBERS: RefCell = RefCell::new(2); - static DESIRED_RUNNERS_UP: RefCell = RefCell::new(2); - static TERM_DURATION: RefCell = RefCell::new(5); - } - - pub struct VotingBond; - impl Get for VotingBond { - fn get() -> u64 { VOTING_BOND.with(|v| *v.borrow()) } - } - - pub struct DesiredMembers; - impl Get for DesiredMembers { - fn get() -> u32 { DESIRED_MEMBERS.with(|v| *v.borrow()) } - } - - pub struct DesiredRunnersUp; - impl Get for DesiredRunnersUp { - fn get() -> u32 { DESIRED_RUNNERS_UP.with(|v| *v.borrow()) } - } - - pub struct TermDuration; - impl Get for TermDuration { - fn get() -> u64 { TERM_DURATION.with(|v| *v.borrow()) } - } - - thread_local! { - pub static MEMBERS: RefCell> = RefCell::new(vec![]); - pub static PRIME: RefCell> = RefCell::new(None); + frame_support::parameter_types! { + pub static VotingBond: u64 = 2; + pub static DesiredMembers: u32 = 2; + pub static DesiredRunnersUp: u32 = 2; + pub static TermDuration: u64 = 5; + pub static Members: Vec = vec![]; + pub static Prime: Option = None; } pub struct TestChangeMembers; diff --git a/frame/elections/src/mock.rs b/frame/elections/src/mock.rs index deec77da7b837..0d57089af5ef8 100644 --- a/frame/elections/src/mock.rs +++ b/frame/elections/src/mock.rs @@ -19,10 +19,9 @@ #![cfg(test)] -use std::cell::RefCell; use frame_support::{ StorageValue, StorageMap, parameter_types, assert_ok, - traits::{Get, ChangeMembers, Currency, LockIdentifier}, + traits::{ChangeMembers, Currency, LockIdentifier}, weights::Weight, }; use sp_core::H256; @@ -85,34 +84,11 @@ parameter_types! { pub const InactiveGracePeriod: u32 = 1; pub const VotingPeriod: u64 = 4; pub const MinimumVotingLock: u64 = 5; -} - -thread_local! 
{ - static VOTER_BOND: RefCell = RefCell::new(0); - static VOTING_FEE: RefCell = RefCell::new(0); - static PRESENT_SLASH_PER_VOTER: RefCell = RefCell::new(0); - static DECAY_RATIO: RefCell = RefCell::new(0); - static MEMBERS: RefCell> = RefCell::new(vec![]); -} - -pub struct VotingBond; -impl Get for VotingBond { - fn get() -> u64 { VOTER_BOND.with(|v| *v.borrow()) } -} - -pub struct VotingFee; -impl Get for VotingFee { - fn get() -> u64 { VOTING_FEE.with(|v| *v.borrow()) } -} - -pub struct PresentSlashPerVoter; -impl Get for PresentSlashPerVoter { - fn get() -> u64 { PRESENT_SLASH_PER_VOTER.with(|v| *v.borrow()) } -} - -pub struct DecayRatio; -impl Get for DecayRatio { - fn get() -> u32 { DECAY_RATIO.with(|v| *v.borrow()) } + pub static VotingBond: u64 = 0; + pub static VotingFee: u64 = 0; + pub static PresentSlashPerVoter: u64 = 0; + pub static DecayRatio: u32 = 0; + pub static Members: Vec = vec![]; } pub struct TestChangeMembers; @@ -175,7 +151,7 @@ pub struct ExtBuilder { decay_ratio: u32, desired_seats: u32, voting_fee: u64, - voter_bond: u64, + voting_bond: u64, bad_presentation_punishment: u64, } @@ -186,7 +162,7 @@ impl Default for ExtBuilder { decay_ratio: 24, desired_seats: 2, voting_fee: 0, - voter_bond: 0, + voting_bond: 0, bad_presentation_punishment: 1, } } @@ -209,8 +185,8 @@ impl ExtBuilder { self.bad_presentation_punishment = fee; self } - pub fn voter_bond(mut self, fee: u64) -> Self { - self.voter_bond = fee; + pub fn voting_bond(mut self, fee: u64) -> Self { + self.voting_bond = fee; self } pub fn desired_seats(mut self, seats: u32) -> Self { @@ -218,7 +194,7 @@ impl ExtBuilder { self } pub fn build(self) -> sp_io::TestExternalities { - VOTER_BOND.with(|v| *v.borrow_mut() = self.voter_bond); + VOTING_BOND.with(|v| *v.borrow_mut() = self.voting_bond); VOTING_FEE.with(|v| *v.borrow_mut() = self.voting_fee); PRESENT_SLASH_PER_VOTER.with(|v| *v.borrow_mut() = self.bad_presentation_punishment); DECAY_RATIO.with(|v| *v.borrow_mut() = self.decay_ratio); diff --git a/frame/elections/src/tests.rs b/frame/elections/src/tests.rs index 92f6e11252b05..d3579ca337436 100644 --- a/frame/elections/src/tests.rs +++ b/frame/elections/src/tests.rs @@ -298,7 +298,7 @@ fn voting_initial_set_approvals_ignores_voter_index() { } #[test] fn voting_bad_approval_index_slashes_voters_and_bond_reduces_stake() { - ExtBuilder::default().voting_fee(5).voter_bond(2).build().execute_with(|| { + ExtBuilder::default().voting_fee(5).voting_bond(2).build().execute_with(|| { assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); (1..=63).for_each(|i| vote(i, 0)); @@ -365,7 +365,7 @@ fn voting_cannot_lock_less_than_limit() { #[test] fn voting_locking_more_than_total_balance_is_moot() { - ExtBuilder::default().voter_bond(2).build().execute_with(|| { + ExtBuilder::default().voting_bond(2).build().execute_with(|| { assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); assert_eq!(balances(&3), (30, 0)); @@ -381,7 +381,7 @@ fn voting_locking_more_than_total_balance_is_moot() { #[test] fn voting_locking_stake_and_reserving_bond_works() { - ExtBuilder::default().voter_bond(2).build().execute_with(|| { + ExtBuilder::default().voting_bond(2).build().execute_with(|| { assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); assert_eq!(balances(&2), (20, 0)); @@ -558,7 +558,7 @@ fn retracting_inactive_voter_should_work() { #[test] fn retracting_inactive_voter_with_other_candidates_in_slots_should_work() { - ExtBuilder::default().voter_bond(2).build().execute_with(|| { + 
ExtBuilder::default().voting_bond(2).build().execute_with(|| { System::set_block_number(4); assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); assert_ok!(Elections::set_approvals(Origin::signed(2), vec![true], 0, 0, 20)); @@ -1107,7 +1107,7 @@ fn election_present_when_presenter_is_poor_should_not_work() { let test_present = |p| { ExtBuilder::default() .voting_fee(5) - .voter_bond(2) + .voting_bond(2) .bad_presentation_punishment(p) .build() .execute_with(|| { @@ -1507,7 +1507,7 @@ fn pot_winning_resets_accumulated_pot() { #[test] fn pot_resubmitting_approvals_stores_pot() { ExtBuilder::default() - .voter_bond(0) + .voting_bond(0) .voting_fee(0) .balance_factor(10) .build() diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index 492fda88dd170..06188c42b21bd 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -278,7 +278,6 @@ impl, I: Instance> Contains for Module { mod tests { use super::*; - use std::cell::RefCell; use frame_support::{ assert_ok, assert_noop, impl_outer_origin, parameter_types, weights::Weight, ord_parameter_types @@ -298,6 +297,8 @@ mod tests { pub const MaximumBlockWeight: Weight = 1024; pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub static Members: Vec = vec![]; + pub static Prime: Option = None; } impl frame_system::Trait for Test { type BaseCallFilter = (); @@ -334,11 +335,6 @@ mod tests { pub const Five: u64 = 5; } - thread_local! { - static MEMBERS: RefCell> = RefCell::new(vec![]); - static PRIME: RefCell> = RefCell::new(None); - } - pub struct TestChangeMembers; impl ChangeMembers for TestChangeMembers { fn change_members_sorted(incoming: &[u64], outgoing: &[u64], new: &[u64]) { diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 055ebb9730805..3aa3e9ae03d74 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -47,12 +47,6 @@ pub(crate) type Balance = u128; thread_local! { static SESSION: RefCell<(Vec, HashSet)> = RefCell::new(Default::default()); - static SESSION_PER_ERA: RefCell = RefCell::new(3); - static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); - static SLASH_DEFER_DURATION: RefCell = RefCell::new(0); - static ELECTION_LOOKAHEAD: RefCell = RefCell::new(0); - static PERIOD: RefCell = RefCell::new(1); - static MAX_ITERATIONS: RefCell = RefCell::new(0); } /// Another session handler struct to test on_disabled. @@ -92,53 +86,6 @@ pub fn is_disabled(controller: AccountId) -> bool { SESSION.with(|d| d.borrow().1.contains(&stash)) } -pub struct ExistentialDeposit; -impl Get for ExistentialDeposit { - fn get() -> Balance { - EXISTENTIAL_DEPOSIT.with(|v| *v.borrow()) - } -} - -pub struct SessionsPerEra; -impl Get for SessionsPerEra { - fn get() -> SessionIndex { - SESSION_PER_ERA.with(|v| *v.borrow()) - } -} -impl Get for SessionsPerEra { - fn get() -> BlockNumber { - SESSION_PER_ERA.with(|v| *v.borrow() as BlockNumber) - } -} - -pub struct ElectionLookahead; -impl Get for ElectionLookahead { - fn get() -> BlockNumber { - ELECTION_LOOKAHEAD.with(|v| *v.borrow()) - } -} - -pub struct Period; -impl Get for Period { - fn get() -> BlockNumber { - PERIOD.with(|v| *v.borrow()) - } -} - -pub struct SlashDeferDuration; -impl Get for SlashDeferDuration { - fn get() -> EraIndex { - SLASH_DEFER_DURATION.with(|v| *v.borrow()) - } -} - -pub struct MaxIterations; -impl Get for MaxIterations { - fn get() -> u32 { - MAX_ITERATIONS.with(|v| *v.borrow()) - } -} - impl_outer_origin! 
{ pub enum Origin for Test where system = frame_system {} } @@ -186,7 +133,14 @@ parameter_types! { pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::one(); pub const MaxLocks: u32 = 1024; + pub static SessionsPerEra: SessionIndex = 3; + pub static ExistentialDeposit: Balance = 0; + pub static SlashDeferDuration: EraIndex = 0; + pub static ElectionLookahead: BlockNumber = 0; + pub static Period: BlockNumber = 1; + pub static MaxIterations: u32 = 0; } + impl frame_system::Trait for Test { type BaseCallFilter = (); type Origin = Origin; @@ -437,7 +391,7 @@ impl ExtBuilder { pub fn set_associated_constants(&self) { EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); SLASH_DEFER_DURATION.with(|v| *v.borrow_mut() = self.slash_defer_duration); - SESSION_PER_ERA.with(|v| *v.borrow_mut() = self.session_per_era); + SESSIONS_PER_ERA.with(|v| *v.borrow_mut() = self.session_per_era); ELECTION_LOOKAHEAD.with(|v| *v.borrow_mut() = self.election_lookahead); PERIOD.with(|v| *v.borrow_mut() = self.session_length); MAX_ITERATIONS.with(|v| *v.borrow_mut() = self.max_offchain_iterations); diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index a132b787fd9bf..5dd452dbbe7b9 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -85,21 +85,27 @@ pub enum Never {} /// Create new implementations of the [`Get`](crate::traits::Get) trait. /// -/// The so-called parameter type can be created in three different ways: +/// The so-called parameter type can be created in four different ways: /// -/// - Using `const` to create a parameter type that provides a `const` getter. -/// It is required that the `value` is const. +/// - Using `const` to create a parameter type that provides a `const` getter. It is required that +/// the `value` is const. /// /// - Declare the parameter type without `const` to have more freedom when creating the value. /// -/// - Using `storage` to create a storage parameter type. This type is special as it tries to -/// load the value from the storage under a fixed key. If the value could not be found in the -/// storage, the given default value will be returned. It is required that the value implements -/// [`Encode`](codec::Encode) and [`Decode`](codec::Decode). The key for looking up the value -/// in the storage is built using the following formular: +/// - Using `storage` to create a storage parameter type. This type is special as it tries to load +/// the value from the storage under a fixed key. If the value could not be found in the storage, +/// the given default value will be returned. It is required that the value implements +/// [`Encode`](codec::Encode) and [`Decode`](codec::Decode). The key for looking up the value in +/// the storage is built using the following formula: /// /// `twox_128(":" ++ NAME ++ ":")` where `NAME` is the name that is passed as type name. /// +/// - Using `static` to create a static parameter type. Its value is +/// being provided by a static variable with the equivalent name in `UPPER_SNAKE_CASE`. An +/// additional `set` function is provided in this case to alter the static variable. 
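For illustration only (a sketch, not part of the patch hunk): a declaration such as `pub static ExistentialDeposit: u64 = 0;` behaves roughly like the hand-written pattern the test mocks above are migrating away from — a thread-local `RefCell` named in `UPPER_SNAKE_CASE`, a unit struct implementing `Get`, plus the new `set` helper. Simplified hand-written equivalent (the real macro routes through `parameter_types!` and `paste`, and implements `Get<I>` for any `I: From<u64>`):

use frame_support::traits::Get;
use std::cell::RefCell;

thread_local! {
    // Backing storage; the macro derives this name from `ExistentialDeposit`.
    pub static EXISTENTIAL_DEPOSIT: RefCell<u64> = RefCell::new(0);
}

pub struct ExistentialDeposit;

impl Get<u64> for ExistentialDeposit {
    fn get() -> u64 {
        EXISTENTIAL_DEPOSIT.with(|v| v.borrow().clone())
    }
}

impl ExistentialDeposit {
    // Alter the value for the current test thread.
    pub fn set(t: u64) {
        EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = t);
    }
}

A test can then call `ExistentialDeposit::set(256);` before building its externalities instead of poking the `RefCell` directly.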
+/// +/// **This is intended for testing ONLY and is ONLY available when `std` is enabled** +/// /// # Examples /// /// ``` @@ -114,12 +120,14 @@ pub enum Never {} /// /// Visibility of the type is optional /// OtherArgument: u64 = non_const_expression(); /// pub storage StorageArgument: u64 = 5; +/// pub static StaticArgument: u32 = 7; /// } /// /// trait Config { /// type Parameter: Get; /// type OtherParameter: Get; /// type StorageParameter: Get; +/// type StaticParameter: Get; /// } /// /// struct Runtime; @@ -127,7 +135,10 @@ pub enum Never {} /// type Parameter = Argument; /// type OtherParameter = OtherArgument; /// type StorageParameter = StorageArgument; +/// type StaticParameter = StaticArgument; /// } +/// +/// // In testing, `StaticArgument` can be altered later: `StaticArgument::set(8)`. /// ``` /// /// # Invalid example: @@ -142,7 +153,6 @@ pub enum Never {} /// pub const Argument: u64 = non_const_expression(); /// } /// ``` - #[macro_export] macro_rules! parameter_types { ( @@ -235,7 +245,69 @@ macro_rules! parameter_types { I::from(Self::get()) } } - } + }; + ( + $( + $( #[ $attr:meta ] )* + $vis:vis static $name:ident: $type:ty = $value:expr; + )* + ) => ( + $crate::parameter_types_impl_thread_local!( + $( + $( #[ $attr ] )* + $vis static $name: $type = $value; + )* + ); + ); +} + +#[cfg(not(feature = "std"))] +#[macro_export] +macro_rules! parameter_types_impl_thread_local { + ( $( $any:tt )* ) => { + compile_error!("static parameter types is only available in std and for testing."); + }; +} + +#[cfg(feature = "std")] +#[macro_export] +macro_rules! parameter_types_impl_thread_local { + ( + $( + $( #[ $attr:meta ] )* + $vis:vis static $name:ident: $type:ty = $value:expr; + )* + ) => { + $crate::parameter_types_impl_thread_local!( + IMPL_THREAD_LOCAL $( $vis, $name, $type, $value, )* + ); + $crate::paste::item! { + $crate::parameter_types!( + $( + $( #[ $attr ] )* + $vis $name: $type = [<$name:snake:upper>].with(|v| v.borrow().clone()); + )* + ); + $( + impl $name { + /// Set the internal value. + pub fn set(t: $type) { + [<$name:snake:upper>].with(|v| *v.borrow_mut() = t); + } + } + )* + } + }; + (IMPL_THREAD_LOCAL $( $vis:vis, $name:ident, $type:ty, $value:expr, )* ) => { + $crate::paste::item! { + thread_local! { + $( + pub static [<$name:snake:upper>]: std::cell::RefCell<$type> = + std::cell::RefCell::new($value); + )* + } + } + }; } /// Macro for easily creating a new implementation of both the `Get` and `Contains` traits. Use diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index dd310c2639842..751aa57da0f80 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -571,7 +571,6 @@ mod tests { traits::{BlakeTwo256, IdentityLookup}, Perbill, }; - use std::cell::RefCell; use smallvec::smallvec; const CALL: &::Call = @@ -599,20 +598,14 @@ mod tests { pub enum Origin for Runtime {} } - thread_local! { - static EXTRINSIC_BASE_WEIGHT: RefCell = RefCell::new(0); - } - - pub struct ExtrinsicBaseWeight; - impl Get for ExtrinsicBaseWeight { - fn get() -> u64 { EXTRINSIC_BASE_WEIGHT.with(|v| *v.borrow()) } - } - parameter_types! 
{ pub const BlockHashCount: u64 = 250; pub const MaximumBlockWeight: Weight = 1024; pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub static ExtrinsicBaseWeight: u64 = 0; + pub static TransactionByteFee: u64 = 1; + pub static WeightToFee: u64 = 1; } impl frame_system::Trait for Runtime { @@ -656,17 +649,7 @@ mod tests { type MaxLocks = (); type WeightInfo = (); } - thread_local! { - static TRANSACTION_BYTE_FEE: RefCell = RefCell::new(1); - static WEIGHT_TO_FEE: RefCell = RefCell::new(1); - } - - pub struct TransactionByteFee; - impl Get for TransactionByteFee { - fn get() -> u64 { TRANSACTION_BYTE_FEE.with(|v| *v.borrow()) } - } - pub struct WeightToFee; impl WeightToFeePolynomial for WeightToFee { type Balance = u64; diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index 8b78eac4fedf8..c09516c2cc27c 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -390,10 +390,8 @@ impl VestingSchedule for Module where mod tests { use super::*; - use std::cell::RefCell; use frame_support::{ assert_ok, assert_noop, impl_outer_origin, parameter_types, weights::Weight, - traits::Get }; use sp_core::H256; use sp_runtime::{ @@ -456,6 +454,7 @@ mod tests { } parameter_types! { pub const MinVestedTransfer: u64 = 256 * 2; + pub static ExistentialDeposit: u64 = 0; } impl Trait for Test { type Event = (); @@ -468,13 +467,6 @@ mod tests { type Balances = pallet_balances::Module; type Vesting = Module; - thread_local! { - static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); - } - pub struct ExistentialDeposit; - impl Get for ExistentialDeposit { - fn get() -> u64 { EXISTENTIAL_DEPOSIT.with(|v| *v.borrow()) } - } pub struct ExtBuilder { existential_deposit: u64, From 9524f6913cd1513323d6f4a51905b64def379a55 Mon Sep 17 00:00:00 2001 From: Parity Benchmarking Bot Date: Fri, 20 Nov 2020 17:58:31 +0000 Subject: [PATCH 30/34] cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_tips --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/tips/src/weights.rs --template=./.maintain/frame-weight-template.hbs --- frame/tips/src/weights.rs | 263 ++++---------------------------------- 1 file changed, 23 insertions(+), 240 deletions(-) diff --git a/frame/tips/src/weights.rs b/frame/tips/src/weights.rs index 646b9869f47ef..f0e9aba44d540 100644 --- a/frame/tips/src/weights.rs +++ b/frame/tips/src/weights.rs @@ -15,9 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Weights for pallet_treasury +//! Autogenerated weights for pallet_tips +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-10-27, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-11-20, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -26,12 +27,12 @@ // --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_treasury +// --pallet=pallet_tips // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/treasury/src/weights.rs +// --output=./frame/tips/src/weights.rs // --template=./.maintain/frame-weight-template.hbs @@ -41,298 +42,80 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; -/// Weight functions needed for pallet_treasury. +/// Weight functions needed for pallet_tips. pub trait WeightInfo { - fn propose_spend() -> Weight; - fn reject_proposal() -> Weight; - fn approve_proposal() -> Weight; fn report_awesome(r: u32, ) -> Weight; fn retract_tip() -> Weight; fn tip_new(r: u32, t: u32, ) -> Weight; fn tip(t: u32, ) -> Weight; fn close_tip(t: u32, ) -> Weight; - fn propose_bounty(d: u32, ) -> Weight; - fn approve_bounty() -> Weight; - fn propose_curator() -> Weight; - fn unassign_curator() -> Weight; - fn accept_curator() -> Weight; - fn award_bounty() -> Weight; - fn claim_bounty() -> Weight; - fn close_bounty_proposed() -> Weight; - fn close_bounty_active() -> Weight; - fn extend_bounty_expiry() -> Weight; - fn on_initialize_proposals(p: u32, ) -> Weight; - fn on_initialize_bounties(b: u32, ) -> Weight; - } -/// Weights for pallet_treasury using the Substrate node and recommended hardware. +/// Weights for pallet_tips using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - fn propose_spend() -> Weight { - (56_844_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn reject_proposal() -> Weight { - (46_098_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn approve_proposal() -> Weight { - (13_622_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - - } fn report_awesome(r: u32, ) -> Weight { - (71_823_000 as Weight) + (70_338_000 as Weight) .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn retract_tip() -> Weight { - (60_150_000 as Weight) + (59_051_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn tip_new(r: u32, t: u32, ) -> Weight { - (46_522_000 as Weight) + (41_984_000 as Weight) .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((145_000 as Weight).saturating_mul(t as Weight)) + .saturating_add((180_000 as Weight).saturating_mul(t as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn tip(t: u32, ) -> Weight { - (33_790_000 as Weight) - .saturating_add((713_000 as Weight).saturating_mul(t as Weight)) + (33_313_000 as Weight) + .saturating_add((700_000 as Weight).saturating_mul(t as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn close_tip(t: u32, ) -> Weight { - (113_040_000 as Weight) - .saturating_add((375_000 as Weight).saturating_mul(t as Weight)) + (110_781_000 as 
Weight) + .saturating_add((364_000 as Weight).saturating_mul(t as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) - - } - fn propose_bounty(d: u32, ) -> Weight { - (60_887_000 as Weight) - .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - - } - fn approve_bounty() -> Weight { - (17_337_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn propose_curator() -> Weight { - (14_068_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - - } - fn unassign_curator() -> Weight { - (49_717_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn accept_curator() -> Weight { - (50_596_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn award_bounty() -> Weight { - (36_030_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - - } - fn claim_bounty() -> Weight { - (167_088_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) - - } - fn close_bounty_proposed() -> Weight { - (48_977_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - - } - fn close_bounty_active() -> Weight { - (110_959_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - - } - fn extend_bounty_expiry() -> Weight { - (34_987_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - - } - fn on_initialize_proposals(p: u32, ) -> Weight { - (76_596_000 as Weight) - .saturating_add((73_988_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(p as Weight))) - } - fn on_initialize_bounties(b: u32, ) -> Weight { - (75_165_000 as Weight) - .saturating_add((73_634_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) } - } // For backwards compatibility and tests impl WeightInfo for () { - fn propose_spend() -> Weight { - (56_844_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn reject_proposal() -> Weight { - (46_098_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn approve_proposal() -> Weight { - (13_622_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - 
.saturating_add(RocksDbWeight::get().writes(1 as Weight)) - - } fn report_awesome(r: u32, ) -> Weight { - (71_823_000 as Weight) + (70_338_000 as Weight) .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn retract_tip() -> Weight { - (60_150_000 as Weight) + (59_051_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn tip_new(r: u32, t: u32, ) -> Weight { - (46_522_000 as Weight) + (41_984_000 as Weight) .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((145_000 as Weight).saturating_mul(t as Weight)) + .saturating_add((180_000 as Weight).saturating_mul(t as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn tip(t: u32, ) -> Weight { - (33_790_000 as Weight) - .saturating_add((713_000 as Weight).saturating_mul(t as Weight)) + (33_313_000 as Weight) + .saturating_add((700_000 as Weight).saturating_mul(t as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn close_tip(t: u32, ) -> Weight { - (113_040_000 as Weight) - .saturating_add((375_000 as Weight).saturating_mul(t as Weight)) + (110_781_000 as Weight) + .saturating_add((364_000 as Weight).saturating_mul(t as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - - } - fn propose_bounty(d: u32, ) -> Weight { - (60_887_000 as Weight) - .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - - } - fn approve_bounty() -> Weight { - (17_337_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn propose_curator() -> Weight { - (14_068_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - - } - fn unassign_curator() -> Weight { - (49_717_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn accept_curator() -> Weight { - (50_596_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn award_bounty() -> Weight { - (36_030_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - - } - fn claim_bounty() -> Weight { - (167_088_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) - - } - fn close_bounty_proposed() -> Weight { - (48_977_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - - } - fn close_bounty_active() -> Weight { - (110_959_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - - } - fn extend_bounty_expiry() -> Weight { - (34_987_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - - } - fn 
on_initialize_proposals(p: u32, ) -> Weight { - (76_596_000 as Weight) - .saturating_add((73_988_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(p as Weight))) - } - fn on_initialize_bounties(b: u32, ) -> Weight { - (75_165_000 as Weight) - .saturating_add((73_634_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) } - } From c0e3f344cd1e9719f09fce4a109c2375604812c0 Mon Sep 17 00:00:00 2001 From: Parity Benchmarking Bot Date: Fri, 20 Nov 2020 18:02:30 +0000 Subject: [PATCH 31/34] cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_bounties --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/bounties/src/weights.rs --template=./.maintain/frame-weight-template.hbs --- frame/bounties/src/weights.rs | 236 +++++----------------------------- 1 file changed, 33 insertions(+), 203 deletions(-) diff --git a/frame/bounties/src/weights.rs b/frame/bounties/src/weights.rs index 646b9869f47ef..ca791e4b9a2d2 100644 --- a/frame/bounties/src/weights.rs +++ b/frame/bounties/src/weights.rs @@ -15,9 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Weights for pallet_treasury +//! Autogenerated weights for pallet_bounties +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-10-27, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-11-20, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -26,12 +27,12 @@ // --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_treasury +// --pallet=pallet_bounties // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/treasury/src/weights.rs +// --output=./frame/bounties/src/weights.rs // --template=./.maintain/frame-weight-template.hbs @@ -41,16 +42,8 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; -/// Weight functions needed for pallet_treasury. +/// Weight functions needed for pallet_bounties. 
pub trait WeightInfo { - fn propose_spend() -> Weight; - fn reject_proposal() -> Weight; - fn approve_proposal() -> Weight; - fn report_awesome(r: u32, ) -> Weight; - fn retract_tip() -> Weight; - fn tip_new(r: u32, t: u32, ) -> Weight; - fn tip(t: u32, ) -> Weight; - fn close_tip(t: u32, ) -> Weight; fn propose_bounty(d: u32, ) -> Weight; fn approve_bounty() -> Weight; fn propose_curator() -> Weight; @@ -61,278 +54,115 @@ pub trait WeightInfo { fn close_bounty_proposed() -> Weight; fn close_bounty_active() -> Weight; fn extend_bounty_expiry() -> Weight; - fn on_initialize_proposals(p: u32, ) -> Weight; - fn on_initialize_bounties(b: u32, ) -> Weight; - } -/// Weights for pallet_treasury using the Substrate node and recommended hardware. +/// Weights for pallet_bounties using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - fn propose_spend() -> Weight { - (56_844_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn reject_proposal() -> Weight { - (46_098_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn approve_proposal() -> Weight { - (13_622_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - - } - fn report_awesome(r: u32, ) -> Weight { - (71_823_000 as Weight) - .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn retract_tip() -> Weight { - (60_150_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn tip_new(r: u32, t: u32, ) -> Weight { - (46_522_000 as Weight) - .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((145_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn tip(t: u32, ) -> Weight { - (33_790_000 as Weight) - .saturating_add((713_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - - } - fn close_tip(t: u32, ) -> Weight { - (113_040_000 as Weight) - .saturating_add((375_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - - } fn propose_bounty(d: u32, ) -> Weight { - (60_887_000 as Weight) + (61_811_000 as Weight) .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) - } fn approve_bounty() -> Weight { - (17_337_000 as Weight) + (17_579_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn propose_curator() -> Weight { - (14_068_000 as Weight) + (13_722_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn unassign_curator() -> Weight { - (49_717_000 as Weight) + (49_951_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as 
Weight)) - } fn accept_curator() -> Weight { - (50_596_000 as Weight) + (50_197_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn award_bounty() -> Weight { - (36_030_000 as Weight) + (35_999_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn claim_bounty() -> Weight { - (167_088_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) - + (67_197_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn close_bounty_proposed() -> Weight { - (48_977_000 as Weight) + (49_043_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } fn close_bounty_active() -> Weight { - (110_959_000 as Weight) + (63_202_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn extend_bounty_expiry() -> Weight { - (34_987_000 as Weight) + (34_943_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - - } - fn on_initialize_proposals(p: u32, ) -> Weight { - (76_596_000 as Weight) - .saturating_add((73_988_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(p as Weight))) - } - fn on_initialize_bounties(b: u32, ) -> Weight { - (75_165_000 as Weight) - .saturating_add((73_634_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) } - } // For backwards compatibility and tests impl WeightInfo for () { - fn propose_spend() -> Weight { - (56_844_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn reject_proposal() -> Weight { - (46_098_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn approve_proposal() -> Weight { - (13_622_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - - } - fn report_awesome(r: u32, ) -> Weight { - (71_823_000 as Weight) - .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn retract_tip() -> Weight { - (60_150_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn tip_new(r: u32, t: u32, ) -> Weight { - (46_522_000 as Weight) - .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((145_000 as Weight).saturating_mul(t as Weight)) - 
.saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn tip(t: u32, ) -> Weight { - (33_790_000 as Weight) - .saturating_add((713_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - - } - fn close_tip(t: u32, ) -> Weight { - (113_040_000 as Weight) - .saturating_add((375_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - - } fn propose_bounty(d: u32, ) -> Weight { - (60_887_000 as Weight) + (61_811_000 as Weight) .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - } fn approve_bounty() -> Weight { - (17_337_000 as Weight) + (17_579_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn propose_curator() -> Weight { - (14_068_000 as Weight) + (13_722_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn unassign_curator() -> Weight { - (49_717_000 as Weight) + (49_951_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn accept_curator() -> Weight { - (50_596_000 as Weight) + (50_197_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn award_bounty() -> Weight { - (36_030_000 as Weight) + (35_999_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn claim_bounty() -> Weight { - (167_088_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) - + (67_197_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn close_bounty_proposed() -> Weight { - (48_977_000 as Weight) + (49_043_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } fn close_bounty_active() -> Weight { - (110_959_000 as Weight) + (63_202_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn extend_bounty_expiry() -> Weight { - (34_987_000 as Weight) + (34_943_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - - } - fn on_initialize_proposals(p: u32, ) -> Weight { - (76_596_000 as Weight) - .saturating_add((73_988_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(p as Weight))) - } - fn on_initialize_bounties(b: u32, ) -> Weight { - (75_165_000 as Weight) - .saturating_add((73_634_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 
as Weight)) - .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) } - } From 03e0eeee02950168113a766e563351b7764fedb5 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Fri, 20 Nov 2020 19:38:20 +0100 Subject: [PATCH 32/34] add back `on_initialize_bounties` --- frame/bounties/src/benchmarking.rs | 8 ++++++++ frame/bounties/src/weights.rs | 17 +++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/frame/bounties/src/benchmarking.rs b/frame/bounties/src/benchmarking.rs index 48642a08fdad9..07e6fabac3bca 100644 --- a/frame/bounties/src/benchmarking.rs +++ b/frame/bounties/src/benchmarking.rs @@ -177,6 +177,13 @@ benchmarks! { let curator = T::Lookup::lookup(curator_lookup)?; }: _(RawOrigin::Signed(curator), bounty_id, Vec::new()) + on_initialize_bounties { + let b in 0 .. 100; + setup_pod_account::(); + create_approved_bounties::(b)?; + }: { + Treasury::::on_initialize(T::BlockNumber::zero()); + } } #[cfg(test)] @@ -198,6 +205,7 @@ mod tests { assert_ok!(test_benchmark_close_bounty_proposed::()); assert_ok!(test_benchmark_close_bounty_active::()); assert_ok!(test_benchmark_extend_bounty_expiry::()); + assert_ok!(test_benchmark_on_initialize_bounties::()); }); } } diff --git a/frame/bounties/src/weights.rs b/frame/bounties/src/weights.rs index ca791e4b9a2d2..4b06458bd83e3 100644 --- a/frame/bounties/src/weights.rs +++ b/frame/bounties/src/weights.rs @@ -54,6 +54,7 @@ pub trait WeightInfo { fn close_bounty_proposed() -> Weight; fn close_bounty_active() -> Weight; fn extend_bounty_expiry() -> Weight; + fn on_initialize_bounties(b: u32, ) -> Weight; } /// Weights for pallet_bounties using the Substrate node and recommended hardware. 
@@ -110,6 +111,14 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } + fn on_initialize_bounties(b: u32, ) -> Weight { + (75_165_000 as Weight) + .saturating_add((73_634_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) + } } // For backwards compatibility and tests @@ -165,4 +174,12 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } + fn on_initialize_bounties(b: u32, ) -> Weight { + (75_165_000 as Weight) + .saturating_add((73_634_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) + } } From b694ef37a5005ca0ef053c0841e9d3199230b4a9 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Fri, 20 Nov 2020 20:11:04 +0100 Subject: [PATCH 33/34] patch up bounties benchmarks --- frame/bounties/src/benchmarking.rs | 66 ++++++++++++++++++++++-------- frame/bounties/src/lib.rs | 14 +++---- frame/bounties/src/tests.rs | 22 ---------- frame/bounties/src/weights.rs | 6 +-- 4 files changed, 58 insertions(+), 50 deletions(-) diff --git a/frame/bounties/src/benchmarking.rs b/frame/bounties/src/benchmarking.rs index 07e6fabac3bca..e6673f6f59103 100644 --- a/frame/bounties/src/benchmarking.rs +++ b/frame/bounties/src/benchmarking.rs @@ -21,7 +21,8 @@ use super::*; -use frame_system::RawOrigin; +use sp_runtime::traits::Bounded; +use frame_system::{EventRecord, RawOrigin}; use frame_benchmarking::{benchmarks, account, whitelisted_caller}; use frame_support::traits::OnInitialize; @@ -29,17 +30,17 @@ use crate::Module as Bounties; const SEED: u32 = 0; -// // Create bounties that are approved for use in `on_initialize`. -// fn create_approved_bounties, I: Instance>(n: u32) -> Result<(), &'static str> { -// for i in 0 .. n { -// let (caller, _curator, _fee, value, reason) = setup_bounty::(i, MAX_BYTES); -// Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; -// let bounty_id = BountyCount::get() - 1; -// Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; -// } -// ensure!(BountyApprovals::get().len() == n as usize, "Not all bounty approved"); -// Ok(()) -// } +// Create bounties that are approved for use in `on_initialize`. +fn create_approved_bounties(n: u32) -> Result<(), &'static str> { + for i in 0 .. n { + let (caller, _curator, _fee, value, reason) = setup_bounty::(i, MAX_BYTES); + Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::get() - 1; + Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; + } + ensure!(BountyApprovals::get().len() == n as usize, "Not all bounty approved"); + Ok(()) +} // Create the pre-requisite information needed to create a treasury `propose_bounty`. 
fn setup_bounty(u: u32, d: u32) -> ( @@ -81,6 +82,14 @@ fn setup_pod_account() { let _ = T::Currency::make_free_balance_be(&pot_account, value); } +fn assert_last_event(generic_event: ::Event) { + let events = frame_system::Module::::events(); + let system_event: ::Event = generic_event.into(); + // compare to the last event record + let EventRecord { event, .. } = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + const MAX_BYTES: u32 = 16384; benchmarks! { @@ -167,6 +176,9 @@ benchmarks! { Bounties::::on_initialize(T::BlockNumber::zero()); let bounty_id = BountyCount::get() - 1; }: close_bounty(RawOrigin::Root, bounty_id) + verify { + assert_last_event::(RawEvent::BountyCanceled(bounty_id).into()) + } extend_bounty_expiry { setup_pod_account::(); @@ -176,13 +188,31 @@ benchmarks! { let bounty_id = BountyCount::get() - 1; let curator = T::Lookup::lookup(curator_lookup)?; }: _(RawOrigin::Signed(curator), bounty_id, Vec::new()) + verify { + assert_last_event::(RawEvent::BountyExtended(bounty_id).into()) + } - on_initialize_bounties { - let b in 0 .. 100; - setup_pod_account::(); - create_approved_bounties::(b)?; + spend_funds { + let b in 1 .. 100; + setup_pod_account::(); + create_approved_bounties::(b)?; + + let mut budget_remaining = BalanceOf::::max_value(); + let mut imbalance = PositiveImbalanceOf::::zero(); + let mut total_weight = Weight::zero(); + let mut missed_any = false; }: { - Treasury::::on_initialize(T::BlockNumber::zero()); + as pallet_treasury::SpendFunds>::spend_funds( + &mut budget_remaining, + &mut imbalance, + &mut total_weight, + &mut missed_any, + ); + } + verify { + ensure!(budget_remaining < BalanceOf::::max_value(), "Budget not used"); + ensure!(missed_any == false, "Missed some"); + assert_last_event::(RawEvent::BountyBecameActive(b - 1).into()) } } @@ -205,7 +235,7 @@ mod tests { assert_ok!(test_benchmark_close_bounty_proposed::()); assert_ok!(test_benchmark_close_bounty_active::()); assert_ok!(test_benchmark_extend_bounty_expiry::()); - assert_ok!(test_benchmark_on_initialize_bounties::()); + assert_ok!(test_benchmark_spend_funds::()); }); } } diff --git a/frame/bounties/src/lib.rs b/frame/bounties/src/lib.rs index 77f5e2200b063..1f35aab68aced 100644 --- a/frame/bounties/src/lib.rs +++ b/frame/bounties/src/lib.rs @@ -779,12 +779,12 @@ impl Module { } impl pallet_treasury::SpendFunds for Module { - - fn spend_funds( budget_remaining: &mut BalanceOf, - imbalance: &mut PositiveImbalanceOf, - total_weight: &mut Weight, - missed_any: &mut bool ) { - + fn spend_funds( + budget_remaining: &mut BalanceOf, + imbalance: &mut PositiveImbalanceOf, + total_weight: &mut Weight, + missed_any: &mut bool + ) { let bounties_len = BountyApprovals::mutate(|v| { let bounties_approval_len = v.len() as u32; v.retain(|&index| { @@ -816,7 +816,7 @@ impl pallet_treasury::SpendFunds for Module { bounties_approval_len }); - *total_weight += ::WeightInfo::on_initialize_bounties(bounties_len); + *total_weight += ::WeightInfo::spend_funds(bounties_len); } } diff --git a/frame/bounties/src/tests.rs b/frame/bounties/src/tests.rs index 2fe4f45fd3d6c..f21070058471b 100644 --- a/frame/bounties/src/tests.rs +++ b/frame/bounties/src/tests.rs @@ -103,28 +103,6 @@ impl pallet_balances::Trait for Test { thread_local! 
{ static TEN_TO_FOURTEEN: RefCell> = RefCell::new(vec![10,11,12,13,14]); } -pub struct TenToFourteen; -impl Contains for TenToFourteen { - fn sorted_members() -> Vec { - TEN_TO_FOURTEEN.with(|v| { - v.borrow().clone() - }) - } - #[cfg(feature = "runtime-benchmarks")] - fn add(new: &u128) { - TEN_TO_FOURTEEN.with(|v| { - let mut members = v.borrow_mut(); - members.push(*new); - members.sort(); - }) - } -} -impl ContainsLengthBound for TenToFourteen { - fn max_len() -> usize { - TEN_TO_FOURTEEN.with(|v| v.borrow().len()) - } - fn min_len() -> usize { 0 } -} parameter_types! { pub const ProposalBond: Permill = Permill::from_percent(5); pub const ProposalBondMinimum: u64 = 1; diff --git a/frame/bounties/src/weights.rs b/frame/bounties/src/weights.rs index 4b06458bd83e3..c48adebc2a907 100644 --- a/frame/bounties/src/weights.rs +++ b/frame/bounties/src/weights.rs @@ -54,7 +54,7 @@ pub trait WeightInfo { fn close_bounty_proposed() -> Weight; fn close_bounty_active() -> Weight; fn extend_bounty_expiry() -> Weight; - fn on_initialize_bounties(b: u32, ) -> Weight; + fn spend_funds(b: u32, ) -> Weight; } /// Weights for pallet_bounties using the Substrate node and recommended hardware. @@ -111,7 +111,7 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } - fn on_initialize_bounties(b: u32, ) -> Weight { + fn spend_funds(b: u32, ) -> Weight { (75_165_000 as Weight) .saturating_add((73_634_000 as Weight).saturating_mul(b as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) @@ -174,7 +174,7 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } - fn on_initialize_bounties(b: u32, ) -> Weight { + fn spend_funds(b: u32, ) -> Weight { (75_165_000 as Weight) .saturating_add((73_634_000 as Weight).saturating_mul(b as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) From 8c955ac0632d6233c0ae08d21c7e021d7f5e7a6e Mon Sep 17 00:00:00 2001 From: Parity Benchmarking Bot Date: Fri, 20 Nov 2020 19:15:01 +0000 Subject: [PATCH 34/34] cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_bounties --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/bounties/src/weights.rs --template=./.maintain/frame-weight-template.hbs --- frame/bounties/src/weights.rs | 56 +++++++++++++++++------------------ 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/frame/bounties/src/weights.rs b/frame/bounties/src/weights.rs index c48adebc2a907..fbe1a59643306 100644 --- a/frame/bounties/src/weights.rs +++ b/frame/bounties/src/weights.rs @@ -61,62 +61,62 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn propose_bounty(d: u32, ) -> Weight { - (61_811_000 as Weight) + (59_931_000 as Weight) .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn approve_bounty() -> Weight { - (17_579_000 as Weight) + (17_226_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn propose_curator() -> Weight { - (13_722_000 as Weight) + (13_314_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) 
.saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn unassign_curator() -> Weight { - (49_951_000 as Weight) + (48_677_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn accept_curator() -> Weight { - (50_197_000 as Weight) + (48_727_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn award_bounty() -> Weight { - (35_999_000 as Weight) + (34_839_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn claim_bounty() -> Weight { - (67_197_000 as Weight) + (64_883_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn close_bounty_proposed() -> Weight { - (49_043_000 as Weight) + (48_003_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn close_bounty_active() -> Weight { - (63_202_000 as Weight) + (62_215_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn extend_bounty_expiry() -> Weight { - (34_943_000 as Weight) + (33_748_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn spend_funds(b: u32, ) -> Weight { - (75_165_000 as Weight) - .saturating_add((73_634_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) + (3_688_000 as Weight) + .saturating_add((70_129_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) } } @@ -124,62 +124,62 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { fn propose_bounty(d: u32, ) -> Weight { - (61_811_000 as Weight) + (59_931_000 as Weight) .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn approve_bounty() -> Weight { - (17_579_000 as Weight) + (17_226_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn propose_curator() -> Weight { - (13_722_000 as Weight) + (13_314_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn unassign_curator() -> Weight { - (49_951_000 as Weight) + (48_677_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn accept_curator() -> Weight { - (50_197_000 as Weight) + (48_727_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn award_bounty() -> Weight { - (35_999_000 as Weight) + (34_839_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn claim_bounty() -> Weight { - (67_197_000 as Weight) + (64_883_000 as Weight) 
.saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn close_bounty_proposed() -> Weight { - (49_043_000 as Weight) + (48_003_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn close_bounty_active() -> Weight { - (63_202_000 as Weight) + (62_215_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn extend_bounty_expiry() -> Weight { - (34_943_000 as Weight) + (33_748_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn spend_funds(b: u32, ) -> Weight { - (75_165_000 as Weight) - .saturating_add((73_634_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + (3_688_000 as Weight) + .saturating_add((70_129_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) } }
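
For reference, a minimal sketch (not part of the patches above) of how the new `static` variant of `parameter_types!` introduced in frame/support is intended to be used in a test mock. It assumes the expansion shown in that diff: a `std`-only thread-local named after the parameter in UPPER_SNAKE_CASE, a `Get` implementation, and a generated `set` helper. The parameter name, value, and test name here are illustrative only.

use frame_support::parameter_types;

parameter_types! {
	// Under `std`, this expands to a thread-local `EXISTENTIAL_DEPOSIT`,
	// a getter returning its current value, and an `ExistentialDeposit::set` helper.
	pub static ExistentialDeposit: u64 = 0;
}

#[test]
fn static_parameter_type_can_be_set_per_test() {
	// Starts at the declared default for this thread...
	assert_eq!(ExistentialDeposit::get(), 0);
	// ...and can be overridden per test without hand-rolling a `Get` impl.
	ExistentialDeposit::set(256);
	assert_eq!(ExistentialDeposit::get(), 256);
}

This is the pattern the mock refactors above (membership, staking, transaction-payment, vesting) rely on in place of the hand-written `thread_local!`/`Get` boilerplate they remove. For the regenerated weight files, a runtime would typically wire them in with something like `type WeightInfo = pallet_bounties::weights::SubstrateWeight<Runtime>;` (the exact module path depends on the runtime's imports and is shown here only for illustration).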