diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 839519a69526..0e69a91af165 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -204,8 +204,6 @@ build:
     - mv -v ./target/release/rialto-parachain-collator ./artifacts/
     - strip ./target/release/millau-bridge-node
     - mv -v ./target/release/millau-bridge-node ./artifacts/
-    - strip ./target/release/ethereum-poa-relay
-    - mv -v ./target/release/ethereum-poa-relay ./artifacts/
     - strip ./target/release/substrate-relay
     - mv -v ./target/release/substrate-relay ./artifacts/
     - mv -v ./deployments/local-scripts/bridge-entrypoint.sh ./artifacts/
@@ -286,10 +284,6 @@ millau-bridge-node:
   stage: publish
   <<: *build-push-image

-ethereum-poa-relay:
-  stage: publish
-  <<: *build-push-image
-
 substrate-relay:
   stage: publish
   <<: *build-push-image
diff --git a/CODEOWNERS b/CODEOWNERS
new file mode 100644
index 000000000000..3941ba8451a1
--- /dev/null
+++ b/CODEOWNERS
@@ -0,0 +1,27 @@
+# Lists some code owners.
+#
+# A codeowner just oversees some part of the codebase. If an owned file is changed, the
+# corresponding codeowner receives a review request. Approval from the codeowner might be
+# required for merging a PR (depending on repository settings).
+#
+# For details about syntax, see:
+# https://help.github.com/en/articles/about-code-owners
+# But here are some important notes:
+#
+# - Glob syntax is git-like, e.g. `/core` means the core directory in the root, unlike `core`,
+#   which can match anywhere.
+# - Multiple owners are supported.
+# - Either a handle (e.g., @github_user or @github_org/team) or an email can be used. Keep in
+#   mind that handles might work better because they are more recognizable on GitHub, and,
+#   unlike an email, you can use them for mentioning.
+# - If multiple rules match, the latest one takes precedence.
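+#
+# Hypothetical illustration of that precedence rule (made-up paths and teams, not rules
+# used by this repo): with the two entries below, a change to `/core/api/handler.rs`
+# would request a review from @team-b only, because the later matching rule wins:
+#   /core/     @team-a
+#   /core/api/ @team-b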
+ +# CI +/.github/ @paritytech/ci +/.gitlab-ci.yml @paritytech/ci diff --git a/Cargo.lock b/Cargo.lock index 9d996238141c..115fe33dee38 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -18,7 +18,16 @@ version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e61f2b7f93d2c7d2b08263acaa4a363b3e276806c68af6134c44f523bf1aacd" dependencies = [ - "gimli", + "gimli 0.25.0", +] + +[[package]] +name = "addr2line" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" +dependencies = [ + "gimli 0.26.1", ] [[package]] @@ -64,9 +73,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.7.4" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43bb833f0bf979d8475d38fbf09ed3b8a55e1885fe93ad3f93239fc6a4f17b98" +checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ "getrandom 0.2.3", "once_cell", @@ -108,9 +117,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.44" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61604a8f862e1d5c3229fdd78f8b02c68dcf73a4c4b05fd636d12240aaa242c1" +checksum = "0a03e93e97a28fbc9f42fbc5ba0886a3c67eb637b476dbee711f80a6ffe8223d" [[package]] name = "approx" @@ -123,9 +132,9 @@ dependencies = [ [[package]] name = "arbitrary" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "577b08a4acd7b99869f863c50011b01eb73424ccc798ecd996f2e24817adfca7" +checksum = "510c76ecefdceada737ea728f4f9a84bd2e1ef29f1ba555e560940fe279954de" [[package]] name = "array_tool" @@ -156,15 +165,15 @@ checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] name = "arrayvec" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4dc07131ffa69b8072d35f5007352af944213cde02545e2103680baed38fcd" +checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" [[package]] name = "asn1_der" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d6e24d2cce90c53b948c46271bfb053e4bdc2db9b5d3f65e20f8cf28a1b7fc3" +checksum = "e22d1f4b888c298a027c99dc9048015fac177587de20fc30232a057dfbe24a21" [[package]] name = "assert_matches" @@ -237,7 +246,7 @@ dependencies = [ "parking", "polling", "slab", - "socket2 0.4.1", + "socket2 0.4.2", "waker-fn", "winapi 0.3.9", ] @@ -262,9 +271,9 @@ dependencies = [ [[package]] name = "async-process" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b21b63ab5a0db0369deb913540af2892750e42d949faacc7a61495ac418a1692" +checksum = "83137067e3a2a6a06d67168e49e68a0957d215410473a740cea95a2425c0b7c6" dependencies = [ "async-io", "blocking", @@ -365,9 +374,9 @@ dependencies = [ [[package]] name = "atomic" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3410529e8288c463bedb5930f82833bc0c90e5d2fe639a56582a4d09220b281" +checksum = "b88d82667eca772c4aa12f0f1348b3ae643424c8876448f3f7bd5787032e234c" dependencies = [ "autocfg", ] @@ -407,11 +416,11 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.61" +version = "0.3.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e7a905d892734eea339e896738c14b9afce22b5318f64b951e70bf3844419b01" +checksum = "321629d8ba6513061f26707241fa9bc89524ff1cd7a915a97ef0c62c666ce1b6" dependencies = [ - "addr2line", + "addr2line 0.17.0", "cc", "cfg-if 1.0.0", "libc", @@ -441,15 +450,9 @@ checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" [[package]] name = "base58" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5024ee8015f02155eee35c711107ddd9a9bf3cb689cf2a9089c97e79b6e1ae83" - -[[package]] -name = "base64" -version = "0.12.3" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" +checksum = "6107fe1be6682a68940da878d9e9f5e90ca5745b3dec9fd1bb393c8777d4f581" [[package]] name = "base64" @@ -469,11 +472,11 @@ dependencies = [ [[package]] name = "beefy-gadget" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "beefy-primitives", "fnv", - "futures 0.3.17", + "futures 0.3.18", "log", "parity-scale-codec", "parking_lot 0.11.2", @@ -497,12 +500,12 @@ dependencies = [ [[package]] name = "beefy-gadget-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "beefy-gadget", "beefy-primitives", - "futures 0.3.17", - "jsonrpc-core 18.0.0", + "futures 0.3.18", + "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", "jsonrpc-pubsub", @@ -517,12 +520,12 @@ dependencies = [ [[package]] name = "beefy-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" [[package]] name = "beefy-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "parity-scale-codec", "scale-info", @@ -533,6 +536,12 @@ dependencies = [ "sp-std", ] +[[package]] +name = "bimap" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50ae17cabbc8a38a1e3e4c1a6a664e9a09672dc14d0896fa8d865d3a5a446b07" + [[package]] name = "bincode" version = "1.3.3" @@ -544,9 +553,9 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.59.1" +version = "0.59.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "453c49e5950bb0eb63bb3df640e31618846c89d5b7faa54040d76e98e0134375" +checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" dependencies = [ "bitflags", "cexpr", @@ -567,18 +576,6 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" -[[package]] -name = "bitvec" -version = "0.19.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8942c8d352ae1838c9dda0b0ca2ab657696ef2232a20147cf1b30ae1a9cb4321" -dependencies = [ - "funty", - "radium 0.5.3", - 
"tap", - "wyz", -] - [[package]] name = "bitvec" version = "0.20.4" @@ -586,7 +583,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848" dependencies = [ "funty", - "radium 0.6.2", + "radium", "tap", "wyz", ] @@ -688,9 +685,9 @@ checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" [[package]] name = "blocking" -version = "1.0.2" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e170dbede1f740736619b776d7251cb1b9095c435c34d8ca9f57fcd2f335e9" +checksum = "046e47d4b2d391b1f6f8b407b1deb8dee56c1852ccd868becf2710f601b5f427" dependencies = [ "async-channel", "async-task", @@ -709,43 +706,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "bp-currency-exchange" -version = "0.1.0" -dependencies = [ - "frame-support", - "parity-scale-codec", - "scale-info", - "sp-api", - "sp-std", -] - -[[package]] -name = "bp-eth-poa" -version = "0.1.0" -dependencies = [ - "ethbloom 0.10.0", - "fixed-hash", - "hash-db", - "hex-literal 0.2.1", - "impl-rlp", - "impl-serde", - "libsecp256k1 0.7.0", - "parity-bytes", - "parity-scale-codec", - "plain_hasher", - "primitive-types", - "rlp", - "scale-info", - "serde", - "serde-big-array", - "sp-api", - "sp-io", - "sp-runtime", - "sp-std", - "triehash", -] - [[package]] name = "bp-header-chain" version = "0.1.0" @@ -792,7 +752,7 @@ dependencies = [ name = "bp-messages" version = "0.1.0" dependencies = [ - "bitvec 0.20.4", + "bitvec", "bp-runtime", "frame-support", "frame-system", @@ -908,6 +868,7 @@ version = "0.1.0" dependencies = [ "frame-support", "hash-db", + "hex-literal", "num-traits", "parity-scale-codec", "scale-info", @@ -1007,9 +968,9 @@ checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" [[package]] name = "bstr" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90682c8d613ad3373e66de8c6411e0ae2ab2571e879d2efbf73558cc66f21279" +checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" dependencies = [ "memchr", ] @@ -1025,15 +986,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.7.0" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c59e7af012c713f529e7a3ee57ce9b31ddd858d4b512923602f74608b009631" +checksum = "8f1e260c3a9040a7c19a12468758f4c16f31a81a1fe087482be9570ec864bb6c" [[package]] name = "byte-slice-cast" -version = "1.0.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65c1bf4a04a88c54f589125563643d773f3254b5c38571395e2b591c693bbc81" +checksum = "1d30c751592b77c499e7bce34d99d67c2c11bdc0574e9a488ddade14150a4698" [[package]] name = "byte-tools" @@ -1057,12 +1018,6 @@ dependencies = [ "iovec", ] -[[package]] -name = "bytes" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" - [[package]] name = "bytes" version = "1.1.0" @@ -1095,32 +1050,37 @@ dependencies = [ [[package]] name = "cargo_metadata" -version = "0.13.1" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "081e3f0755c1f380c2d010481b6fa2e02973586d5f2b24eebb7a2a1d98b143d8" +checksum = "ba2ae6de944143141f6155a473a6b02f66c7c3f9f47316f802f80204ebfe6e12" dependencies = [ "camino", "cargo-platform", - "semver 0.11.0", - "semver-parser 0.10.2", + "semver 
1.0.4", "serde", "serde_json", ] +[[package]] +name = "castaway" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed247d1586918e46f2bbe0f13b06498db8dab5a8c1093f156652e9f2e0a73fc3" + [[package]] name = "cc" -version = "1.0.70" +version = "1.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d26a6ce4b6a484fa3edb70f7efa6fc430fd2b87285fe8b84304fd0936faa0dc0" +checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" dependencies = [ "jobserver", ] [[package]] name = "cexpr" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db507a7679252d2276ed0dd8113c6875ec56d3089f9225b2b42c30cc1f8e5c89" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" dependencies = [ "nom", ] @@ -1212,13 +1172,13 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10612c0ec0e0a1ff0e97980647cb058a6e7aedb913d01d009c406b8b7d0b26ee" +checksum = "fa66045b9cb23c2e9c1520732030608b02ee07e5cfaa5a521ec15ded7fa24c90" dependencies = [ "glob", "libc", - "libloading 0.7.0", + "libloading 0.7.2", ] [[package]] @@ -1234,7 +1194,6 @@ dependencies = [ "textwrap", "unicode-width", "vec_map", - "yaml-rust", ] [[package]] @@ -1275,9 +1234,9 @@ checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" [[package]] name = "core-foundation" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a89e2ae426ea83155dccf10c0fa6b1463ef6d5fcb44cee0b224a408fa640a62" +checksum = "6888e10551bb93e424d8df1d07f1a8b4fceb0001a3a4b048bfc47554946f47b3" dependencies = [ "core-foundation-sys", "libc", @@ -1285,15 +1244,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" +checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" [[package]] name = "cpp_demangle" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea47428dc9d2237f3c6bc134472edfd63ebba0af932e783506dcfd66f10d18a" +checksum = "931ab2a3e6330a07900b8e7ca4e106cdcbb93f2b9a52df55e54ee53d8305b55d" dependencies = [ "cfg-if 1.0.0", ] @@ -1318,36 +1277,35 @@ dependencies = [ [[package]] name = "cranelift-bforest" -version = "0.76.0" +version = "0.78.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e6bea67967505247f54fa2c85cf4f6e0e31c4e5692c9b70e4ae58e339067333" +checksum = "cc0cb7df82c8cf8f2e6a8dd394a0932a71369c160cc9b027dca414fced242513" dependencies = [ "cranelift-entity", ] [[package]] name = "cranelift-codegen" -version = "0.76.0" +version = "0.78.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48194035d2752bdd5bdae429e3ab88676e95f52a2b1355a5d4e809f9e39b1d74" +checksum = "fe4463c15fa42eee909e61e5eac4866b7c6d22d0d8c621e57a0c5380753bfa8c" dependencies = [ "cranelift-bforest", "cranelift-codegen-meta", "cranelift-codegen-shared", "cranelift-entity", - "gimli", + "gimli 0.25.0", "log", "regalloc", - "serde", "smallvec", "target-lexicon", ] [[package]] name = "cranelift-codegen-meta" -version = "0.76.0" +version = "0.78.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "976efb22fcab4f2cd6bd4e9913764616a54d895c1a23530128d04e03633c555f" +checksum = "793f6a94a053a55404ea16e1700202a88101672b8cd6b4df63e13cde950852bf" dependencies = [ "cranelift-codegen-shared", "cranelift-entity", @@ -1355,27 +1313,24 @@ dependencies = [ [[package]] name = "cranelift-codegen-shared" -version = "0.76.0" +version = "0.78.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dabb5fe66e04d4652e434195b45ae65b5c8172d520247b8f66d8df42b2b45dc" -dependencies = [ - "serde", -] +checksum = "44aa1846df275bce5eb30379d65964c7afc63c05a117076e62a119c25fe174be" [[package]] name = "cranelift-entity" -version = "0.76.0" +version = "0.78.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3329733e4d4b8e91c809efcaa4faee80bf66f20164e3dd16d707346bd3494799" +checksum = "a3a45d8d6318bf8fc518154d9298eab2a8154ec068a8885ff113f6db8d69bb3a" dependencies = [ "serde", ] [[package]] name = "cranelift-frontend" -version = "0.76.0" +version = "0.78.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "279afcc0d3e651b773f94837c3d581177b348c8d69e928104b2e9fccb226f921" +checksum = "e07339bd461766deb7605169de039e01954768ff730fa1254e149001884a8525" dependencies = [ "cranelift-codegen", "log", @@ -1385,9 +1340,9 @@ dependencies = [ [[package]] name = "cranelift-native" -version = "0.76.0" +version = "0.78.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c04d1fe6a5abb5bb0edc78baa8ef238370fb8e389cc88b6d153f7c3e9680425" +checksum = "03e2fca76ff57e0532936a71e3fc267eae6a19a86656716479c66e7f912e3d7b" dependencies = [ "cranelift-codegen", "libc", @@ -1396,26 +1351,25 @@ dependencies = [ [[package]] name = "cranelift-wasm" -version = "0.76.0" +version = "0.78.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0d260ad44f6fd2c91f7f5097191a2a9e3edcbb36df1fb787b600dad5ea148ec" +checksum = "1f46fec547a1f8a32c54ea61c28be4f4ad234ad95342b718a9a9adcaadb0c778" dependencies = [ "cranelift-codegen", "cranelift-entity", "cranelift-frontend", "itertools", "log", - "serde", "smallvec", - "thiserror", "wasmparser", + "wasmtime-types", ] [[package]] name = "crc32fast" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" +checksum = "3825b1e8580894917dc4468cb634a1b4e9745fddc854edad72d9c04644c0319f" dependencies = [ "cfg-if 1.0.0", ] @@ -1532,7 +1486,7 @@ dependencies = [ [[package]] name = "cumulus-client-cli" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693" +source = "git+https://github.com/paritytech/cumulus?branch=master#5b245a21eb84ff7b1da6b47ad4386bda3dfb5880" dependencies = [ "sc-cli", "sc-service", @@ -1542,12 +1496,12 @@ dependencies = [ [[package]] name = "cumulus-client-collator" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693" +source = "git+https://github.com/paritytech/cumulus?branch=master#5b245a21eb84ff7b1da6b47ad4386bda3dfb5880" dependencies = [ "cumulus-client-consensus-common", "cumulus-client-network", "cumulus-primitives-core", - "futures 0.3.17", + "futures 0.3.18", "parity-scale-codec", "parking_lot 0.10.2", "polkadot-node-primitives", @@ -1565,12 +1519,12 @@ dependencies = [ [[package]] name = "cumulus-client-consensus-aura" version = "0.1.0" -source = 
"git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693" +source = "git+https://github.com/paritytech/cumulus?branch=master#5b245a21eb84ff7b1da6b47ad4386bda3dfb5880" dependencies = [ "async-trait", "cumulus-client-consensus-common", "cumulus-primitives-core", - "futures 0.3.17", + "futures 0.3.18", "parity-scale-codec", "polkadot-client", "sc-client-api", @@ -1595,11 +1549,11 @@ dependencies = [ [[package]] name = "cumulus-client-consensus-common" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693" +source = "git+https://github.com/paritytech/cumulus?branch=master#5b245a21eb84ff7b1da6b47ad4386bda3dfb5880" dependencies = [ "async-trait", "dyn-clone", - "futures 0.3.17", + "futures 0.3.18", "parity-scale-codec", "polkadot-primitives", "sc-client-api", @@ -1615,10 +1569,10 @@ dependencies = [ [[package]] name = "cumulus-client-network" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693" +source = "git+https://github.com/paritytech/cumulus?branch=master#5b245a21eb84ff7b1da6b47ad4386bda3dfb5880" dependencies = [ "derive_more", - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "parity-scale-codec", "parking_lot 0.10.2", @@ -1638,10 +1592,10 @@ dependencies = [ [[package]] name = "cumulus-client-pov-recovery" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693" +source = "git+https://github.com/paritytech/cumulus?branch=master#5b245a21eb84ff7b1da6b47ad4386bda3dfb5880" dependencies = [ "cumulus-primitives-core", - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "parity-scale-codec", "polkadot-node-primitives", @@ -1661,7 +1615,7 @@ dependencies = [ [[package]] name = "cumulus-client-service" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693" +source = "git+https://github.com/paritytech/cumulus?branch=master#5b245a21eb84ff7b1da6b47ad4386bda3dfb5880" dependencies = [ "cumulus-client-collator", "cumulus-client-consensus-common", @@ -1690,7 +1644,7 @@ dependencies = [ [[package]] name = "cumulus-pallet-aura-ext" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693" +source = "git+https://github.com/paritytech/cumulus?branch=master#5b245a21eb84ff7b1da6b47ad4386bda3dfb5880" dependencies = [ "frame-executive", "frame-support", @@ -1708,7 +1662,7 @@ dependencies = [ [[package]] name = "cumulus-pallet-dmp-queue" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693" +source = "git+https://github.com/paritytech/cumulus?branch=master#5b245a21eb84ff7b1da6b47ad4386bda3dfb5880" dependencies = [ "cumulus-primitives-core", "frame-support", @@ -1726,7 +1680,7 @@ dependencies = [ [[package]] name = "cumulus-pallet-parachain-system" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693" +source = "git+https://github.com/paritytech/cumulus?branch=master#5b245a21eb84ff7b1da6b47ad4386bda3dfb5880" dependencies = [ "cumulus-pallet-parachain-system-proc-macro", "cumulus-primitives-core", @@ -1755,7 +1709,7 @@ dependencies = [ [[package]] name = "cumulus-pallet-parachain-system-proc-macro" version = "0.1.0" -source = 
"git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693" +source = "git+https://github.com/paritytech/cumulus?branch=master#5b245a21eb84ff7b1da6b47ad4386bda3dfb5880" dependencies = [ "proc-macro-crate 1.1.0", "proc-macro2", @@ -1766,7 +1720,7 @@ dependencies = [ [[package]] name = "cumulus-pallet-xcm" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693" +source = "git+https://github.com/paritytech/cumulus?branch=master#5b245a21eb84ff7b1da6b47ad4386bda3dfb5880" dependencies = [ "cumulus-primitives-core", "frame-support", @@ -1783,7 +1737,7 @@ dependencies = [ [[package]] name = "cumulus-pallet-xcmp-queue" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693" +source = "git+https://github.com/paritytech/cumulus?branch=master#5b245a21eb84ff7b1da6b47ad4386bda3dfb5880" dependencies = [ "cumulus-primitives-core", "frame-support", @@ -1801,7 +1755,7 @@ dependencies = [ [[package]] name = "cumulus-primitives-core" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693" +source = "git+https://github.com/paritytech/cumulus?branch=master#5b245a21eb84ff7b1da6b47ad4386bda3dfb5880" dependencies = [ "frame-support", "impl-trait-for-tuples", @@ -1818,7 +1772,7 @@ dependencies = [ [[package]] name = "cumulus-primitives-parachain-inherent" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693" +source = "git+https://github.com/paritytech/cumulus?branch=master#5b245a21eb84ff7b1da6b47ad4386bda3dfb5880" dependencies = [ "async-trait", "cumulus-primitives-core", @@ -1840,7 +1794,7 @@ dependencies = [ [[package]] name = "cumulus-primitives-timestamp" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693" +source = "git+https://github.com/paritytech/cumulus?branch=master#5b245a21eb84ff7b1da6b47ad4386bda3dfb5880" dependencies = [ "cumulus-primitives-core", "sp-inherents", @@ -1851,7 +1805,7 @@ dependencies = [ [[package]] name = "cumulus-primitives-utility" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693" +source = "git+https://github.com/paritytech/cumulus?branch=master#5b245a21eb84ff7b1da6b47ad4386bda3dfb5880" dependencies = [ "cumulus-primitives-core", "frame-support", @@ -1868,7 +1822,7 @@ dependencies = [ [[package]] name = "cumulus-test-relay-sproof-builder" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693" +source = "git+https://github.com/paritytech/cumulus?branch=master#5b245a21eb84ff7b1da6b47ad4386bda3dfb5880" dependencies = [ "cumulus-primitives-core", "parity-scale-codec", @@ -1880,24 +1834,24 @@ dependencies = [ [[package]] name = "curl" -version = "0.4.38" +version = "0.4.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "003cb79c1c6d1c93344c7e1201bb51c2148f24ec2bd9c253709d6b2efb796515" +checksum = "1bc6d233563261f8db6ffb83bbaad5a73837a6e6b28868e926337ebbdece0be3" dependencies = [ "curl-sys", "libc", "openssl-probe", "openssl-sys", "schannel", - "socket2 0.4.1", + "socket2 0.4.2", "winapi 0.3.9", ] [[package]] name = "curl-sys" -version = "0.4.45+curl-7.78.0" +version = "0.4.51+curl-7.80.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "de9e5a72b1c744eb5dd20b2be4d7eb84625070bb5c4ab9b347b70464ab1e62eb" +checksum = "d130987e6a6a34fe0889e1083022fa48cd90e6709a84be3fb8dd95801de5af20" dependencies = [ "cc", "libc", @@ -1974,14 +1928,14 @@ dependencies = [ [[package]] name = "derive_more" -version = "0.99.16" +version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40eebddd2156ce1bb37b20bbe5151340a31828b1f2d22ba4141f3531710e38df" +checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case", "proc-macro2", "quote", - "rustc_version 0.3.3", + "rustc_version 0.4.0", "syn", ] @@ -2005,9 +1959,9 @@ dependencies = [ [[package]] name = "directories" -version = "3.0.2" +version = "4.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e69600ff1703123957937708eb27f7a564e48885c537782722ed0ba3189ce1d7" +checksum = "f51c5d4ddabd36886dd3e1438cb358cdcb0d7c499cb99cb4ac2e38e18b5cb210" dependencies = [ "dirs-sys", ] @@ -2072,6 +2026,12 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650" +[[package]] +name = "dtoa" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56899898ce76aaf4a0f24d914c97ea6ed976d42fec6ad33fcbb0a1103e07b2b0" + [[package]] name = "dyn-clonable" version = "0.9.0" @@ -2101,9 +2061,9 @@ checksum = "ee2626afccd7561a06cf1367e2950c4718ea04565e20fb5029b6c7d8ad09abcf" [[package]] name = "ed25519" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4620d40f6d2601794401d6dd95a5cf69b6c157852539470eeda433a99b3c0efc" +checksum = "74e1069e39f1454367eb2de793ed062fac4c35c2934b76a81d90dd9abcd28816" dependencies = [ "signature", ] @@ -2130,9 +2090,9 @@ checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "encoding_rs" -version = "0.8.28" +version = "0.8.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" +checksum = "a74ea89a0a1b98f6332de42c95baff457ada66d1cb4030f9ff151b2041a1c746" dependencies = [ "cfg-if 1.0.0", ] @@ -2225,20 +2185,11 @@ version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68b91989ae21441195d7d9b9993a2f9295c7e1a8c96255d8b729accddc124797" -[[package]] -name = "erased-serde" -version = "0.3.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3de9ad4541d99dc22b59134e7ff8dc3d6c988c89ecd7324bf10a8362b07a2afa" -dependencies = [ - "serde", -] - [[package]] name = "errno" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa68f2fb9cae9d37c9b2b3584aba698a2e97f72d7aef7b9f7aa71d8b54ce46fe" +checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" dependencies = [ "errno-dragonfly", "libc", @@ -2247,64 +2198,19 @@ dependencies = [ [[package]] name = "errno-dragonfly" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14ca354e36190500e1e1fb267c647932382b54053c50b14970856c0b00a35067" +checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" dependencies = [ - "gcc", + "cc", "libc", ] -[[package]] -name = "ethabi" -version = "14.0.0" -source = 
"git+https://github.com/svyatonik/ethabi.git?branch=bump-deps#19bb6ea4a8099af1d70ab8c0ddcd3dec8fa45ed8" -dependencies = [ - "anyhow", - "ethereum-types", - "hex", - "serde", - "serde_json", - "sha3", - "thiserror", - "uint", -] - -[[package]] -name = "ethabi-contract" -version = "11.0.0" -source = "git+https://github.com/svyatonik/ethabi.git?branch=bump-deps#19bb6ea4a8099af1d70ab8c0ddcd3dec8fa45ed8" - -[[package]] -name = "ethabi-derive" -version = "14.0.0" -source = "git+https://github.com/svyatonik/ethabi.git?branch=bump-deps#19bb6ea4a8099af1d70ab8c0ddcd3dec8fa45ed8" -dependencies = [ - "anyhow", - "ethabi", - "heck", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "ethbloom" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22a621dcebea74f2a6f2002d0a885c81ccf6cbdf86760183316a7722b5707ca4" -dependencies = [ - "crunchy", - "fixed-hash", - "impl-rlp", - "tiny-keccak", -] - [[package]] name = "ethbloom" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "779864b9c7f7ead1f092972c3257496c6a84b46dba2ce131dd8a282cb2cc5972" +checksum = "bfb684ac8fa8f6c5759f788862bb22ec6fe3cb392f6bfd08e3c64b603661e3f8" dependencies = [ "crunchy", "fixed-hash", @@ -2313,64 +2219,13 @@ dependencies = [ "tiny-keccak", ] -[[package]] -name = "ethereum-contract-builtin" -version = "0.1.0" -dependencies = [ - "ethereum-types", - "finality-grandpa", - "hex", - "log", - "parity-scale-codec", - "rialto-runtime", - "sc-finality-grandpa", - "sp-blockchain", - "sp-core", - "sp-finality-grandpa", - "sp-runtime", -] - -[[package]] -name = "ethereum-poa-relay" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-std", - "async-trait", - "bp-currency-exchange", - "bp-eth-poa", - "clap", - "ethabi", - "ethabi-contract", - "ethabi-derive", - "exchange-relay", - "futures 0.3.17", - "headers-relay", - "hex", - "hex-literal 0.3.3", - "libsecp256k1 0.7.0", - "log", - "num-traits", - "parity-scale-codec", - "relay-ethereum-client", - "relay-rialto-client", - "relay-substrate-client", - "relay-utils", - "rialto-runtime", - "serde_json", - "sp-core", - "sp-keyring", - "sp-runtime", - "thiserror", -] - [[package]] name = "ethereum-types" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dd6bde671199089e601e8d47e153368b893ef885f11f365a3261ec58153c211" +checksum = "05136f7057fe789f06e6d41d07b34e6f70d8c86e5693b60f97aaa6553553bdaf" dependencies = [ - "ethbloom 0.11.0", + "ethbloom", "fixed-hash", "impl-rlp", "impl-serde", @@ -2384,29 +2239,13 @@ version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7531096570974c3a9dcf9e4b8e1cede1ec26cf5046219fb3b9d897503b9be59" -[[package]] -name = "exchange-relay" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-std", - "async-trait", - "backoff", - "futures 0.3.17", - "log", - "num-traits", - "parking_lot 0.11.2", - "relay-utils", - "thiserror", -] - [[package]] name = "exit-future" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e43f2f1833d64e33f15592464d6fdd70f349dda7b1a53088eb83cd94014008c5" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", ] [[package]] @@ -2456,7 +2295,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8ac3ff5224ef91f3c97e03eb1de2db82743427e91aaa5ac635f454f0b164f5a" dependencies = [ "either", - "futures 0.3.17", + "futures 0.3.18", "futures-timer 
3.0.2", "log", "num-traits", @@ -2473,8 +2312,7 @@ dependencies = [ "async-trait", "backoff", "bp-header-chain", - "futures 0.3.17", - "headers-relay", + "futures 0.3.18", "log", "num-traits", "parking_lot 0.11.2", @@ -2495,9 +2333,9 @@ dependencies = [ [[package]] name = "fixedbitset" -version = "0.2.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" +checksum = "398ea4fabe40b9b0d885340a2a991a44c8a645624075ad966d21f88688e2b69e" [[package]] name = "flate2" @@ -2518,25 +2356,10 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "parity-scale-codec", ] @@ -2554,7 +2377,7 @@ dependencies = [ [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-support", "frame-system", @@ -2574,7 +2397,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "Inflector", "chrono", @@ -2600,7 +2423,7 @@ dependencies = [ [[package]] name = "frame-election-provider-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-support", "frame-system", @@ -2614,7 +2437,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-support", "frame-system", @@ -2629,9 +2452,9 @@ dependencies = [ [[package]] name = "frame-metadata" -version = "14.0.0" +version = "14.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96616f82e069102b95a72c87de4c84d2f87ef7f0f20630e78ce3824436483110" +checksum = "37ed5e5c346de62ca5c184b4325a6600d1eaca210666e4606fe4e449574978d0" dependencies = [ "cfg-if 1.0.0", "parity-scale-codec", @@ -2642,7 +2465,7 @@ dependencies = [ [[package]] name = "frame-support" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "bitflags", "frame-metadata", @@ -2657,6 +2480,7 @@ dependencies = [ "smallvec", "sp-arithmetic", "sp-core", + "sp-core-hashing-proc-macro", "sp-inherents", "sp-io", "sp-runtime", @@ -2664,12 +2488,13 @@ dependencies = [ "sp-state-machine", "sp-std", "sp-tracing", + "tt-call", ] [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "Inflector", "frame-support-procedural-tools", @@ -2681,7 +2506,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate 1.1.0", @@ -2693,7 +2518,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "proc-macro2", "quote", @@ -2703,7 +2528,7 @@ dependencies = [ [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-support", "log", @@ -2720,7 +2545,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-benchmarking", "frame-support", @@ -2735,7 +2560,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "parity-scale-codec", "sp-api", @@ -2744,7 +2569,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-support", "sp-api", @@ -2810,9 +2635,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12aa0eb539080d55c3f2d45a67c3b58b6b0773c1a3ca2dfec66d58c97fd66ca" +checksum = "8cd0210d8c325c245ff06fd95a3b13689a1a276ac8cfa8e8720cb840bfb84b9e" 
dependencies = [ "futures-channel", "futures-core", @@ -2825,9 +2650,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5da6ba8c3bb3c165d3c7319fc1cc8304facf1fb8db99c5de877183c08a273888" +checksum = "7fc8cd39e3dbf865f7340dce6a2d401d24fd37c6fe6c4f0ee0de8bfca2252d27" dependencies = [ "futures-core", "futures-sink", @@ -2835,15 +2660,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d1c26957f23603395cd326b0ffe64124b818f4449552f960d815cfba83a53d" +checksum = "629316e42fe7c2a0b9a65b47d159ceaa5453ab14e8f0a3c5eedbb8cd55b4a445" [[package]] name = "futures-executor" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45025be030969d763025784f7f355043dc6bc74093e4ecc5000ca4dc50d8745c" +checksum = "7b808bf53348a36cab739d7e04755909b9fcaaa69b7d7e588b37b6ec62704c97" dependencies = [ "futures-core", "futures-task", @@ -2853,9 +2678,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "522de2a0fe3e380f1bc577ba0474108faf3f6b18321dbf60b3b9c39a75073377" +checksum = "e481354db6b5c353246ccf6a728b0c5511d752c08da7260546fc0933869daa11" [[package]] name = "futures-lite" @@ -2874,12 +2699,10 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18e4a4b95cea4b4ccbcf1c5675ca7c4ee4e9e75eb79944d07defde18068f79bb" +checksum = "a89f17b21645bc4ed773c69af9c9a0effd4a3f1a3876eadd453469f8854e7fdd" dependencies = [ - "autocfg", - "proc-macro-hack", "proc-macro2", "quote", "syn", @@ -2898,15 +2721,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36ea153c13024fe480590b3e3d4cad89a0cfacecc24577b68f86c6ced9c2bc11" +checksum = "996c6442437b62d21a32cd9906f9c41e7dc1e19a9579843fad948696769305af" [[package]] name = "futures-task" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d3d00f4eddb73e498a54394f228cd55853bdf059259e8e7bc6e69d408892e99" +checksum = "dabf1872aaab32c886832f2276d2f5399887e2bd613698a02359e4ea83f8de12" [[package]] name = "futures-timer" @@ -2922,11 +2745,10 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36568465210a3a6ee45e1f165136d68671471a501e632e9a98d96872222b5481" +checksum = "41d22213122356472061ac0f1ab2cee28d2bac8491410fd68c2af53d1cedb83e" dependencies = [ - "autocfg", "futures 0.1.31", "futures-channel", "futures-core", @@ -2937,17 +2759,9 @@ dependencies = [ "memchr", "pin-project-lite 0.2.7", "pin-utils", - "proc-macro-hack", - "proc-macro-nested", "slab", ] -[[package]] -name = "gcc" -version = "0.3.55" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" - [[package]] name = "generic-array" version = "0.12.4" @@ -3012,6 +2826,12 @@ dependencies = [ "stable_deref_trait", ] +[[package]] +name = "gimli" 
+version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" + [[package]] name = "glob" version = "0.3.0" @@ -3046,9 +2866,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.4" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7f3675cfef6a30c8031cf9e6493ebdc3bb3272a3fea3923c4210d1830e6a472" +checksum = "7fd819562fcebdac5afc5c113c3ec36f902840b70fd4fc458799c8ce4607ae55" dependencies = [ "bytes 1.1.0", "fnv", @@ -3065,9 +2885,9 @@ dependencies = [ [[package]] name = "handlebars" -version = "3.5.5" +version = "4.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4498fc115fa7d34de968184e473529abb40eeb6be8bc5f7faba3d08c316cb3e3" +checksum = "8ad84da8f63da982543fc85fcabaee2ad1fdd809d99d64a48887e2e942ddfe46" dependencies = [ "log", "pest", @@ -3102,61 +2922,21 @@ dependencies = [ ] [[package]] -name = "headers" -version = "0.3.4" +name = "heck" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0b7591fb62902706ae8e7aaff416b1b0fa2c0fd0878b46dc13baa3712d8a855" +checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" dependencies = [ - "base64 0.13.0", - "bitflags", - "bytes 1.1.0", - "headers-core", - "http", - "mime", - "sha-1 0.9.8", - "time 0.1.44", + "unicode-segmentation", ] [[package]] -name = "headers-core" -version = "0.2.0" +name = "hermit-abi" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" dependencies = [ - "http", -] - -[[package]] -name = "headers-relay" -version = "0.1.0" -dependencies = [ - "async-std", - "async-trait", - "backoff", - "futures 0.3.17", - "linked-hash-map", - "log", - "num-traits", - "parking_lot 0.11.2", - "relay-utils", -] - -[[package]] -name = "heck" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" -dependencies = [ - "unicode-segmentation", -] - -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", + "libc", ] [[package]] @@ -3167,28 +2947,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hex-literal" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "961de220ec9a91af2e1e5bd80d02109155695e516771762381ef8581317066e0" -dependencies = [ - "hex-literal-impl", - "proc-macro-hack", -] - -[[package]] -name = "hex-literal" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21e4590e13640f19f249fe3e4eca5113bc4289f2497710378190e7f4bd96f45b" - -[[package]] -name = "hex-literal-impl" -version = "0.2.2" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "853f769599eb31de176303197b7ba4973299c38c7a7604a6bc88c3eef05b9b46" -dependencies = [ - "proc-macro-hack", -] +checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" [[package]] name = "hex_fmt" @@ -3251,9 +3012,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.4" 
+version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "527e8c9ac747e28542699a951517aa9a6945af506cd1f2e1b53a576c17b6cc11" +checksum = "1323096b05d41827dadeaee54c9981958c0f94e670bc94ed80037d1a7b8b186b" dependencies = [ "bytes 1.1.0", "fnv", @@ -3262,9 +3023,9 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "399c583b2979440c60be0821a6199eca73bc3c8dcd9d070d75ac726e2c6186e5" +checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" dependencies = [ "bytes 1.1.0", "http", @@ -3279,9 +3040,9 @@ checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" [[package]] name = "httpdate" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" +checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "humantime" @@ -3300,9 +3061,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.12" +version = "0.14.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13f67199e765030fa08fe0bd581af683f0d5bc04ea09c2b1102012c5fb90e7fd" +checksum = "436ec0091e4f20e655156a30a0df3770fe2900aa301e548e08446ec794b6953c" dependencies = [ "bytes 1.1.0", "futures-channel", @@ -3315,7 +3076,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite 0.2.7", - "socket2 0.4.1", + "socket2 0.4.2", "tokio", "tower-service", "tracing", @@ -3339,19 +3100,6 @@ dependencies = [ "webpki", ] -[[package]] -name = "hyper-tls" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" -dependencies = [ - "bytes 1.1.0", - "hyper", - "native-tls", - "tokio", - "tokio-native-tls", -] - [[package]] name = "idna" version = "0.1.5" @@ -3376,9 +3124,9 @@ dependencies = [ [[package]] name = "if-addrs" -version = "0.6.6" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9a83ec4af652890ac713ffd8dc859e650420a5ef47f7b9be29b6664ab50fbc8" +checksum = "2273e421f7c4f0fc99e1934fe4776f59d8df2972f4199d703fc0da9f2a9f73de" dependencies = [ "if-addrs-sys", "libc", @@ -3402,7 +3150,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae8ab7f67bad3240049cb24fb9cb0b4c2c6af4c245840917fbbdededeee91179" dependencies = [ "async-io", - "futures 0.3.17", + "futures 0.3.18", "futures-lite", "if-addrs", "ipnet", @@ -3431,9 +3179,9 @@ dependencies = [ [[package]] name = "impl-serde" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b47ca4d2b6931707a55fce5cf66aff80e2178c8b63bbb4ecb5695cbc870ddf6f" +checksum = "4551f042f3438e64dbd6226b20527fc84a6e1fe65688b58746a2f53623f25f5c" dependencies = [ "serde", ] @@ -3462,9 +3210,9 @@ dependencies = [ [[package]] name = "instant" -version = "0.1.10" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bee0328b1209d157ef001c94dd85b4f8f64139adb0eac2659f4b08382b2f474d" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if 1.0.0", ] @@ -3490,10 +3238,20 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum 
= "64fa110ec7b8f493f416eed552740d10e7030ad5f63b2308f82c9608ec2df275" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "futures-timer 2.0.2", ] +[[package]] +name = "io-lifetimes" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "278e90d6f8a6c76a8334b336e306efa3c5f2b604048cbfd486d6f49878e3af14" +dependencies = [ + "rustc_version 0.4.0", + "winapi 0.3.9", +] + [[package]] name = "iovec" version = "0.1.4" @@ -3505,9 +3263,9 @@ dependencies = [ [[package]] name = "ip_network" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09b746553d2f4a1ca26fab939943ddfb217a091f34f53571620a8e3d30691303" +checksum = "aa2f047c0a98b2f299aa5d6d7088443570faae494e9ae1305e48be000c9e0eb1" [[package]] name = "ipconfig" @@ -3518,7 +3276,7 @@ dependencies = [ "socket2 0.3.19", "widestring", "winapi 0.3.9", - "winreg 0.6.2", + "winreg", ] [[package]] @@ -3529,11 +3287,12 @@ checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" [[package]] name = "isahc" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "431445cb4ba85a80cb1438a9ae8042dadb78ae4046ecee89ad027b614aa0ddb7" +checksum = "d140e84730d325378912ede32d7cd53ef1542725503b3353e5ec8113c7c6f588" dependencies = [ "async-channel", + "castaway", "crossbeam-utils", "curl", "curl-sys", @@ -3579,9 +3338,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.54" +version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1866b355d9c878e5e607473cbe3f63282c0b7aad2db1dbebf55076c686918254" +checksum = "7cc9ffccd38c451a86bf13657df244e9c3f37493cce8e5e21e940963777acc84" dependencies = [ "wasm-bindgen", ] @@ -3606,8 +3365,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2b99d4207e2a04fb4581746903c2bb7eb376f88de9c699d0f3e10feeac0cd3a" dependencies = [ "derive_more", - "futures 0.3.17", - "jsonrpc-core 18.0.0", + "futures 0.3.18", + "jsonrpc-core", "jsonrpc-pubsub", "log", "serde", @@ -3615,28 +3374,13 @@ dependencies = [ "url 1.7.2", ] -[[package]] -name = "jsonrpc-core" -version = "17.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4467ab6dfa369b69e52bd0692e480c4d117410538526a57a304a0f2250fd95e" -dependencies = [ - "futures 0.3.17", - "futures-executor", - "futures-util", - "log", - "serde", - "serde_derive", - "serde_json", -] - [[package]] name = "jsonrpc-core" version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "futures-executor", "futures-util", "log", @@ -3651,7 +3395,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b51da17abecbdab3e3d4f26b01c5ec075e88d3abe3ab3b05dc9aa69392764ec0" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "jsonrpc-client-transports", ] @@ -3673,9 +3417,9 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1dea6e07251d9ce6a552abfb5d7ad6bc290a4596c8dcc3d795fae2bbdc1f3ff" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "hyper", - "jsonrpc-core 18.0.0", + "jsonrpc-core", "jsonrpc-server-utils", "log", "net2", @@ -3689,8 +3433,8 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"382bb0206323ca7cda3dcd7e245cea86d37d02457a02a975e3378fb149a48845" dependencies = [ - "futures 0.3.17", - "jsonrpc-core 18.0.0", + "futures 0.3.18", + "jsonrpc-core", "jsonrpc-server-utils", "log", "parity-tokio-ipc", @@ -3704,8 +3448,8 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240f87695e6c6f62fb37f05c02c04953cf68d6408b8c1c89de85c7a0125b1011" dependencies = [ - "futures 0.3.17", - "jsonrpc-core 18.0.0", + "futures 0.3.18", + "jsonrpc-core", "lazy_static", "log", "parking_lot 0.11.2", @@ -3720,9 +3464,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa4fdea130485b572c39a460d50888beb00afb3e35de23ccd7fad8ff19f0e0d4" dependencies = [ "bytes 1.1.0", - "futures 0.3.17", + "futures 0.3.18", "globset", - "jsonrpc-core 18.0.0", + "jsonrpc-core", "lazy_static", "log", "tokio", @@ -3737,8 +3481,8 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f892c7d766369475ab7b0669f417906302d7c0fb521285c0a0c92e52e7c8e946" dependencies = [ - "futures 0.3.17", - "jsonrpc-core 18.0.0", + "futures 0.3.18", + "jsonrpc-core", "jsonrpc-server-utils", "log", "parity-ws", @@ -3746,6 +3490,18 @@ dependencies = [ "slab", ] +[[package]] +name = "jsonrpsee" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6373a33d987866ccfe1af4bc11b089dce941764313f9fd8b7cf13fcb51b72dc5" +dependencies = [ + "jsonrpsee-proc-macros 0.4.1", + "jsonrpsee-types 0.4.1", + "jsonrpsee-utils", + "jsonrpsee-ws-client 0.4.1", +] + [[package]] name = "jsonrpsee-proc-macros" version = "0.3.1" @@ -3760,6 +3516,19 @@ dependencies = [ "syn", ] +[[package]] +name = "jsonrpsee-proc-macros" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d802063f7a3c867456955f9d2f15eb3ee0edb5ec9ec2b5526324756759221c0f" +dependencies = [ + "log", + "proc-macro-crate 1.1.0", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "jsonrpsee-types" version = "0.3.1" @@ -3778,6 +3547,36 @@ dependencies = [ "thiserror", ] +[[package]] +name = "jsonrpsee-types" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62f778cf245158fbd8f5d50823a2e9e4c708a40be164766bd35e9fb1d86715b2" +dependencies = [ + "anyhow", + "async-trait", + "beef", + "futures-channel", + "futures-util", + "hyper", + "log", + "serde", + "serde_json", + "soketto 0.7.1", + "thiserror", +] + +[[package]] +name = "jsonrpsee-utils" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0109c4f972058f3b1925b73a17210aff7b63b65967264d0045d15ee88fe84f0c" +dependencies = [ + "arrayvec 0.7.2", + "beef", + "jsonrpsee-types 0.4.1", +] + [[package]] name = "jsonrpsee-ws-client" version = "0.3.1" @@ -3786,8 +3585,8 @@ checksum = "9841352dbecf4c2ed5dc71698df9f1660262ae4e0b610e968602529bdbcf7b30" dependencies = [ "async-trait", "fnv", - "futures 0.3.17", - "jsonrpsee-types", + "futures 0.3.18", + "jsonrpsee-types 0.3.1", "log", "pin-project 1.0.8", "rustls", @@ -3802,6 +3601,30 @@ dependencies = [ "url 2.2.2", ] +[[package]] +name = "jsonrpsee-ws-client" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "559aa56fc402af206c00fc913dc2be1d9d788dcde045d14df141a535245d35ef" +dependencies = [ + "arrayvec 0.7.2", + "async-trait", + "fnv", + "futures 0.3.18", + "http", + "jsonrpsee-types 0.4.1", + "log", + "pin-project 1.0.8", + "rustls-native-certs", + "serde", + 
"serde_json", + "soketto 0.7.1", + "thiserror", + "tokio", + "tokio-rustls", + "tokio-util", +] + [[package]] name = "keccak" version = "0.1.0" @@ -3898,9 +3721,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.103" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8f7255a17a627354f321ef0055d63b898c6fb27eff628af4d1b66b7331edf6" +checksum = "8521a1b57e76b1ec69af7599e75e38e7b7fad6610f037db8c79b127201b5d119" [[package]] name = "libloading" @@ -3914,9 +3737,9 @@ dependencies = [ [[package]] name = "libloading" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f84d96438c15fcd6c3f244c8fce01d1e2b9c6b5623e9c711dc9286d8fc92d6a" +checksum = "afe203d669ec979b7128619bae5a63b7b42e9203c1b29146079ee05e2f604b52" dependencies = [ "cfg-if 1.0.0", "winapi 0.3.9", @@ -3930,9 +3753,9 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" [[package]] name = "libnghttp2-sys" -version = "0.1.6+1.43.0" +version = "0.1.7+1.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0af55541a8827e138d59ec9e5877fb6095ece63fb6f4da45e7491b4fbd262855" +checksum = "57ed28aba195b38d5ff02b9170cbff627e336a20925e43b4945390401c5dc93f" dependencies = [ "cc", "libc", @@ -3940,13 +3763,13 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.39.1" +version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9004c06878ef8f3b4b4067e69a140d87ed20bf777287f82223e49713b36ee433" +checksum = "3bec54343492ba5940a6c555e512c6721139835d28c59bc22febece72dfd0d9d" dependencies = [ "atomic", "bytes 1.1.0", - "futures 0.3.17", + "futures 0.3.18", "lazy_static", "libp2p-core", "libp2p-deflate", @@ -3956,12 +3779,14 @@ dependencies = [ "libp2p-identify", "libp2p-kad", "libp2p-mdns", + "libp2p-metrics", "libp2p-mplex", "libp2p-noise", "libp2p-ping", "libp2p-plaintext", "libp2p-pnet", "libp2p-relay", + "libp2p-rendezvous", "libp2p-request-response", "libp2p-swarm", "libp2p-swarm-derive", @@ -3979,19 +3804,19 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af9b4abdeaa420593a297c8592f63fad4234f4b88dc9343b8fd8e736c35faa59" +checksum = "bef22d9bba1e8bcb7ec300073e6802943fe8abb8190431842262b5f1c30abba1" dependencies = [ "asn1_der", "bs58", "ed25519-dalek", "either", "fnv", - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "lazy_static", - "libsecp256k1 0.5.0", + "libsecp256k1", "log", "multiaddr", "multihash 0.14.0", @@ -4000,36 +3825,36 @@ dependencies = [ "pin-project 1.0.8", "prost", "prost-build", - "rand 0.7.3", + "rand 0.8.4", "ring", "rw-stream-sink", "sha2 0.9.8", "smallvec", "thiserror", - "unsigned-varint 0.7.0", + "unsigned-varint 0.7.1", "void", "zeroize", ] [[package]] name = "libp2p-deflate" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66097fccc0b7f8579f90a03ea76ba6196332ea049fd07fd969490a06819dcdc8" +checksum = "51a800adb195f33de63f4b17b63fe64cfc23bf2c6a0d3d0d5321328664e65197" dependencies = [ "flate2", - "futures 0.3.17", + "futures 0.3.18", "libp2p-core", ] [[package]] name = "libp2p-dns" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"58ff08b3196b85a17f202d80589e93b1660a574af67275706657fdc762e42c32" +checksum = "bb8f89d15cb6e3c5bc22afff7513b11bab7856f2872d3cfba86f7f63a06bc498" dependencies = [ "async-std-resolver", - "futures 0.3.17", + "futures 0.3.18", "libp2p-core", "log", "smallvec", @@ -4038,13 +3863,13 @@ dependencies = [ [[package]] name = "libp2p-floodsub" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "404eca8720967179dac7a5b4275eb91f904a53859c69ca8d018560ad6beb214f" +checksum = "aab3d7210901ea51b7bae2b581aa34521797af8c4ec738c980bda4a06434067f" dependencies = [ "cuckoofilter", "fnv", - "futures 0.3.17", + "futures 0.3.18", "libp2p-core", "libp2p-swarm", "log", @@ -4056,16 +3881,16 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.32.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1cc48709bcbc3a3321f08a73560b4bbb4166a7d56f6fdb615bc775f4f91058e" +checksum = "dfeead619eb5dac46e65acc78c535a60aaec803d1428cca6407c3a4fc74d698d" dependencies = [ "asynchronous-codec 0.6.0", - "base64 0.13.0", + "base64", "byteorder", "bytes 1.1.0", "fnv", - "futures 0.3.17", + "futures 0.3.18", "hex_fmt", "libp2p-core", "libp2p-swarm", @@ -4076,20 +3901,21 @@ dependencies = [ "regex", "sha2 0.9.8", "smallvec", - "unsigned-varint 0.7.0", + "unsigned-varint 0.7.1", "wasm-timer", ] [[package]] name = "libp2p-identify" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7b61f6cf07664fb97016c318c4d4512b3dd4cc07238607f3f0163245f99008e" +checksum = "cca1275574183f288ff8b72d535d5ffa5ea9292ef7829af8b47dcb197c7b0dcd" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "libp2p-core", "libp2p-swarm", "log", + "lru 0.6.6", "prost", "prost-build", "smallvec", @@ -4098,16 +3924,16 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.31.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50ed78489c87924235665a0ab345b298ee34dff0f7ad62c0ba6608b2144fb75e" +checksum = "a2297dc0ca285f3a09d1368bde02449e539b46f94d32d53233f53f6625bcd3ba" dependencies = [ "arrayvec 0.5.2", "asynchronous-codec 0.6.0", "bytes 1.1.0", "either", "fnv", - "futures 0.3.17", + "futures 0.3.18", "libp2p-core", "libp2p-swarm", "log", @@ -4117,21 +3943,21 @@ dependencies = [ "sha2 0.9.8", "smallvec", "uint", - "unsigned-varint 0.7.0", + "unsigned-varint 0.7.1", "void", "wasm-timer", ] [[package]] name = "libp2p-mdns" -version = "0.31.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a29e6cbc2a24b8471b6567e580a0e8e7b70a6d0f0ea2be0844d1e842d7d4fa33" +checksum = "14c864b64bdc8a84ff3910a0df88e6535f256191a450870f1e7e10cbf8e64d45" dependencies = [ "async-io", "data-encoding", "dns-parser", - "futures 0.3.17", + "futures 0.3.18", "if-watch", "lazy_static", "libp2p-core", @@ -4139,37 +3965,51 @@ dependencies = [ "log", "rand 0.8.4", "smallvec", - "socket2 0.4.1", + "socket2 0.4.2", "void", ] +[[package]] +name = "libp2p-metrics" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4af432fcdd2f8ba4579b846489f8f0812cfd738ced2c0af39df9b1c48bbb6ab2" +dependencies = [ + "libp2p-core", + "libp2p-identify", + "libp2p-kad", + "libp2p-ping", + "libp2p-swarm", + "open-metrics-client", +] + [[package]] name = "libp2p-mplex" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "313d9ea526c68df4425f580024e67a9d3ffd49f2c33de5154b1f5019816f7a99" +checksum = "7f2cd64ef597f40e14bfce0497f50ecb63dd6d201c61796daeb4227078834fbf" dependencies = [ "asynchronous-codec 0.6.0", "bytes 1.1.0", - "futures 0.3.17", + "futures 0.3.18", "libp2p-core", "log", "nohash-hasher", "parking_lot 0.11.2", "rand 0.7.3", "smallvec", - "unsigned-varint 0.7.0", + "unsigned-varint 0.7.1", ] [[package]] name = "libp2p-noise" -version = "0.32.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f1db7212f342b6ba7c981cc40e31f76e9e56cb48e65fa4c142ecaca5839523e" +checksum = "a8772c7a99088221bb7ca9c5c0574bf55046a7ab4c319f3619b275f28c8fb87a" dependencies = [ "bytes 1.1.0", "curve25519-dalek 3.2.0", - "futures 0.3.17", + "futures 0.3.18", "lazy_static", "libp2p-core", "log", @@ -4185,11 +4025,11 @@ dependencies = [ [[package]] name = "libp2p-ping" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2482cfd9eb0b7a0baaf3e7b329dc4f2785181a161b1a47b7192f8d758f54a439" +checksum = "80ef7b0ec5cf06530d9eb6cf59ae49d46a2c45663bde31c25a12f682664adbcf" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "libp2p-core", "libp2p-swarm", "log", @@ -4200,28 +4040,28 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13b4783e5423870b9a5c199f65a7a3bc66d86ab56b2b9beebf3c338d889cf8e4" +checksum = "5fba1a6ff33e4a274c89a3b1d78b9f34f32af13265cc5c46c16938262d4e945a" dependencies = [ "asynchronous-codec 0.6.0", "bytes 1.1.0", - "futures 0.3.17", + "futures 0.3.18", "libp2p-core", "log", "prost", "prost-build", - "unsigned-varint 0.7.0", + "unsigned-varint 0.7.1", "void", ] [[package]] name = "libp2p-pnet" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07cb4dd4b917e5b40ddefe49b96b07adcd8d342e0317011d175b7b2bb1dcc974" +checksum = "0f1a458bbda880107b5b36fcb9b5a1ef0c329685da0e203ed692a8ebe64cc92c" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "log", "pin-project 1.0.8", "rand 0.7.3", @@ -4231,13 +4071,13 @@ dependencies = [ [[package]] name = "libp2p-relay" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0133f6cfd81cdc16e716de2982e012c62e6b9d4f12e41967b3ee361051c622aa" +checksum = "2852b61c90fa8ce3c8fcc2aba76e6cefc20d648f9df29157d6b3a916278ef3e3" dependencies = [ "asynchronous-codec 0.6.0", "bytes 1.1.0", - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "libp2p-core", "libp2p-swarm", @@ -4247,39 +4087,60 @@ dependencies = [ "prost-build", "rand 0.7.3", "smallvec", - "unsigned-varint 0.7.0", + "unsigned-varint 0.7.1", + "void", + "wasm-timer", +] + +[[package]] +name = "libp2p-rendezvous" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14a6d2b9e7677eff61dc3d2854876aaf3976d84a01ef6664b610c77a0c9407c5" +dependencies = [ + "asynchronous-codec 0.6.0", + "bimap", + "futures 0.3.18", + "libp2p-core", + "libp2p-swarm", + "log", + "prost", + "prost-build", + "rand 0.8.4", + "sha2 0.9.8", + "thiserror", + "unsigned-varint 0.7.1", "void", "wasm-timer", ] [[package]] name = "libp2p-request-response" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06cdae44b6821466123af93cbcdec7c9e6ba9534a8af9cdc296446d39416d241" +checksum = 
"a877a4ced6d46bf84677e1974e8cf61fb434af73b2e96fb48d6cb6223a4634d8" dependencies = [ "async-trait", "bytes 1.1.0", - "futures 0.3.17", + "futures 0.3.18", "libp2p-core", "libp2p-swarm", "log", - "lru 0.6.6", - "minicbor", + "lru 0.7.0", "rand 0.7.3", "smallvec", - "unsigned-varint 0.7.0", + "unsigned-varint 0.7.1", "wasm-timer", ] [[package]] name = "libp2p-swarm" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7083861341e1555467863b4cd802bea1e8c4787c0f7b5110097d0f1f3248f9a9" +checksum = "3f5184a508f223bc100a12665517773fb8730e9f36fc09eefb670bf01b107ae9" dependencies = [ "either", - "futures 0.3.17", + "futures 0.3.18", "libp2p-core", "log", "rand 0.7.3", @@ -4290,9 +4151,9 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.24.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab8cb308d4fc854869f5abb54fdab0833d2cf670d407c745849dc47e6e08d79c" +checksum = "072c290f727d39bdc4e9d6d1c847978693d25a673bd757813681e33e5f6c00c2" dependencies = [ "quote", "syn", @@ -4300,40 +4161,40 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79edd26b6b4bb5feee210dcda562dca186940dfecb0024b979c3f50824b3bf28" +checksum = "7399c5b6361ef525d41c11fcf51635724f832baf5819b30d3d873eabb4fbae4b" dependencies = [ "async-io", - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "if-watch", "ipnet", "libc", "libp2p-core", "log", - "socket2 0.4.1", + "socket2 0.4.2", ] [[package]] name = "libp2p-uds" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "280e793440dd4e9f273d714f4497325c72cddb0fe85a49f9a03c88f41dd20182" +checksum = "b8b7563e46218165dfd60f64b96f7ce84590d75f53ecbdc74a7dd01450dc5973" dependencies = [ "async-std", - "futures 0.3.17", + "futures 0.3.18", "libp2p-core", "log", ] [[package]] name = "libp2p-wasm-ext" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f553b7140fad3d7a76f50497b0ea591e26737d9607428a75509fc191e4d1b1f6" +checksum = "1008a302b73c5020251f9708c653f5ed08368e530e247cc9cd2f109ff30042cf" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "js-sys", "libp2p-core", "parity-send-wrapper", @@ -4343,29 +4204,29 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddf99dcbf5063e9d59087f61b1e85c686ceab2f5abedb472d32288065c0e5e27" +checksum = "22e12df82d1ed64969371a9e65ea92b91064658604cc2576c2757f18ead9a1cf" dependencies = [ "either", - "futures 0.3.17", + "futures 0.3.18", "futures-rustls", "libp2p-core", "log", "quicksink", "rw-stream-sink", - "soketto 0.4.2", + "soketto 0.7.1", "url 2.2.2", "webpki-roots", ] [[package]] name = "libp2p-yamux" -version = "0.33.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "214cc0dd9c37cbed27f0bb1eba0c41bbafdb93a8be5e9d6ae1e6b4b42cd044bf" +checksum = "4e7362abb8867d7187e7e93df17f460d554c997fc5c8ac57dc1259057f6889af" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "libp2p-core", "parking_lot 0.11.2", "thiserror", @@ -4384,44 +4245,6 @@ dependencies = [ "libc", ] -[[package]] -name = "libsecp256k1" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "bd1137239ab33b41aa9637a88a28249e5e70c40a42ccc92db7f12cc356c1fcd7" -dependencies = [ - "arrayref", - "base64 0.12.3", - "digest 0.9.0", - "hmac-drbg", - "libsecp256k1-core 0.2.2", - "libsecp256k1-gen-ecmult 0.2.1", - "libsecp256k1-gen-genmult 0.2.1", - "rand 0.7.3", - "serde", - "sha2 0.9.8", - "typenum", -] - -[[package]] -name = "libsecp256k1" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9d220bc1feda2ac231cb78c3d26f27676b8cf82c96971f7aeef3d0cf2797c73" -dependencies = [ - "arrayref", - "base64 0.12.3", - "digest 0.9.0", - "hmac-drbg", - "libsecp256k1-core 0.2.2", - "libsecp256k1-gen-ecmult 0.2.1", - "libsecp256k1-gen-genmult 0.2.1", - "rand 0.7.3", - "serde", - "sha2 0.9.8", - "typenum", -] - [[package]] name = "libsecp256k1" version = "0.7.0" @@ -4429,29 +4252,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b0452aac8bab02242429380e9b2f94ea20cea2b37e2c1777a1358799bbe97f37" dependencies = [ "arrayref", - "base64 0.13.0", + "base64", "digest 0.9.0", "hmac-drbg", - "libsecp256k1-core 0.3.0", - "libsecp256k1-gen-ecmult 0.3.0", - "libsecp256k1-gen-genmult 0.3.0", + "libsecp256k1-core", + "libsecp256k1-gen-ecmult", + "libsecp256k1-gen-genmult", "rand 0.8.4", "serde", "sha2 0.9.8", "typenum", ] -[[package]] -name = "libsecp256k1-core" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0f6ab710cec28cef759c5f18671a27dae2a5f952cdaaee1d8e2908cb2478a80" -dependencies = [ - "crunchy", - "digest 0.9.0", - "subtle", -] - [[package]] name = "libsecp256k1-core" version = "0.3.0" @@ -4463,31 +4275,13 @@ dependencies = [ "subtle", ] -[[package]] -name = "libsecp256k1-gen-ecmult" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccab96b584d38fac86a83f07e659f0deafd0253dc096dab5a36d53efe653c5c3" -dependencies = [ - "libsecp256k1-core 0.2.2", -] - [[package]] name = "libsecp256k1-gen-ecmult" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809" dependencies = [ - "libsecp256k1-core 0.3.0", -] - -[[package]] -name = "libsecp256k1-gen-genmult" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67abfe149395e3aa1c48a2beb32b068e2334402df8181f818d3aee2b304c4f5d" -dependencies = [ - "libsecp256k1-core 0.2.2", + "libsecp256k1-core", ] [[package]] @@ -4496,7 +4290,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c" dependencies = [ - "libsecp256k1-core 0.3.0", + "libsecp256k1-core", ] [[package]] @@ -4536,6 +4330,12 @@ dependencies = [ "statrs", ] +[[package]] +name = "linux-raw-sys" +version = "0.0.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "687387ff42ec7ea4f2149035a5675fedb675d26f98db90a1846ac63d3addb5f5" + [[package]] name = "lock_api" version = "0.3.4" @@ -4649,9 +4449,9 @@ checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" [[package]] name = "matrixmultiply" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a8a15b776d9dfaecd44b03c5828c2199cddff5247215858aac14624f8d6b741" +checksum = "add85d4dd35074e6fedc608f8c8f513a3548619a9024b751949ef0e8e45a4d84" dependencies = [ "rawpointer", ] @@ -4681,6 +4481,15 @@ dependencies = [ "libc", ] 
+[[package]] +name = "memmap2" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4647a11b578fead29cdbb34d4adef8dd3dc35b876c9c6d5240d83f205abfe96e" +dependencies = [ + "libc", +] + [[package]] name = "memoffset" version = "0.6.4" @@ -4736,21 +4545,22 @@ dependencies = [ "async-trait", "bp-messages", "bp-runtime", - "futures 0.3.17", + "futures 0.3.18", "hex", "log", "num-traits", "parking_lot 0.11.2", "relay-utils", + "sp-arithmetic", ] [[package]] name = "metered-channel" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "derive_more", - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "thiserror", "tracing", @@ -4758,11 +4568,11 @@ dependencies = [ [[package]] name = "mick-jaeger" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c023c3f16109e7f33aa451f773fd61070e265b4977d0b6e344a51049296dd7df" +checksum = "eaa77fad8461bb1e0d01be11299e24c6e544007715ed442bfec29f165dc487ae" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "rand 0.7.3", "thrift", ] @@ -4771,14 +4581,18 @@ dependencies = [ name = "millau-bridge-node" version = "0.1.0" dependencies = [ + "beefy-gadget", + "beefy-gadget-rpc", + "beefy-primitives", "bp-millau", "bp-runtime", "frame-benchmarking", "frame-benchmarking-cli", - "jsonrpc-core 18.0.0", + "jsonrpc-core", "millau-runtime", "node-inspect", "pallet-bridge-messages", + "pallet-mmr-rpc", "pallet-transaction-payment-rpc", "sc-basic-authorship", "sc-cli", @@ -4809,6 +4623,7 @@ dependencies = [ name = "millau-runtime" version = "0.1.0" dependencies = [ + "beefy-primitives", "bp-header-chain", "bp-messages", "bp-millau", @@ -4821,14 +4636,18 @@ dependencies = [ "frame-support", "frame-system", "frame-system-rpc-runtime-api", - "hex-literal 0.3.3", + "hex-literal", "pallet-aura", "pallet-balances", + "pallet-beefy", + "pallet-beefy-mmr", "pallet-bridge-dispatch", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-token-swap", "pallet-grandpa", + "pallet-mmr", + "pallet-mmr-primitives", "pallet-randomness-collective-flip", "pallet-session", "pallet-shift-session-manager", @@ -4862,24 +4681,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" [[package]] -name = "minicbor" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51aa5bb0ca22415daca596a227b507f880ad1b2318a87fa9325312a5d285ca0d" -dependencies = [ - "minicbor-derive", -] - -[[package]] -name = "minicbor-derive" -version = "0.6.4" +name = "minimal-lexical" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54999f917cd092b13904737e26631aa2b2b88d625db68e4bab461dcd8006c788" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" @@ -4912,9 +4717,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.7.13" +version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c2bdb6314ec10835cd3293dd268473a835c02b7b352e788be788b3c6ca6bb16" +checksum = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc" 
dependencies = [ "libc", "log", @@ -4958,9 +4763,9 @@ dependencies = [ [[package]] name = "more-asserts" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0debeb9fcf88823ea64d64e4a815ab1643f33127d995978e099942ce38f25238" +checksum = "7843ec2de400bcbc6a6328c958dc38e5359da6e93e72e37bc5246bf1ae776389" [[package]] name = "multiaddr" @@ -4976,7 +4781,7 @@ dependencies = [ "percent-encoding 2.1.0", "serde", "static_assertions", - "unsigned-varint 0.7.0", + "unsigned-varint 0.7.1", "url 2.2.2", ] @@ -5018,7 +4823,7 @@ dependencies = [ "generic-array 0.14.4", "multihash-derive", "sha2 0.9.8", - "unsigned-varint 0.7.0", + "unsigned-varint 0.7.1", ] [[package]] @@ -5043,16 +4848,16 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "multistream-select" -version = "0.10.2" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d91ec0a2440aaff5f78ec35631a7027d50386c6163aa975f7caa0d5da4b6ff8" +checksum = "56a336acba8bc87c8876f6425407dbbe6c417bf478b22015f8fb0994ef3bc0ab" dependencies = [ "bytes 1.1.0", - "futures 0.3.17", + "futures 0.3.18", "log", "pin-project 1.0.8", "smallvec", - "unsigned-varint 0.7.0", + "unsigned-varint 0.7.1", ] [[package]] @@ -5093,24 +4898,6 @@ dependencies = [ "rand 0.8.4", ] -[[package]] -name = "native-tls" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48ba9f7719b5a0f42f338907614285fb5fd70e53858141f69898a1fb7203b24d" -dependencies = [ - "lazy_static", - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", -] - [[package]] name = "net2" version = "0.2.37" @@ -5125,7 +4912,7 @@ dependencies = [ [[package]] name = "node-inspect" version = "0.9.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "derive_more", "parity-scale-codec", @@ -5153,13 +4940,12 @@ checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" [[package]] name = "nom" -version = "6.1.2" +version = "7.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7413f999671bd4745a7b624bd370a569fb6bc574b23c83a3c5ed2e453f3d5e2" +checksum = "1b1d11e1ef389c76fe5b81bcaf2ea32cf88b62bc494e19f493d0b30e7a930109" dependencies = [ - "bitvec 0.19.5", - "funty", "memchr", + "minimal-lexical", "version_check", ] @@ -5257,9 +5043,9 @@ dependencies = [ [[package]] name = "object" -version = "0.26.2" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39f37e50073ccad23b6d09bcb5b263f4e76d3bb6038e4a3c08e52162ffa8abc2" +checksum = "67ac1d3f9a1d3616fd9a60c8d74296f22406a238b6a72f5cc1e6f314df4ffbf9" dependencies = [ "crc32fast", "indexmap", @@ -5285,17 +5071,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] -name = "openssl" -version = "0.10.36" +name = "open-metrics-client" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d9facdb76fec0b73c406f125d44d86fdad818d66fef0531eec9233ca425ff4a" +checksum = "7337d80c23c2d8b1349563981bc4fb531220733743ba8115454a67b181173f0d" dependencies = [ - "bitflags", - 
"cfg-if 1.0.0", - "foreign-types", - "libc", - "once_cell", - "openssl-sys", + "dtoa", + "itoa", + "open-metrics-client-derive-text-encode", + "owning_ref", +] + +[[package]] +name = "open-metrics-client-derive-text-encode" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a15c83b586f00268c619c1cb3340ec1a6f59dd9ba1d9833a273a68e6d5cd8ffc" +dependencies = [ + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -5306,9 +5101,9 @@ checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" [[package]] name = "openssl-sys" -version = "0.9.66" +version = "0.9.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1996d2d305e561b70d1ee0c53f1542833f4e1ac6ce9a6708b6ff2738ca67dc82" +checksum = "7df13d165e607909b363a4757a6f133f8a818a74e9d3a98d09c6128e15fa4c73" dependencies = [ "autocfg", "cc", @@ -5338,7 +5133,7 @@ dependencies = [ [[package]] name = "pallet-aura" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-support", "frame-system", @@ -5354,7 +5149,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-support", "frame-system", @@ -5370,7 +5165,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-support", "frame-system", @@ -5385,7 +5180,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5409,7 +5204,7 @@ dependencies = [ [[package]] name = "pallet-bags-list" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-election-provider-support", "frame-support", @@ -5424,7 +5219,7 @@ dependencies = [ [[package]] name = "pallet-balances" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5439,7 +5234,7 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "beefy-primitives", "frame-support", @@ -5447,63 +5242,27 @@ dependencies = 
[ "pallet-session", "parity-scale-codec", "scale-info", - "serde", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-beefy-mmr" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" -dependencies = [ - "beefy-merkle-tree", - "beefy-primitives", - "frame-support", - "frame-system", - "hex", - "libsecp256k1 0.7.0", - "log", - "pallet-beefy", - "pallet-mmr", - "pallet-mmr-primitives", - "pallet-session", - "parity-scale-codec", - "scale-info", - "serde", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-bounties" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" -dependencies = [ - "frame-support", - "frame-system", - "log", - "pallet-treasury", - "parity-scale-codec", - "scale-info", - "sp-core", - "sp-io", + "serde", "sp-runtime", "sp-std", ] [[package]] -name = "pallet-bridge-currency-exchange" -version = "0.1.0" +name = "pallet-beefy-mmr" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ - "bp-currency-exchange", - "bp-header-chain", - "frame-benchmarking", + "beefy-merkle-tree", + "beefy-primitives", "frame-support", "frame-system", + "hex", + "libsecp256k1", "log", + "pallet-beefy", + "pallet-mmr", + "pallet-mmr-primitives", + "pallet-session", "parity-scale-codec", "scale-info", "serde", @@ -5514,14 +5273,14 @@ dependencies = [ ] [[package]] -name = "pallet-bridge-dispatch" -version = "0.1.0" +name = "pallet-bounties" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ - "bp-message-dispatch", - "bp-runtime", "frame-support", "frame-system", "log", + "pallet-treasury", "parity-scale-codec", "scale-info", "sp-core", @@ -5531,19 +5290,17 @@ dependencies = [ ] [[package]] -name = "pallet-bridge-eth-poa" +name = "pallet-bridge-dispatch" version = "0.1.0" dependencies = [ - "bp-eth-poa", - "frame-benchmarking", + "bp-message-dispatch", + "bp-runtime", "frame-support", "frame-system", - "hex-literal 0.3.3", - "libsecp256k1 0.7.0", "log", "parity-scale-codec", "scale-info", - "serde", + "sp-core", "sp-io", "sp-runtime", "sp-std", @@ -5577,7 +5334,7 @@ dependencies = [ name = "pallet-bridge-messages" version = "0.1.0" dependencies = [ - "bitvec 0.20.4", + "bitvec", "bp-message-dispatch", "bp-messages", "bp-runtime", @@ -5585,7 +5342,7 @@ dependencies = [ "frame-support", "frame-system", "hex", - "hex-literal 0.3.3", + "hex-literal", "log", "num-traits", "pallet-balances", @@ -5625,7 +5382,7 @@ dependencies = [ [[package]] name = "pallet-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5642,7 +5399,7 @@ dependencies = [ [[package]] name = "pallet-democracy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5658,7 +5415,7 @@ dependencies = [ [[package]] name = 
"pallet-election-provider-multi-phase" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-election-provider-support", "frame-support", @@ -5678,7 +5435,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-support", "frame-system", @@ -5695,7 +5452,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5718,7 +5475,7 @@ dependencies = [ [[package]] name = "pallet-identity" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "enumflags2", "frame-benchmarking", @@ -5734,7 +5491,7 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-support", "frame-system", @@ -5753,7 +5510,7 @@ dependencies = [ [[package]] name = "pallet-indices" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-support", "frame-system", @@ -5769,7 +5526,7 @@ dependencies = [ [[package]] name = "pallet-membership" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5786,7 +5543,7 @@ dependencies = [ [[package]] name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "ckb-merkle-mountain-range", "frame-benchmarking", @@ -5804,7 +5561,7 @@ dependencies = [ [[package]] name = "pallet-mmr-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-support", "frame-system", @@ -5820,9 +5577,9 @@ dependencies = [ [[package]] name = "pallet-mmr-rpc" version = "3.0.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ - "jsonrpc-core 18.0.0", + "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", "pallet-mmr-primitives", @@ -5837,7 +5594,7 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-support", "frame-system", @@ -5851,7 +5608,7 @@ dependencies = [ [[package]] name = "pallet-nicks" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-support", "frame-system", @@ -5865,7 +5622,7 @@ dependencies = [ [[package]] name = "pallet-offences" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-support", "frame-system", @@ -5882,7 +5639,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-support", "frame-system", @@ -5896,7 +5653,7 @@ dependencies = [ [[package]] name = "pallet-randomness-collective-flip" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-support", "frame-system", @@ -5910,7 +5667,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5926,7 +5683,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-support", "frame-system", @@ -5962,7 +5719,7 @@ dependencies = [ [[package]] name = "pallet-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-election-provider-support", "frame-support", @@ -5983,7 +5740,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" 
+source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "proc-macro-crate 1.1.0", "proc-macro2", @@ -5994,7 +5751,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-support", "frame-system", @@ -6008,7 +5765,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6026,7 +5783,7 @@ dependencies = [ [[package]] name = "pallet-tips" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-support", "frame-system", @@ -6044,7 +5801,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-support", "frame-system", @@ -6061,9 +5818,9 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ - "jsonrpc-core 18.0.0", + "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", "pallet-transaction-payment-rpc-runtime-api", @@ -6078,7 +5835,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -6089,7 +5846,7 @@ dependencies = [ [[package]] name = "pallet-treasury" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-support", "frame-system", @@ -6105,7 +5862,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-support", "frame-system", @@ -6120,7 +5877,7 @@ dependencies = [ [[package]] name = "pallet-vesting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = 
"git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-support", "frame-system", @@ -6133,8 +5890,8 @@ dependencies = [ [[package]] name = "pallet-xcm" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "frame-support", "frame-system", @@ -6152,7 +5909,7 @@ dependencies = [ [[package]] name = "parachain-info" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693" +source = "git+https://github.com/paritytech/cumulus?branch=master#5b245a21eb84ff7b1da6b47ad4386bda3dfb5880" dependencies = [ "cumulus-primitives-core", "frame-support", @@ -6162,17 +5919,11 @@ dependencies = [ "serde", ] -[[package]] -name = "parity-bytes" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b56e3a2420138bdb970f84dfb9c774aea80fa0e7371549eedec0d80c209c67" - [[package]] name = "parity-db" -version = "0.3.2" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91b679c6acc14fac74382942e2b73bea441686a33430b951ea03b5aeb6a7f254" +checksum = "78a95abf24f1097c6e3181abbbbfc3630b3b5e681470940f719b69acb4911c7f" dependencies = [ "blake2-rfc", "crc32fast", @@ -6181,7 +5932,7 @@ dependencies = [ "libc", "log", "lz4", - "memmap2", + "memmap2 0.2.3", "parking_lot 0.11.2", "rand 0.8.4", "snap", @@ -6193,8 +5944,8 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" dependencies = [ - "arrayvec 0.7.1", - "bitvec 0.20.4", + "arrayvec 0.7.2", + "bitvec", "byte-slice-cast", "impl-trait-for-tuples", "parity-scale-codec-derive", @@ -6225,7 +5976,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9981e32fb75e004cc148f5fb70342f393830e0a4aa62e3cc93b50976218d42b6" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "libc", "log", "rand 0.7.3", @@ -6279,9 +6030,9 @@ checksum = "be5e13c266502aadf83426d87d81a0f5d1ef45b8027f5a471c360abfe4bfae92" [[package]] name = "parity-ws" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0ab8a461779bd022964cae2b4989fa9c99deb270bec162da2125ec03c09fcaa" +checksum = "5983d3929ad50f12c3eb9a6743f19d691866ecd44da74c0a3308c3f8a56df0c6" dependencies = [ "byteorder", "bytes 0.4.12", @@ -6352,9 +6103,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf547ad0c65e31259204bd90935776d1c693cec2f4ff7abb7a1bbbd40dfe58" +checksum = "0744126afe1a6dd7f394cb50a716dbe086cb06e255e53d8d0185d82828358fb5" [[package]] name = "pbkdf2" @@ -6437,9 +6188,9 @@ dependencies = [ [[package]] name = "petgraph" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "467d164a6de56270bd7c4d070df81d07beace25012d5103ced4e9ff08d6afdb7" +checksum = "4a13a2fa9d0b63e5f22328828741e523766fff0ee9e779316902290dff3f824f" dependencies = [ "fixedbitset", "indexmap", @@ -6505,31 +6256,22 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" 
-version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" - -[[package]] -name = "plain_hasher" -version = "0.2.3" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e19e6491bdde87c2c43d70f4c194bc8a758f2eb732df00f61e43f7362e3b4cc" -dependencies = [ - "crunchy", -] +checksum = "12295df4f294471248581bc09bef3c38a5e46f1e36d6a37353621a0c6c357e1f" [[package]] name = "platforms" -version = "1.1.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "989d43012e2ca1c4a02507c67282691a0a3207f9dc67cec596b43fe925b3d325" +checksum = "e8d0eef3571242013a0d5dc84861c3ae4a652e56e12adf8bdc26ff5f8cb34c94" [[package]] name = "polkadot-approval-distribution" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -6540,10 +6282,10 @@ dependencies = [ [[package]] name = "polkadot-availability-bitfield-distribution" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "polkadot-node-network-protocol", "polkadot-node-subsystem", "polkadot-node-subsystem-util", @@ -6553,11 +6295,11 @@ dependencies = [ [[package]] name = "polkadot-availability-distribution" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "derive_more", - "futures 0.3.17", + "futures 0.3.18", "lru 0.7.0", "parity-scale-codec", "polkadot-erasure-coding", @@ -6575,10 +6317,10 @@ dependencies = [ [[package]] name = "polkadot-availability-recovery" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "lru 0.7.0", "parity-scale-codec", "polkadot-erasure-coding", @@ -6595,11 +6337,11 @@ dependencies = [ [[package]] name = "polkadot-cli" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "frame-benchmarking-cli", - "futures 0.3.17", + "futures 0.3.18", "log", "polkadot-node-core-pvf", "polkadot-service", @@ -6615,8 +6357,8 @@ dependencies = [ [[package]] name = "polkadot-client" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ 
"beefy-primitives", "frame-benchmarking", @@ -6645,12 +6387,12 @@ dependencies = [ [[package]] name = "polkadot-collator-protocol" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "always-assert", "derive_more", - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -6666,8 +6408,8 @@ dependencies = [ [[package]] name = "polkadot-core-primitives" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "parity-scale-codec", "parity-util-mem", @@ -6679,11 +6421,11 @@ dependencies = [ [[package]] name = "polkadot-dispute-distribution" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "derive_more", - "futures 0.3.17", + "futures 0.3.18", "lru 0.7.0", "parity-scale-codec", "polkadot-erasure-coding", @@ -6701,8 +6443,8 @@ dependencies = [ [[package]] name = "polkadot-erasure-coding" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "parity-scale-codec", "polkadot-node-primitives", @@ -6715,10 +6457,10 @@ dependencies = [ [[package]] name = "polkadot-gossip-support" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "polkadot-node-network-protocol", "polkadot-node-subsystem", @@ -6735,11 +6477,11 @@ dependencies = [ [[package]] name = "polkadot-network-bridge" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "async-trait", - "futures 0.3.17", + "futures 0.3.18", "parity-scale-codec", "parking_lot 0.11.2", "polkadot-node-network-protocol", @@ -6754,10 +6496,10 @@ dependencies = [ [[package]] name = "polkadot-node-collation-generation" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "parity-scale-codec", "polkadot-erasure-coding", "polkadot-node-primitives", @@ -6772,12 +6514,12 @@ dependencies = [ [[package]] name = "polkadot-node-core-approval-voting" -version = "0.9.11" -source = 
"git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ - "bitvec 0.20.4", + "bitvec", "derive_more", - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "kvdb", "lru 0.7.0", @@ -6800,11 +6542,11 @@ dependencies = [ [[package]] name = "polkadot-node-core-av-store" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ - "bitvec 0.20.4", - "futures 0.3.17", + "bitvec", + "futures 0.3.18", "futures-timer 3.0.2", "kvdb", "parity-scale-codec", @@ -6820,11 +6562,11 @@ dependencies = [ [[package]] name = "polkadot-node-core-backing" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ - "bitvec 0.20.4", - "futures 0.3.17", + "bitvec", + "futures 0.3.18", "polkadot-erasure-coding", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -6838,10 +6580,10 @@ dependencies = [ [[package]] name = "polkadot-node-core-bitfield-signing" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "polkadot-node-subsystem", "polkadot-node-subsystem-util", "polkadot-primitives", @@ -6853,11 +6595,11 @@ dependencies = [ [[package]] name = "polkadot-node-core-candidate-validation" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "async-trait", - "futures 0.3.17", + "futures 0.3.18", "parity-scale-codec", "polkadot-node-core-pvf", "polkadot-node-primitives", @@ -6871,10 +6613,10 @@ dependencies = [ [[package]] name = "polkadot-node-core-chain-api" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "polkadot-node-subsystem", "polkadot-node-subsystem-util", "polkadot-primitives", @@ -6886,10 +6628,10 @@ dependencies = [ [[package]] name = "polkadot-node-core-chain-selection" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "kvdb", "parity-scale-codec", @@ -6903,12 +6645,12 @@ dependencies = [ [[package]] name = "polkadot-node-core-dispute-coordinator" -version = "0.9.11" -source = 
"git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ - "bitvec 0.20.4", + "bitvec", "derive_more", - "futures 0.3.17", + "futures 0.3.18", "kvdb", "parity-scale-codec", "polkadot-node-primitives", @@ -6920,26 +6662,13 @@ dependencies = [ "tracing", ] -[[package]] -name = "polkadot-node-core-dispute-participation" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" -dependencies = [ - "futures 0.3.17", - "polkadot-node-primitives", - "polkadot-node-subsystem", - "polkadot-primitives", - "thiserror", - "tracing", -] - [[package]] name = "polkadot-node-core-parachains-inherent" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "async-trait", - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "polkadot-node-subsystem", "polkadot-primitives", @@ -6952,11 +6681,10 @@ dependencies = [ [[package]] name = "polkadot-node-core-provisioner" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ - "bitvec 0.20.4", - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "polkadot-node-subsystem", "polkadot-node-subsystem-util", @@ -6967,14 +6695,14 @@ dependencies = [ [[package]] name = "polkadot-node-core-pvf" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "always-assert", "assert_matches", "async-process", "async-std", - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "libc", "parity-scale-codec", @@ -6998,10 +6726,10 @@ dependencies = [ [[package]] name = "polkadot-node-core-runtime-api" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "memory-lru", "parity-util-mem", "polkadot-node-subsystem", @@ -7016,8 +6744,8 @@ dependencies = [ [[package]] name = "polkadot-node-jaeger" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "async-std", "lazy_static", @@ -7034,10 +6762,10 @@ dependencies = [ [[package]] name = "polkadot-node-metrics" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", 
"futures-timer 3.0.2", "metered-channel", "substrate-prometheus-endpoint", @@ -7045,29 +6773,29 @@ dependencies = [ [[package]] name = "polkadot-node-network-protocol" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "async-trait", "derive_more", - "futures 0.3.17", + "futures 0.3.18", "parity-scale-codec", "polkadot-node-jaeger", "polkadot-node-primitives", "polkadot-primitives", "sc-authority-discovery", "sc-network", - "strum 0.21.0", + "strum 0.23.0", "thiserror", ] [[package]] name = "polkadot-node-primitives" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "bounded-vec", - "futures 0.3.17", + "futures 0.3.18", "parity-scale-codec", "polkadot-parachain", "polkadot-primitives", @@ -7085,8 +6813,8 @@ dependencies = [ [[package]] name = "polkadot-node-subsystem" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "polkadot-node-jaeger", "polkadot-node-subsystem-types", @@ -7095,11 +6823,11 @@ dependencies = [ [[package]] name = "polkadot-node-subsystem-types" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "derive_more", - "futures 0.3.17", + "futures 0.3.18", "polkadot-node-jaeger", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -7114,12 +6842,12 @@ dependencies = [ [[package]] name = "polkadot-node-subsystem-util" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "async-trait", "derive_more", - "futures 0.3.17", + "futures 0.3.18", "itertools", "lru 0.7.0", "metered-channel", @@ -7128,6 +6856,7 @@ dependencies = [ "polkadot-node-jaeger", "polkadot-node-metrics", "polkadot-node-network-protocol", + "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-overseer", "polkadot-primitives", @@ -7141,10 +6870,10 @@ dependencies = [ [[package]] name = "polkadot-overseer" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "lru 0.7.0", "parity-util-mem", @@ -7162,11 +6891,11 @@ dependencies = [ [[package]] name = "polkadot-overseer-gen" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = 
"git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "async-trait", - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "metered-channel", "pin-project 1.0.8", @@ -7179,8 +6908,8 @@ dependencies = [ [[package]] name = "polkadot-overseer-gen-proc-macro" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "proc-macro-crate 1.1.0", "proc-macro2", @@ -7190,8 +6919,8 @@ dependencies = [ [[package]] name = "polkadot-parachain" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "derive_more", "frame-support", @@ -7207,12 +6936,12 @@ dependencies = [ [[package]] name = "polkadot-primitives" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ - "bitvec 0.20.4", + "bitvec", "frame-system", - "hex-literal 0.3.3", + "hex-literal", "parity-scale-codec", "parity-util-mem", "polkadot-core-primitives", @@ -7237,12 +6966,12 @@ dependencies = [ [[package]] name = "polkadot-rpc" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "beefy-gadget", "beefy-gadget-rpc", - "jsonrpc-core 18.0.0", + "jsonrpc-core", "pallet-mmr-rpc", "pallet-transaction-payment-rpc", "polkadot-primitives", @@ -7268,11 +6997,11 @@ dependencies = [ [[package]] name = "polkadot-runtime" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "beefy-primitives", - "bitvec 0.20.4", + "bitvec", "frame-election-provider-support", "frame-executive", "frame-support", @@ -7283,6 +7012,7 @@ dependencies = [ "pallet-authority-discovery", "pallet-authorship", "pallet-babe", + "pallet-bags-list", "pallet-balances", "pallet-bounties", "pallet-collective", @@ -7310,6 +7040,7 @@ dependencies = [ "pallet-treasury", "pallet-utility", "pallet-vesting", + "pallet-xcm", "parity-scale-codec", "polkadot-primitives", "polkadot-runtime-common", @@ -7336,20 +7067,23 @@ dependencies = [ "sp-version", "static_assertions", "substrate-wasm-builder", + "xcm", + "xcm-builder", + "xcm-executor", ] [[package]] name = "polkadot-runtime-common" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "beefy-primitives", - "bitvec 0.20.4", + "bitvec", "frame-election-provider-support", "frame-support", "frame-system", "impl-trait-for-tuples", - "libsecp256k1 0.7.0", + 
"libsecp256k1", "log", "pallet-authorship", "pallet-bags-list", @@ -7385,17 +7119,18 @@ dependencies = [ [[package]] name = "polkadot-runtime-parachains" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "bitflags", - "bitvec 0.20.4", + "bitvec", "derive_more", "frame-support", "frame-system", "log", "pallet-authority-discovery", "pallet-authorship", + "pallet-babe", "pallet-balances", "pallet-session", "pallet-staking", @@ -7423,15 +7158,15 @@ dependencies = [ [[package]] name = "polkadot-service" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "async-trait", "beefy-gadget", "beefy-primitives", "frame-system-rpc-runtime-api", - "futures 0.3.17", - "hex-literal 0.3.3", + "futures 0.3.18", + "hex-literal", "kvdb", "kvdb-rocksdb 0.14.0", "lru 0.7.0", @@ -7458,7 +7193,6 @@ dependencies = [ "polkadot-node-core-chain-api", "polkadot-node-core-chain-selection", "polkadot-node-core-dispute-coordinator", - "polkadot-node-core-dispute-participation", "polkadot-node-core-parachains-inherent", "polkadot-node-core-provisioner", "polkadot-node-core-runtime-api", @@ -7487,6 +7221,7 @@ dependencies = [ "sc-finality-grandpa", "sc-keystore", "sc-network", + "sc-offchain", "sc-service", "sc-sync-state-rpc", "sc-telemetry", @@ -7518,12 +7253,12 @@ dependencies = [ [[package]] name = "polkadot-statement-distribution" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "arrayvec 0.5.2", "derive_more", - "futures 0.3.17", + "futures 0.3.18", "indexmap", "parity-scale-codec", "polkadot-node-network-protocol", @@ -7539,8 +7274,8 @@ dependencies = [ [[package]] name = "polkadot-statement-table" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "parity-scale-codec", "polkadot-primitives", @@ -7549,11 +7284,11 @@ dependencies = [ [[package]] name = "polkadot-test-runtime" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "beefy-primitives", - "bitvec 0.20.4", + "bitvec", "frame-election-provider-support", "frame-executive", "frame-support", @@ -7610,13 +7345,13 @@ dependencies = [ [[package]] name = "polkadot-test-service" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "frame-benchmarking", "frame-system", "futures 0.1.31", - "futures 0.3.17", + "futures 0.3.18", "hex", 
"pallet-balances", "pallet-staking", @@ -7663,9 +7398,9 @@ dependencies = [ [[package]] name = "polling" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92341d779fa34ea8437ef4d82d440d5e1ce3f3ff7f824aa64424cd481f9a1f25" +checksum = "685404d509889fade3e86fe3a5803bca2ec09b0c0778d5ada6ec8bf7a8de5259" dependencies = [ "cfg-if 1.0.0", "libc", @@ -7699,9 +7434,9 @@ dependencies = [ [[package]] name = "ppv-lite86" -version = "0.2.10" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" +checksum = "ed0cfbc8191465bed66e1718596ee0b0b35d5ee1f41c5df2189d0fe8bde535ba" [[package]] name = "primitive-types" @@ -7792,26 +7527,20 @@ version = "0.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" -[[package]] -name = "proc-macro-nested" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" - [[package]] name = "proc-macro2" -version = "1.0.29" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f5105d4fdaab20335ca9565e106a5d9b82b6219b5ba735731124ac6711d23d" +checksum = "ba508cc11742c0dc5c1659771673afbab7a0efab23aa17e854cbab0837ed0b43" dependencies = [ "unicode-xid", ] [[package]] name = "prometheus" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5986aa8d62380092d2f50f8b1cdba9cb9b6731ffd4b25b51fd126b6c3e05b99c" +checksum = "b7f64969ffd5dd8f39bd57a68ac53c163a095ed9d0fb707146da1b27025a3504" dependencies = [ "cfg-if 1.0.0", "fnv", @@ -7823,9 +7552,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de5e2533f59d08fcf364fd374ebda0692a70bd6d7e66ef97f306f45c6c5d8020" +checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" dependencies = [ "bytes 1.1.0", "prost-derive", @@ -7833,27 +7562,29 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "355f634b43cdd80724ee7848f95770e7e70eefa6dcf14fea676216573b8fd603" +checksum = "62941722fb675d463659e49c4f3fe1fe792ff24fe5bbaa9c08cd3b98a1c354f5" dependencies = [ "bytes 1.1.0", "heck", "itertools", + "lazy_static", "log", "multimap", "petgraph", "prost", "prost-types", + "regex", "tempfile", "which", ] [[package]] name = "prost-derive" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "600d2f334aa05acb02a755e217ef1ab6dea4d51b58b7846588b747edec04efba" +checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" dependencies = [ "anyhow", "itertools", @@ -7864,9 +7595,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "603bbd6394701d13f3f25aada59c7de9d35a6a5887cfc156181234a44002771b" +checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" dependencies = [ "bytes 1.1.0", "prost", @@ -7924,12 +7655,6 @@ dependencies = [ "proc-macro2", ] -[[package]] -name = "radium" -version = "0.5.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "941ba9d78d8e2f7ce474c015eea4d9c6d25b6a3327f9832ee29a4de27f91bbb8" - [[package]] name = "radium" version = "0.6.2" @@ -8002,9 +7727,9 @@ dependencies = [ [[package]] name = "rand_distr" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "051b398806e42b9cd04ad9ec8f81e355d0a382c543ac6672c62f5a5b452ef142" +checksum = "964d548f8e7d12e102ef183a0de7e98180c9f8729f555897a857b96e48122d2f" dependencies = [ "num-traits", "rand 0.8.4", @@ -8128,13 +7853,12 @@ dependencies = [ [[package]] name = "regalloc" -version = "0.0.31" +version = "0.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "571f7f397d61c4755285cd37853fe8e03271c243424a907415909379659381c5" +checksum = "a6304468554ed921da3d32c355ea107b8d13d7b8996c3adfb7aab48d3bc321f4" dependencies = [ "log", "rustc-hash", - "serde", "smallvec", ] @@ -8176,24 +7900,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "relay-ethereum-client" -version = "0.1.0" -dependencies = [ - "async-std", - "bp-eth-poa", - "headers-relay", - "hex-literal 0.3.3", - "jsonrpsee-proc-macros", - "jsonrpsee-ws-client", - "libsecp256k1 0.7.0", - "log", - "relay-utils", - "thiserror", - "tokio", - "web3", -] - [[package]] name = "relay-kusama-client" version = "0.1.0" @@ -8317,10 +8023,9 @@ dependencies = [ "finality-relay", "frame-support", "frame-system", - "futures 0.3.17", - "headers-relay", - "jsonrpsee-proc-macros", - "jsonrpsee-ws-client", + "futures 0.3.18", + "jsonrpsee-proc-macros 0.3.1", + "jsonrpsee-ws-client 0.3.1", "log", "num-traits", "pallet-balances", @@ -8353,7 +8058,7 @@ dependencies = [ "backoff", "bp-runtime", "env_logger 0.8.4", - "futures 0.3.17", + "futures 0.3.18", "isahc", "jsonpath_lib", "log", @@ -8403,11 +8108,10 @@ dependencies = [ [[package]] name = "remote-externalities" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "env_logger 0.9.0", - "jsonrpsee-proc-macros", - "jsonrpsee-ws-client", + "jsonrpsee", "log", "parity-scale-codec", "serde", @@ -8427,41 +8131,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "reqwest" -version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "246e9f61b9bb77df069a947682be06e31ac43ea37862e244a69f177694ea6d22" -dependencies = [ - "base64 0.13.0", - "bytes 1.1.0", - "encoding_rs", - "futures-core", - "futures-util", - "http", - "http-body", - "hyper", - "hyper-tls", - "ipnet", - "js-sys", - "lazy_static", - "log", - "mime", - "native-tls", - "percent-encoding 2.1.0", - "pin-project-lite 0.2.7", - "serde", - "serde_json", - "serde_urlencoded", - "tokio", - "tokio-native-tls", - "url 2.2.2", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "winreg 0.7.0", -] - [[package]] name = "resolv-conf" version = "0.7.0" @@ -8474,32 +8143,38 @@ dependencies = [ [[package]] name = "retain_mut" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9c17925a9027d298a4603d286befe3f9dc0e8ed02523141914eb628798d6e5b" +checksum = "448296241d034b96c11173591deaa1302f2c17b56092106c1f92c1bc0183a8c9" [[package]] name = "rialto-bridge-node" version = "0.1.0" dependencies = [ + "beefy-gadget", + "beefy-gadget-rpc", + "beefy-primitives", 
"bp-rialto", "bp-runtime", "frame-benchmarking", "frame-benchmarking-cli", "frame-system-rpc-runtime-api", - "futures 0.3.17", - "jsonrpc-core 18.0.0", + "futures 0.3.18", + "jsonrpc-core", "kvdb", "kvdb-rocksdb 0.12.1", "lru 0.7.0", "node-inspect", "pallet-bridge-messages", + "pallet-mmr-primitives", + "pallet-mmr-rpc", "pallet-transaction-payment-rpc", "pallet-transaction-payment-rpc-runtime-api", "polkadot-approval-distribution", "polkadot-availability-bitfield-distribution", "polkadot-availability-distribution", "polkadot-availability-recovery", + "polkadot-client", "polkadot-collator-protocol", "polkadot-dispute-distribution", "polkadot-gossip-support", @@ -8513,7 +8188,6 @@ dependencies = [ "polkadot-node-core-chain-api", "polkadot-node-core-chain-selection", "polkadot-node-core-dispute-coordinator", - "polkadot-node-core-dispute-participation", "polkadot-node-core-parachains-inherent", "polkadot-node-core-provisioner", "polkadot-node-core-pvf", @@ -8580,8 +8254,8 @@ dependencies = [ "derive_more", "frame-benchmarking", "frame-benchmarking-cli", - "hex-literal 0.3.3", - "jsonrpc-core 18.0.0", + "hex-literal", + "jsonrpc-core", "log", "pallet-transaction-payment-rpc", "parity-scale-codec", @@ -8680,8 +8354,7 @@ dependencies = [ name = "rialto-runtime" version = "0.1.0" dependencies = [ - "bp-currency-exchange", - "bp-eth-poa", + "beefy-primitives", "bp-header-chain", "bp-message-dispatch", "bp-messages", @@ -8694,18 +8367,20 @@ dependencies = [ "frame-support", "frame-system", "frame-system-rpc-runtime-api", - "hex-literal 0.3.3", - "libsecp256k1 0.7.0", + "hex-literal", + "libsecp256k1", "log", "pallet-authority-discovery", "pallet-babe", "pallet-balances", - "pallet-bridge-currency-exchange", + "pallet-beefy", + "pallet-beefy-mmr", "pallet-bridge-dispatch", - "pallet-bridge-eth-poa", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-grandpa", + "pallet-mmr", + "pallet-mmr-primitives", "pallet-session", "pallet-shift-session-manager", "pallet-sudo", @@ -8781,6 +8456,23 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "rsix" +version = "0.23.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f64c5788d5aab8b75441499d99576a24eb09f76fb267b36fec7e3d970c66431" +dependencies = [ + "bitflags", + "cc", + "errno", + "io-lifetimes", + "itoa", + "libc", + "linux-raw-sys", + "once_cell", + "rustc_version 0.4.0", +] + [[package]] name = "rustc-demangle" version = "0.1.21" @@ -8817,13 +8509,22 @@ dependencies = [ "semver 0.11.0", ] +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver 1.0.4", +] + [[package]] name = "rustls" version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" dependencies = [ - "base64 0.13.0", + "base64", "log", "ring", "sct", @@ -8842,13 +8543,19 @@ dependencies = [ "security-framework", ] +[[package]] +name = "rustversion" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61b3909d758bb75c79f23d4736fac9433868679d3ad2ea7a61e3c25cfda9a088" + [[package]] name = "rw-stream-sink" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "pin-project 
0.4.28", "static_assertions", ] @@ -8870,9 +8577,9 @@ dependencies = [ [[package]] name = "salsa20" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecbd2eb639fd7cab5804a0837fe373cc2172d15437e804c054a9fb885cb923b0" +checksum = "0c0fbb5f676da676c260ba276a8f43a8dc67cf02d1438423aeb1c677a7212686" dependencies = [ "cipher", ] @@ -8888,8 +8595,8 @@ dependencies = [ [[package]] name = "sc-allocator" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +version = "4.1.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "log", "sp-core", @@ -8900,11 +8607,11 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "async-trait", "derive_more", - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "ip_network", "libp2p", @@ -8927,9 +8634,9 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "log", "parity-scale-codec", @@ -8950,7 +8657,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -8966,9 +8673,10 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "impl-trait-for-tuples", + "memmap2 0.5.0", "parity-scale-codec", "sc-chain-spec-derive", "sc-network", @@ -8982,7 +8690,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "proc-macro-crate 1.1.0", "proc-macro2", @@ -8993,11 +8701,11 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "chrono", "fdlimit", - "futures 0.3.17", + "futures 0.3.18", "hex", "libp2p", "log", @@ -9031,10 +8739,10 @@ dependencies = [ [[package]] name = "sc-client-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = 
"git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "fnv", - "futures 0.3.17", + "futures 0.3.18", "hash-db", "log", "parity-scale-codec", @@ -9059,7 +8767,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "hash-db", "kvdb", @@ -9084,10 +8792,10 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "async-trait", - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "libp2p", "log", @@ -9108,11 +8816,11 @@ dependencies = [ [[package]] name = "sc-consensus-aura" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "async-trait", "derive_more", - "futures 0.3.17", + "futures 0.3.18", "log", "parity-scale-codec", "sc-block-builder", @@ -9137,12 +8845,12 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "async-trait", "derive_more", "fork-tree", - "futures 0.3.17", + "futures 0.3.18", "log", "merlin", "num-bigint", @@ -9180,11 +8888,11 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "derive_more", - "futures 0.3.17", - "jsonrpc-core 18.0.0", + "futures 0.3.18", + "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", "sc-consensus-babe", @@ -9204,7 +8912,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "fork-tree", "parity-scale-codec", @@ -9217,10 +8925,10 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "async-trait", - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "log", "parity-scale-codec", @@ -9243,7 +8951,7 @@ dependencies = [ [[package]] name = "sc-consensus-uncles" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = 
"git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "sc-client-api", "sp-authorship", @@ -9254,10 +8962,10 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "lazy_static", - "libsecp256k1 0.6.0", + "libsecp256k1", "log", "parity-scale-codec", "parking_lot 0.11.2", @@ -9266,6 +8974,7 @@ dependencies = [ "sc-executor-wasmtime", "sp-api", "sp-core", + "sp-core-hashing-proc-macro", "sp-externalities", "sp-io", "sp-panic-handler", @@ -9280,7 +8989,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "derive_more", "environmental", @@ -9298,7 +9007,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmi" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "log", "parity-scale-codec", @@ -9314,7 +9023,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "cfg-if 1.0.0", "libc", @@ -9332,14 +9041,14 @@ dependencies = [ [[package]] name = "sc-finality-grandpa" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "async-trait", "derive_more", "dyn-clone", "finality-grandpa", "fork-tree", - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "log", "parity-scale-codec", @@ -9369,12 +9078,12 @@ dependencies = [ [[package]] name = "sc-finality-grandpa-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "derive_more", "finality-grandpa", - "futures 0.3.17", - "jsonrpc-core 18.0.0", + "futures 0.3.18", + "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", "jsonrpc-pubsub", @@ -9393,10 +9102,10 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "ansi_term 0.12.1", - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "log", "parity-util-mem", @@ -9410,7 +9119,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "async-trait", "derive_more", @@ -9422,28 +9131,10 @@ dependencies = [ "sp-keystore", ] -[[package]] -name = "sc-light" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" -dependencies = [ - "hash-db", - "parity-scale-codec", - "parking_lot 0.11.2", - "sc-client-api", - "sc-executor", - "sp-api", - "sp-blockchain", - "sp-core", - "sp-externalities", - "sp-runtime", - "sp-state-machine", -] - [[package]] name = "sc-network" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "async-std", "async-trait", @@ -9455,7 +9146,7 @@ dependencies = [ "either", "fnv", "fork-tree", - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "hex", "ip_network", @@ -9463,7 +9154,7 @@ dependencies = [ "linked-hash-map", "linked_hash_set", "log", - "lru 0.6.6", + "lru 0.7.0", "parity-scale-codec", "parking_lot 0.11.2", "pin-project 1.0.8", @@ -9494,13 +9185,13 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "libp2p", "log", - "lru 0.6.6", + "lru 0.7.0", "sc-network", "sp-runtime", "substrate-prometheus-endpoint", @@ -9510,17 +9201,17 @@ dependencies = [ [[package]] name = "sc-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "bytes 1.1.0", "fnv", - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "hex", "hyper", "hyper-rustls", - "log", "num_cpus", + "once_cell", "parity-scale-codec", "parking_lot 0.11.2", "rand 0.7.3", @@ -9532,14 +9223,15 @@ dependencies = [ "sp-offchain", "sp-runtime", "threadpool", + "tracing", ] [[package]] name = "sc-peerset" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "libp2p", "log", "sc-utils", @@ -9549,8 +9241,8 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +version = "0.10.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -9559,11 +9251,11 @@ dependencies = [ [[package]] name = "sc-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = 
"git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "hash-db", - "jsonrpc-core 18.0.0", + "jsonrpc-core", "jsonrpc-pubsub", "log", "parity-scale-codec", @@ -9590,10 +9282,10 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ - "futures 0.3.17", - "jsonrpc-core 18.0.0", + "futures 0.3.18", + "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", "jsonrpc-pubsub", @@ -9615,10 +9307,10 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ - "futures 0.3.17", - "jsonrpc-core 18.0.0", + "futures 0.3.18", + "jsonrpc-core", "jsonrpc-http-server", "jsonrpc-ipc-server", "jsonrpc-pubsub", @@ -9632,15 +9324,15 @@ dependencies = [ [[package]] name = "sc-service" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "async-trait", "directories", "exit-future", - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "hash-db", - "jsonrpc-core 18.0.0", + "jsonrpc-core", "jsonrpc-pubsub", "log", "parity-scale-codec", @@ -9656,7 +9348,6 @@ dependencies = [ "sc-executor", "sc-informant", "sc-keystore", - "sc-light", "sc-network", "sc-offchain", "sc-rpc", @@ -9697,7 +9388,7 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "log", "parity-scale-codec", @@ -9711,9 +9402,9 @@ dependencies = [ [[package]] name = "sc-sync-state-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ - "jsonrpc-core 18.0.0", + "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", "parity-scale-codec", @@ -9733,10 +9424,10 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "chrono", - "futures 0.3.17", + "futures 0.3.18", "libp2p", "log", "parking_lot 0.11.2", @@ -9751,12 +9442,13 @@ dependencies = [ [[package]] name = "sc-tracing" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "ansi_term 0.12.1", "atty", "chrono", "lazy_static", + "libc", "log", "once_cell", 
"parking_lot 0.11.2", @@ -9781,7 +9473,7 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "proc-macro-crate 1.1.0", "proc-macro2", @@ -9792,9 +9484,9 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "intervalier", "linked-hash-map", "log", @@ -9819,10 +9511,10 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "derive_more", - "futures 0.3.17", + "futures 0.3.18", "log", "serde", "sp-blockchain", @@ -9833,9 +9525,9 @@ dependencies = [ [[package]] name = "sc-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "lazy_static", "prometheus", @@ -9847,7 +9539,7 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c55b744399c25532d63a0d2789b109df8d46fc93752d46b0782991a931a782f" dependencies = [ - "bitvec 0.20.4", + "bitvec", "cfg-if 1.0.0", "derive_more", "parity-scale-codec", @@ -9907,26 +9599,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" -[[package]] -name = "scroll" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fda28d4b4830b807a8b43f7b0e6b5df875311b3e7621d84577188c175b6ec1ec" -dependencies = [ - "scroll_derive", -] - -[[package]] -name = "scroll_derive" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaaae8f38bb311444cfb7f1979af0bc9240d95795f75f9ceddf6a59b79ceffa0" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "sct" version = "0.6.1" @@ -9937,24 +9609,6 @@ dependencies = [ "untrusted", ] -[[package]] -name = "secp256k1" -version = "0.20.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d03ceae636d0fed5bae6a7f4f664354c5f4fcedf6eef053fef17e49f837d0a" -dependencies = [ - "secp256k1-sys", -] - -[[package]] -name = "secp256k1-sys" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "827cb7cce42533829c792fc51b82fbf18b125b45a702ef2c8be77fce65463a7b" -dependencies = [ - "cc", -] - [[package]] name = "secrecy" version = "0.8.0" @@ -10012,6 +9666,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" dependencies = [ "semver-parser 0.10.2", +] + +[[package]] +name = "semver" +version = "1.0.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "568a8e6258aa33c13358f81fd834adb854c6f7c9468520910a9b1e8fac068012" +dependencies = [ "serde", ] @@ -10039,16 +9701,6 @@ dependencies = [ "serde_derive", ] -[[package]] -name = "serde-big-array" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "883eee5198ea51720eab8be52a36cf6c0164ac90eea0ed95b649d5e35382404e" -dependencies = [ - "serde", - "serde_derive", -] - [[package]] name = "serde_derive" version = "1.0.130" @@ -10062,9 +9714,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.68" +version = "1.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f690853975602e1bfe1ccbf50504d67174e3bcf340f23b5ea9992e0587a52d8" +checksum = "d0ffa0837f2dfa6fb90868c2b5468cad482e175f7dad97e7421951e663f2b527" dependencies = [ "indexmap", "itoa", @@ -10072,18 +9724,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_urlencoded" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9" -dependencies = [ - "form_urlencoded", - "itoa", - "ryu", - "serde", -] - [[package]] name = "sha-1" version = "0.8.2" @@ -10154,9 +9794,9 @@ dependencies = [ [[package]] name = "sharded-slab" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "740223c51853f3145fe7c90360d2d4232f2b62e3449489c207eccde818979982" +checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" dependencies = [ "lazy_static", ] @@ -10188,9 +9828,9 @@ dependencies = [ [[package]] name = "signature" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c19772be3c4dd2ceaacf03cb41d5885f2a02c4d8804884918e3a258480803335" +checksum = "02658e48d89f2bec991f9a78e69cfa4c316f8d6a6c4ec12fae1aeb263d486788" [[package]] name = "simba" @@ -10206,23 +9846,14 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c307a32c1c5c437f38c7fd45d753050587732ba8628319fbdf12a7e289ccc590" - -[[package]] -name = "slog" -version = "2.7.0" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8347046d4ebd943127157b94d63abb990fcf729dc4e9978927fdf4ac3c998d06" -dependencies = [ - "erased-serde", -] +checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" [[package]] name = "slot-range-helper" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "enumn", "parity-scale-codec", @@ -10294,9 +9925,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "765f090f0e423d2b55843402a07915add955e7d60657db13707a159727326cad" +checksum = "5dc90fe6c7be1a323296982db1836d1ea9e47b6839496dde9a541bc496df3516" dependencies = [ "libc", "winapi 0.3.9", @@ -10304,29 +9935,13 @@ dependencies = [ [[package]] name = "soketto" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5c71ed3d54db0a699f4948e1bb3e45b450fa31fe602621dee6680361d569c88" -dependencies = [ - "base64 
0.12.3", - "bytes 0.5.6", - "flate2", - "futures 0.3.17", - "httparse", - "log", - "rand 0.7.3", - "sha-1 0.9.8", -] - -[[package]] -name = "soketto" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4919971d141dbadaa0e82b5d369e2d7666c98e4625046140615ca363e50d4daa" +checksum = "a74e48087dbeed4833785c2f3352b59140095dc192dce966a3bfc155020a439f" dependencies = [ - "base64 0.13.0", + "base64", "bytes 1.1.0", - "futures 0.3.17", + "futures 0.3.18", "httparse", "log", "rand 0.8.4", @@ -10335,13 +9950,14 @@ dependencies = [ [[package]] name = "soketto" -version = "0.6.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a74e48087dbeed4833785c2f3352b59140095dc192dce966a3bfc155020a439f" +checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ - "base64 0.13.0", + "base64", "bytes 1.1.0", - "futures 0.3.17", + "flate2", + "futures 0.3.18", "httparse", "log", "rand 0.8.4", @@ -10351,7 +9967,7 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "hash-db", "log", @@ -10368,7 +9984,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "blake2-rfc", "proc-macro-crate 1.1.0", @@ -10380,7 +9996,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "parity-scale-codec", "scale-info", @@ -10393,7 +10009,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "integer-sqrt", "num-traits", @@ -10408,7 +10024,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "parity-scale-codec", "scale-info", @@ -10421,7 +10037,7 @@ dependencies = [ [[package]] name = "sp-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "async-trait", "parity-scale-codec", @@ -10433,7 +10049,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = 
"git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "parity-scale-codec", "sp-api", @@ -10445,11 +10061,11 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "log", - "lru 0.6.6", + "lru 0.7.0", "parity-scale-codec", "parking_lot 0.11.2", "sp-api", @@ -10463,10 +10079,10 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "async-trait", - "futures 0.3.17", + "futures 0.3.18", "futures-timer 3.0.2", "log", "parity-scale-codec", @@ -10482,7 +10098,7 @@ dependencies = [ [[package]] name = "sp-consensus-aura" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "async-trait", "parity-scale-codec", @@ -10500,7 +10116,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "async-trait", "merlin", @@ -10523,10 +10139,11 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "parity-scale-codec", "scale-info", + "serde", "sp-arithmetic", "sp-runtime", ] @@ -10534,7 +10151,7 @@ dependencies = [ [[package]] name = "sp-consensus-vrf" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "parity-scale-codec", "schnorrkel", @@ -10546,20 +10163,21 @@ dependencies = [ [[package]] name = "sp-core" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "base58", + "bitflags", "blake2-rfc", "byteorder", "dyn-clonable", "ed25519-dalek", - "futures 0.3.17", + "futures 0.3.18", "hash-db", "hash256-std-hasher", "hex", "impl-serde", "lazy_static", - "libsecp256k1 0.6.0", + "libsecp256k1", "log", "merlin", "num-traits", @@ -10574,11 +10192,13 @@ dependencies = [ "secrecy", "serde", "sha2 0.9.8", + "sp-core-hashing", "sp-debug-derive", "sp-externalities", "sp-runtime-interface", "sp-std", "sp-storage", + "ss58-registry", "substrate-bip39", "thiserror", "tiny-bip39", @@ -10588,10 +10208,34 @@ dependencies = [ "zeroize", ] +[[package]] +name = 
"sp-core-hashing" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" +dependencies = [ + "blake2-rfc", + "byteorder", + "sha2 0.9.8", + "sp-std", + "tiny-keccak", + "twox-hash", +] + +[[package]] +name = "sp-core-hashing-proc-macro" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" +dependencies = [ + "proc-macro2", + "quote", + "sp-core-hashing", + "syn", +] + [[package]] name = "sp-database" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "kvdb", "parking_lot 0.11.2", @@ -10599,8 +10243,8 @@ dependencies = [ [[package]] name = "sp-debug-derive" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "proc-macro2", "quote", @@ -10610,7 +10254,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "environmental", "parity-scale-codec", @@ -10621,7 +10265,7 @@ dependencies = [ [[package]] name = "sp-finality-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "finality-grandpa", "log", @@ -10639,7 +10283,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -10653,11 +10297,11 @@ dependencies = [ [[package]] name = "sp-io" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "hash-db", - "libsecp256k1 0.6.0", + "libsecp256k1", "log", "parity-scale-codec", "parking_lot 0.11.2", @@ -10677,22 +10321,22 @@ dependencies = [ [[package]] name = "sp-keyring" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "lazy_static", "sp-core", "sp-runtime", - "strum 0.20.0", + "strum 0.22.0", ] [[package]] name = "sp-keystore" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ 
"async-trait", "derive_more", - "futures 0.3.17", + "futures 0.3.18", "merlin", "parity-scale-codec", "parking_lot 0.11.2", @@ -10704,8 +10348,8 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +version = "4.1.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "zstd", ] @@ -10713,7 +10357,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "parity-scale-codec", "scale-info", @@ -10728,7 +10372,7 @@ dependencies = [ [[package]] name = "sp-npos-elections-solution-type" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "proc-macro-crate 1.1.0", "proc-macro2", @@ -10739,7 +10383,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "sp-api", "sp-core", @@ -10748,16 +10392,18 @@ dependencies = [ [[package]] name = "sp-panic-handler" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "backtrace", + "lazy_static", + "regex", ] [[package]] name = "sp-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "rustc-hash", "serde", @@ -10767,7 +10413,7 @@ dependencies = [ [[package]] name = "sp-runtime" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "either", "hash256-std-hasher", @@ -10789,7 +10435,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -10806,7 +10452,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "Inflector", "proc-macro-crate 1.1.0", @@ -10817,8 +10463,8 @@ dependencies = [ 
[[package]] name = "sp-serializer" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "serde", "serde_json", @@ -10827,7 +10473,7 @@ dependencies = [ [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "parity-scale-codec", "scale-info", @@ -10841,7 +10487,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "parity-scale-codec", "scale-info", @@ -10852,7 +10498,7 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "hash-db", "log", @@ -10875,12 +10521,12 @@ dependencies = [ [[package]] name = "sp-std" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" [[package]] name = "sp-storage" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "impl-serde", "parity-scale-codec", @@ -10893,7 +10539,7 @@ dependencies = [ [[package]] name = "sp-tasks" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "log", "sp-core", @@ -10906,7 +10552,7 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "async-trait", "futures-timer 3.0.2", @@ -10922,15 +10568,9 @@ dependencies = [ [[package]] name = "sp-tracing" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ - "erased-serde", - "log", "parity-scale-codec", - "parking_lot 0.10.2", - "serde", - "serde_json", - "slog", "sp-std", "tracing", "tracing-core", @@ -10940,7 +10580,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = 
"git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "sp-api", "sp-runtime", @@ -10949,7 +10589,7 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "async-trait", "log", @@ -10965,7 +10605,7 @@ dependencies = [ [[package]] name = "sp-trie" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "hash-db", "memory-db", @@ -10980,7 +10620,7 @@ dependencies = [ [[package]] name = "sp-version" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "impl-serde", "parity-scale-codec", @@ -10996,7 +10636,7 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "parity-scale-codec", "proc-macro2", @@ -11007,7 +10647,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -11021,6 +10661,20 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "ss58-registry" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78abb01d308934b82e34e9cf1f45846d31539246501745b129539176f4f3368d" +dependencies = [ + "Inflector", + "proc-macro2", + "quote", + "serde", + "serde_json", + "unicode-xid", +] + [[package]] name = "stable_deref_trait" version = "1.2.0" @@ -11151,9 +10805,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" [[package]] name = "structopt" -version = "0.3.23" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf9d950ef167e25e0bdb073cf1d68e9ad2795ac826f2f3f59647817cf23c0bfa" +checksum = "40b9788f4202aa75c240ecc9c15c65185e6a39ccdeb0fd5d008b98825464c87c" dependencies = [ "clap", "lazy_static", @@ -11162,9 +10816,9 @@ dependencies = [ [[package]] name = "structopt-derive" -version = "0.4.16" +version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134d838a2c9943ac3125cf6df165eda53493451b719f3255b2a26b85f772d0ba" +checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck", "proc-macro-error 1.0.4", @@ -11175,27 +10829,36 @@ dependencies = [ [[package]] name = "strum" -version = "0.20.0" +version = "0.21.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7318c509b5ba57f18533982607f24070a55d353e90d4cae30c467cdb2ad5ac5c" +checksum = "aaf86bbcfd1fa9670b7a129f64fc0c9fcbbfe4f1bc4210e9e98fe71ffc12cde2" dependencies = [ - "strum_macros 0.20.1", + "strum_macros 0.21.1", ] [[package]] name = "strum" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaf86bbcfd1fa9670b7a129f64fc0c9fcbbfe4f1bc4210e9e98fe71ffc12cde2" +checksum = "f7ac893c7d471c8a21f31cfe213ec4f6d9afeed25537c772e08ef3f005f8729e" dependencies = [ - "strum_macros 0.21.1", + "strum_macros 0.22.0", +] + +[[package]] +name = "strum" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cae14b91c7d11c9a851d3fbc80a963198998c2a64eec840477fa92d8ce9b70bb" +dependencies = [ + "strum_macros 0.23.1", ] [[package]] name = "strum_macros" -version = "0.20.1" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee8bc6b87a5112aeeab1f4a9f7ab634fe6cbefc4850006df31267f4cfb9e3149" +checksum = "d06aaeeee809dbc59eb4556183dd927df67db1540de5be8d3ec0b6636358a5ec" dependencies = [ "heck", "proc-macro2", @@ -11205,13 +10868,26 @@ dependencies = [ [[package]] name = "strum_macros" -version = "0.21.1" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d06aaeeee809dbc59eb4556183dd927df67db1540de5be8d3ec0b6636358a5ec" +checksum = "339f799d8b549e3744c7ac7feb216383e4005d94bdb22561b3ab8f3b808ae9fb" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "strum_macros" +version = "0.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5bb0dc7ee9c15cea6199cde9a127fa16a4c5819af85395457ad72d68edc85a38" dependencies = [ "heck", "proc-macro2", "quote", + "rustversion", "syn", ] @@ -11231,7 +10907,7 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "platforms", ] @@ -11239,11 +10915,11 @@ dependencies = [ [[package]] name = "substrate-frame-rpc-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "frame-system-rpc-runtime-api", - "futures 0.3.17", - "jsonrpc-core 18.0.0", + "futures 0.3.18", + "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", "log", @@ -11260,8 +10936,8 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +version = "0.10.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "async-std", "derive_more", @@ -11295,9 +10971,9 @@ dependencies = [ "finality-grandpa", "finality-relay", "frame-support", - "futures 0.3.17", + "futures 0.3.18", "hex", - "hex-literal 0.3.3", + "hex-literal", "log", "messages-relay", "millau-runtime", @@ -11355,7 +11031,7 @@ dependencies = [ "finality-grandpa", "finality-relay", "frame-support", - "futures 0.3.17", + "futures 0.3.18", 
"log", "messages-relay", "num-traits", @@ -11375,17 +11051,16 @@ dependencies = [ [[package]] name = "substrate-test-client" version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "async-trait", - "futures 0.3.17", + "futures 0.3.18", "hex", "parity-scale-codec", "sc-client-api", "sc-client-db", "sc-consensus", "sc-executor", - "sc-light", "sc-offchain", "sc-service", "serde", @@ -11402,7 +11077,7 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ "ansi_term 0.12.1", "build-helper", @@ -11422,9 +11097,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.80" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d010a1623fbd906d51d650a9916aaefc05ffa0e4053ff7fe601167f3e715d194" +checksum = "8daf5dd0bb60cbd4137b1b587d2fc0ae729bc07cf01cd70b36a1ed5ade3b9d59" dependencies = [ "proc-macro2", "quote", @@ -11444,9 +11119,9 @@ dependencies = [ [[package]] name = "synstructure" -version = "0.12.5" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "474aaa926faa1603c40b7885a9eaea29b444d1cb2850cb7c0e37bb1a4182f4fa" +checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ "proc-macro2", "quote", @@ -11517,18 +11192,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.29" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "602eca064b2d83369e2b2f34b09c70b605402801927c65c11071ac911d299b88" +checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.29" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bad553cc2c78e8de258400763a647e80e6d1b31ee237275d756f6836d204494c" +checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" dependencies = [ "proc-macro2", "quote", @@ -11645,9 +11320,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.4.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5241dd6f21443a3606b432718b166d3cedc962fd4b8bea54a8bc7f514ebda986" +checksum = "2c1c1d5a42b6245520c249549ec267180beaffcc0615401ac8e31853d4b6d8d2" dependencies = [ "tinyvec_macros", ] @@ -11660,18 +11335,17 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.12.0" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2c2416fdedca8443ae44b4527de1ea633af61d8f7169ffa6e72c5b53d24efcc" +checksum = "70e992e41e0d2fb9f755b37446f20900f64446ef54874f40a60c78f021ac6144" dependencies = [ "autocfg", "bytes 1.1.0", "libc", "memchr", - "mio 0.7.13", + "mio 0.7.14", "num_cpus", "once_cell", - "parking_lot 0.11.2", "pin-project-lite 0.2.7", "signal-hook-registry", "tokio-macros", @@ -11680,25 +11354,15 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.3.0" 
+version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54473be61f4ebe4efd09cec9bd5d16fa51d70ea0192213d754d2d500457db110" +checksum = "c9efc1aba077437943f7515666aa2b882dfabfbfdf89c819ea75a8d6e9eaba5e" dependencies = [ "proc-macro2", "quote", "syn", ] -[[package]] -name = "tokio-native-tls" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" -dependencies = [ - "native-tls", - "tokio", -] - [[package]] name = "tokio-rustls" version = "0.22.0" @@ -11712,9 +11376,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b2f3f698253f03119ac0102beaa64f67a67e08074d03a22d18784104543727f" +checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" dependencies = [ "futures-core", "pin-project-lite 0.2.7", @@ -11723,9 +11387,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.6.8" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d3725d3efa29485e87311c5b699de63cde14b00ed4d256b8318aa30ca452cd" +checksum = "9e99e1983e5d376cd8eb4b66604d2e99e79f5bd988c3055891dcd8c9e2604cc0" dependencies = [ "bytes 1.1.0", "futures-core", @@ -11817,14 +11481,15 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.2.22" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62af966210b88ad5776ee3ba12d5f35b8d6a2b2a12168f3080cf02b814d7376b" +checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" dependencies = [ "ansi_term 0.12.1", "chrono", "lazy_static", "matchers", + "parking_lot 0.11.2", "regex", "serde", "serde_json", @@ -11859,16 +11524,6 @@ dependencies = [ "hash-db", ] -[[package]] -name = "triehash" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1631b201eb031b563d2e85ca18ec8092508e262a3196ce9bd10a67ec87b9f5c" -dependencies = [ - "hash-db", - "rlp", -] - [[package]] name = "trust-dns-proto" version = "0.20.3" @@ -11921,9 +11576,9 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "try-runtime-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0" +source = "git+https://github.com/paritytech/substrate?branch=master#3fdb445b3b14880017680a3af85e89fb591666a0" dependencies = [ - "jsonrpsee-ws-client", + "jsonrpsee", "log", "parity-scale-codec", "remote-externalities", @@ -11942,13 +11597,19 @@ dependencies = [ "structopt", ] +[[package]] +name = "tt-call" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e66dcbec4290c69dd03c57e76c2469ea5c7ce109c6dd4351c13055cf71ea055" + [[package]] name = "twox-hash" version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f559b464de2e2bdabcac6a210d12e9b5a5973c251e102c44c585c71d51bd78e" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "rand 0.8.4", "static_assertions", ] @@ -11988,9 +11649,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "246f4c42e67e7a4e3c6106ff716a5d067d4132a642840b242e357e468a2a0085" +checksum = 
"1a01404663e3db436ed2746d9fefef640d868edae3cceb81c3b8d5732fda678f" [[package]] name = "unicode-normalization" @@ -12009,9 +11670,9 @@ checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" [[package]] name = "unicode-width" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" +checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" [[package]] name = "unicode-xid" @@ -12049,9 +11710,9 @@ dependencies = [ [[package]] name = "unsigned-varint" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f8d425fafb8cd76bc3f22aace4af471d3156301d7508f2107e98fbeae10bc7f" +checksum = "d86a8dc7f45e4c1b0d30e43038c38f274e77af056aa5f74b93c2cf9eb3c1c836" dependencies = [ "asynchronous-codec 0.6.0", "bytes 1.1.0", @@ -12090,9 +11751,9 @@ dependencies = [ [[package]] name = "value-bag" -version = "1.0.0-alpha.7" +version = "1.0.0-alpha.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd320e1520f94261153e96f7534476ad869c14022aee1e59af7c778075d840ae" +checksum = "79923f7731dc61ebfba3633098bf3ac533bbd35ccd8c57e7088d9a5eebe0263f" dependencies = [ "ctor", "version_check", @@ -12163,21 +11824,19 @@ checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "wasm-bindgen" -version = "0.2.77" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e68338db6becec24d3c7977b5bf8a48be992c934b5d07177e3931f5dc9b076c" +checksum = "632f73e236b219150ea279196e54e610f5dbafa5d61786303d4da54f84e47fce" dependencies = [ "cfg-if 1.0.0", - "serde", - "serde_json", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.77" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f34c405b4f0658583dba0c1c7c9b694f3cac32655db463b56c254a1c75269523" +checksum = "a317bf8f9fba2476b4b2c85ef4c4af8ff39c3c7f0cdfeed4f82c34a880aa837b" dependencies = [ "bumpalo", "lazy_static", @@ -12190,9 +11849,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.27" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a87d738d4abc4cf22f6eb142f5b9a81301331ee3c767f2fef2fda4e325492060" +checksum = "8e8d7523cb1f2a4c96c1317ca690031b714a51cc14e05f712446691f413f5d39" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -12202,9 +11861,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.77" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9d5a6580be83b19dc570a8f9c324251687ab2184e57086f71625feb57ec77c8" +checksum = "d56146e7c495528bf6587663bea13a8eb588d39b36b679d83972e1a2dbbdacf9" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -12212,9 +11871,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.77" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3775a030dc6f5a0afd8a84981a21cc92a781eb429acef9ecce476d0c9113e92" +checksum = "7803e0eea25835f8abdc585cd3021b3deb11543c6fe226dcd30b228857c5c5ab" dependencies = [ "proc-macro2", "quote", @@ -12225,9 +11884,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.77" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c279e376c7a8e8752a8f1eaa35b7b0bee6bb9fb0cdacfa97cc3f1f289c87e2b4" +checksum = "0237232789cf037d5480773fe568aac745bfe2afbc11a863e97901780a6b47cc" [[package]] name = "wasm-gc-api" @@ -12246,7 +11905,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be0ecb0db480561e9a7642b5d3e4187c128914e58aa84330b9493e3eb68c5e7f" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "js-sys", "parking_lot 0.11.2", "pin-utils", @@ -12257,9 +11916,9 @@ dependencies = [ [[package]] name = "wasmi" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2ee05bba3d1d994652079893941a2ef9324d2b58a63c31b40678fb7eddd7a5a" +checksum = "ca00c5147c319a8ec91ec1a0edbec31e566ce2c9cc93b3f9bb86a9efd0eb795d" dependencies = [ "downcast-rs", "libc", @@ -12272,24 +11931,24 @@ dependencies = [ [[package]] name = "wasmi-validation" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb8e860796d8be48efef530b60eebf84e74a88bce107374fffb0da97d504b8" +checksum = "165343ecd6c018fc09ebcae280752702c9a2ef3e6f8d02f1cfcbdb53ef6d7937" dependencies = [ "parity-wasm 0.42.2", ] [[package]] name = "wasmparser" -version = "0.79.0" +version = "0.81.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b5894be15a559c85779254700e1d35f02f843b5a69152e5c82c626d9fd66c0e" +checksum = "98930446519f63d00a836efdc22f67766ceae8dbcc1571379f2bcabc6b2b9abc" [[package]] name = "wasmtime" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bbb8a082a8ef50f7eeb8b82dda9709ef1e68963ea3c94e45581644dd4041835" +checksum = "311d06b0c49346d1fbf48a17052e844036b95a7753c1afb34e8c0af3f6b5bb13" dependencies = [ "anyhow", "backtrace", @@ -12300,36 +11959,36 @@ dependencies = [ "lazy_static", "libc", "log", + "object", "paste", "psm", + "rayon", "region", "rustc-demangle", "serde", - "smallvec", "target-lexicon", "wasmparser", "wasmtime-cache", + "wasmtime-cranelift", "wasmtime-environ", "wasmtime-jit", - "wasmtime-profiling", "wasmtime-runtime", "winapi 0.3.9", ] [[package]] name = "wasmtime-cache" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d73391579ca7f24573138ef768b73b2aed5f9d542385c64979b65d60d0912399" +checksum = "36147930a4995137dc096e5b17a573b446799be2bbaea433e821ce6a80abe2c5" dependencies = [ "anyhow", - "base64 0.13.0", + "base64", "bincode", "directories-next", - "errno", "file-per-thread-logger", - "libc", "log", + "rsix", "serde", "sha2 0.9.8", "toml", @@ -12339,27 +11998,18 @@ dependencies = [ [[package]] name = "wasmtime-cranelift" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81c6f5ae9205382345c7cd7454932a906186836999a2161c385e38a15f52e1fe" +checksum = "ab3083a47e1ede38aac06a1d9831640d673f9aeda0b82a64e4ce002f3432e2e7" dependencies = [ + "anyhow", "cranelift-codegen", "cranelift-entity", "cranelift-frontend", + "cranelift-native", "cranelift-wasm", - "target-lexicon", - "wasmparser", - "wasmtime-environ", -] - -[[package]] -name = "wasmtime-debug" -version = "0.29.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c69e08f55e12f15f50b1b533bc3626723e7224254a065de6576934c86258c9e8" -dependencies = [ - "anyhow", - "gimli", + "gimli 0.25.0", + "log", "more-asserts", "object", "target-lexicon", @@ -12370,94 +12020,55 @@ 
dependencies = [ [[package]] name = "wasmtime-environ" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "005d93174040af37fb8625f891cd9827afdad314261f7ec4ee61ec497d6e9d3c" +checksum = "1c2d194b655321053bc4111a1aa4ead552655c8a17d17264bc97766e70073510" dependencies = [ + "anyhow", "cfg-if 1.0.0", - "cranelift-codegen", "cranelift-entity", - "cranelift-wasm", - "gimli", + "gimli 0.25.0", "indexmap", "log", "more-asserts", + "object", "serde", + "target-lexicon", "thiserror", "wasmparser", + "wasmtime-types", ] [[package]] name = "wasmtime-jit" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0bf1dfb213a35d8f21aefae40e597fe72778a907011ffdff7affb029a02af9a" +checksum = "864ac8dfe4ce310ac59f16fdbd560c257389cb009ee5d030ac6e30523b023d11" dependencies = [ - "addr2line", + "addr2line 0.16.0", "anyhow", + "bincode", "cfg-if 1.0.0", - "cranelift-codegen", - "cranelift-entity", - "cranelift-frontend", - "cranelift-native", - "cranelift-wasm", - "gimli", + "gimli 0.25.0", "log", "more-asserts", "object", - "rayon", "region", + "rsix", "serde", "target-lexicon", "thiserror", "wasmparser", - "wasmtime-cranelift", - "wasmtime-debug", "wasmtime-environ", - "wasmtime-obj", - "wasmtime-profiling", "wasmtime-runtime", "winapi 0.3.9", ] -[[package]] -name = "wasmtime-obj" -version = "0.29.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231491878e710c68015228c9f9fc5955fe5c96dbf1485c15f7bed55b622c83c" -dependencies = [ - "anyhow", - "more-asserts", - "object", - "target-lexicon", - "wasmtime-debug", - "wasmtime-environ", -] - -[[package]] -name = "wasmtime-profiling" -version = "0.29.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21486cfb5255c2069666c1f116f9e949d4e35c9a494f11112fa407879e42198d" -dependencies = [ - "anyhow", - "cfg-if 1.0.0", - "gimli", - "lazy_static", - "libc", - "object", - "scroll", - "serde", - "target-lexicon", - "wasmtime-environ", - "wasmtime-runtime", -] - [[package]] name = "wasmtime-runtime" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7ddfdf32e0a20d81f48be9dacd31612bc61de5a174d1356fef806d300f507de" +checksum = "ab97da813a26b98c9abfd3b0c2d99e42f6b78b749c0646344e2e262d212d8c8b" dependencies = [ "anyhow", "backtrace", @@ -12472,64 +12083,32 @@ dependencies = [ "more-asserts", "rand 0.8.4", "region", + "rsix", "thiserror", "wasmtime-environ", "winapi 0.3.9", ] [[package]] -name = "web-sys" -version = "0.3.54" +name = "wasmtime-types" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a84d70d1ec7d2da2d26a5bd78f4bca1b8c3254805363ce743b7a05bc30d195a" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "web3" -version = "0.16.0" -source = "git+https://github.com/svyatonik/rust-web3.git?branch=bump-deps#117badfea7d6dbd748671648e877d6499e20f6ae" +checksum = "ff94409cc3557bfbbcce6b14520ccd6bd3727e965c0fe68d63ef2c185bf379c6" dependencies = [ - "arrayvec 0.5.2", - "base64 0.13.0", - "bytes 1.1.0", - "derive_more", - "ethabi", - "ethereum-types", - "futures 0.3.17", - "futures-timer 3.0.2", - "headers", - "hex", - "jsonrpc-core 17.1.0", - "log", - "parking_lot 0.11.2", - "pin-project 1.0.8", - "reqwest", - "rlp", - "secp256k1", + "cranelift-entity", "serde", - "serde_json", - "soketto 0.5.0", - "tiny-keccak", - "tokio", - "tokio-stream", - "tokio-util", - 
"url 2.2.2", - "web3-async-native-tls", + "thiserror", + "wasmparser", ] [[package]] -name = "web3-async-native-tls" -version = "0.4.0" +name = "web-sys" +version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f6d8d1636b2627fe63518d5a9b38a569405d9c9bc665c43c9c341de57227ebb" +checksum = "38eb105f1c59d9eaa6b5cdc92b859d85b926e82cb2e0945cd0c9259faa6fe9fb" dependencies = [ - "native-tls", - "thiserror", - "tokio", - "url 2.2.2", + "js-sys", + "wasm-bindgen", ] [[package]] @@ -12629,15 +12208,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "winreg" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" -dependencies = [ - "winapi 0.3.9", -] - [[package]] name = "ws2_32-sys" version = "0.2.1" @@ -12667,8 +12237,8 @@ dependencies = [ [[package]] name = "xcm" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "derivative", "impl-trait-for-tuples", @@ -12680,8 +12250,8 @@ dependencies = [ [[package]] name = "xcm-builder" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "frame-support", "frame-system", @@ -12700,8 +12270,8 @@ dependencies = [ [[package]] name = "xcm-executor" -version = "0.9.11" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +version = "0.9.13" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "frame-support", "impl-trait-for-tuples", @@ -12718,26 +12288,20 @@ dependencies = [ [[package]] name = "xcm-procedural" version = "0.1.0" -source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463" +source = "git+https://github.com/paritytech/polkadot?branch=master#bd69f54b6853e9a2f5e0869e5e76213259d4573d" dependencies = [ "proc-macro2", "quote", "syn", ] -[[package]] -name = "yaml-rust" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e66366e18dc58b46801afbf2ca7661a9f59cc8c5962c29892b6039b4f86fa992" - [[package]] name = "yamux" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7d9028f208dd5e63c614be69f115c1b53cacc1111437d4c765185856666c107" dependencies = [ - "futures 0.3.17", + "futures 0.3.18", "log", "nohash-hasher", "parking_lot 0.11.2", @@ -12747,18 +12311,18 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf68b08513768deaa790264a7fac27a58cbf2705cfcdc9448362229217d7e970" +checksum = "d68d9dcec5f9b43a30d38c49f91dfedfaac384cb8f085faca366c26207dd1619" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.2.0" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdff2024a851a322b08f179173ae2ba620445aef1e838f0c196820eade4ae0c7" +checksum = "65f1a51723ec88c66d5d1fe80c841f17f63587d6691901d66be9bec6c3b51f73" 
dependencies = [ "proc-macro2", "quote", diff --git a/Dockerfile b/Dockerfile index 2d03db8a76f2..ff88c6a5a0a5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -13,7 +13,7 @@ WORKDIR /parity-bridges-common COPY . . -ARG PROJECT=ethereum-poa-relay +ARG PROJECT=substrate-relay RUN cargo build --release --verbose -p ${PROJECT} && \ strip ./target/release/${PROJECT} @@ -42,7 +42,7 @@ USER user WORKDIR /home/user -ARG PROJECT=ethereum-poa-relay +ARG PROJECT=substrate-relay COPY --chown=user:user --from=builder /parity-bridges-common/target/release/${PROJECT} ./ COPY --chown=user:user --from=builder /parity-bridges-common/deployments/local-scripts/bridge-entrypoint.sh ./ diff --git a/README.md b/README.md index 41d7fec13d5b..ac3e49b94c6a 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ These components include Substrate pallets for syncing headers, passing arbitrar as libraries for building relayers to provide cross-chain communication capabilities. Three bridge nodes are also available. The nodes can be used to run test networks which bridge other -Substrate chains or Ethereum Proof-of-Authority chains. +Substrate chains. 🚧 The bridges are currently under construction - a hardhat is recommended beyond this point 🚧 @@ -38,7 +38,7 @@ cargo build --all cargo test --all ``` -Also you can build the repo with +Also you can build the repo with [Parity CI Docker image](https://github.com/paritytech/scripts/tree/master/dockerfiles/bridges-ci): ```bash @@ -54,7 +54,7 @@ docker run --rm -it -w /shellhere/parity-bridges-common \ #artifacts can be found in ~/cache/target ``` -If you want to reproduce other steps of CI process you can use the following +If you want to reproduce other steps of CI process you can use the following [guide](https://github.com/paritytech/scripts#reproduce-ci-locally). If you need more information about setting up your development environment Substrate's @@ -104,7 +104,6 @@ the `relays` which are used to pass messages between chains. ├── diagrams // Pretty pictures of the project architecture │ └── ... 
├── modules // Substrate Runtime Modules (a.k.a Pallets) -│ ├── ethereum // Ethereum PoA Header Sync Module │ ├── grandpa // On-Chain GRANDPA Light Client │ ├── messages // Cross Chain Message Passing │ ├── dispatch // Target Chain Message Execution diff --git a/bin/millau/node/Cargo.toml b/bin/millau/node/Cargo.toml index b650bd478a62..c4438d0cef3e 100644 --- a/bin/millau/node/Cargo.toml +++ b/bin/millau/node/Cargo.toml @@ -23,9 +23,13 @@ pallet-bridge-messages = { path = "../../../modules/messages" } # Substrate Dependencies +beefy-gadget = { git = "https://github.com/paritytech/substrate", branch = "master" } +beefy-gadget-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } +beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "master" } frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" } frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } node-inspect = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-mmr-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["wasmtime"] } diff --git a/bin/millau/node/src/chain_spec.rs b/bin/millau/node/src/chain_spec.rs index c32291fb3858..fbfca8692fcb 100644 --- a/bin/millau/node/src/chain_spec.rs +++ b/bin/millau/node/src/chain_spec.rs @@ -14,11 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
+use beefy_primitives::crypto::AuthorityId as BeefyId; use bp_millau::derive_account_from_rialto_id; use millau_runtime::{ - AccountId, AuraConfig, BalancesConfig, BridgeRialtoMessagesConfig, BridgeWestendGrandpaConfig, - GenesisConfig, GrandpaConfig, SessionConfig, SessionKeys, Signature, SudoConfig, SystemConfig, - WASM_BINARY, + AccountId, AuraConfig, BalancesConfig, BeefyConfig, BridgeRialtoMessagesConfig, + BridgeWestendGrandpaConfig, GenesisConfig, GrandpaConfig, SessionConfig, SessionKeys, + Signature, SudoConfig, SystemConfig, WASM_BINARY, }; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_core::{sr25519, Pair, Public}; @@ -57,10 +58,11 @@ where } /// Helper function to generate an authority key for Aura -pub fn get_authority_keys_from_seed(s: &str) -> (AccountId, AuraId, GrandpaId) { +pub fn get_authority_keys_from_seed(s: &str) -> (AccountId, AuraId, BeefyId, GrandpaId) { ( get_account_id_from_seed::(s), get_from_seed::(s), + get_from_seed::(s), get_from_seed::(s), ) } @@ -173,12 +175,12 @@ impl Alternative { } } -fn session_keys(aura: AuraId, grandpa: GrandpaId) -> SessionKeys { - SessionKeys { aura, grandpa } +fn session_keys(aura: AuraId, beefy: BeefyId, grandpa: GrandpaId) -> SessionKeys { + SessionKeys { aura, beefy, grandpa } } fn testnet_genesis( - initial_authorities: Vec<(AccountId, AuraId, GrandpaId)>, + initial_authorities: Vec<(AccountId, AuraId, BeefyId, GrandpaId)>, root_key: AccountId, endowed_accounts: Vec, _enable_println: bool, @@ -186,18 +188,20 @@ fn testnet_genesis( GenesisConfig { system: SystemConfig { code: WASM_BINARY.expect("Millau development WASM not available").to_vec(), - changes_trie_config: Default::default(), }, balances: BalancesConfig { balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 50)).collect(), }, aura: AuraConfig { authorities: Vec::new() }, + beefy: BeefyConfig { authorities: Vec::new() }, grandpa: GrandpaConfig { authorities: Vec::new() }, sudo: SudoConfig { key: root_key }, session: SessionConfig { keys: initial_authorities .iter() - .map(|x| (x.0.clone(), x.0.clone(), session_keys(x.1.clone(), x.2.clone()))) + .map(|x| { + (x.0.clone(), x.0.clone(), session_keys(x.1.clone(), x.2.clone(), x.3.clone())) + }) .collect::>(), }, bridge_westend_grandpa: BridgeWestendGrandpaConfig { diff --git a/bin/millau/node/src/command.rs b/bin/millau/node/src/command.rs index 4285ecaced51..4dbf9575dfec 100644 --- a/bin/millau/node/src/command.rs +++ b/bin/millau/node/src/command.rs @@ -20,7 +20,7 @@ use crate::{ service::new_partial, }; use millau_runtime::{Block, RuntimeApi}; -use sc_cli::{ChainSpec, Role, RuntimeVersion, SubstrateCli}; +use sc_cli::{ChainSpec, RuntimeVersion, SubstrateCli}; use sc_service::PartialComponents; impl SubstrateCli for Cli { @@ -72,7 +72,7 @@ impl SubstrateCli for Cli { pub fn run() -> sc_cli::Result<()> { let cli = Cli::from_args(); // make sure to set correct crypto version. 
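An aside on the `chain_spec.rs` hunks above, before the `command.rs` hunk continues: each initial authority now carries a BEEFY key next to its Aura and GRANDPA keys, and `session_keys` zips all three into the runtime's `SessionKeys`. A self-contained sketch of that shape, with plain `String` aliases standing in for the real public-key types so it runs without Substrate:

```rust
// Illustrative only: String stands in for the real key types
// (Aura/BEEFY/GRANDPA authority IDs) used by the patched chain spec.
type AccountId = String;
type AuraId = String;
type BeefyId = String;
type GrandpaId = String;

#[derive(Debug)]
struct SessionKeys {
    aura: AuraId,
    beefy: BeefyId,
    grandpa: GrandpaId,
}

// Mirrors the patched helper: one extra tuple element, one extra keys field.
fn session_keys(aura: AuraId, beefy: BeefyId, grandpa: GrandpaId) -> SessionKeys {
    SessionKeys { aura, beefy, grandpa }
}

fn main() {
    let initial_authorities: Vec<(AccountId, AuraId, BeefyId, GrandpaId)> = vec![(
        "Alice".into(),
        "alice-aura".into(),
        "alice-beefy".into(),
        "alice-grandpa".into(),
    )];
    // Same shape as the patched `testnet_genesis` session-keys mapping.
    let keys: Vec<(AccountId, AccountId, SessionKeys)> = initial_authorities
        .iter()
        .map(|x| (x.0.clone(), x.0.clone(), session_keys(x.1.clone(), x.2.clone(), x.3.clone())))
        .collect();
    println!("{keys:?}");
}
```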
- sp_core::crypto::set_default_ss58_version(sp_core::crypto::Ss58AddressFormat::Custom( + sp_core::crypto::set_default_ss58_version(sp_core::crypto::Ss58AddressFormat::custom( millau_runtime::SS58Prefix::get() as u16, )); @@ -146,11 +146,7 @@ pub fn run() -> sc_cli::Result<()> { None => { let runner = cli.create_runner(&cli.run)?; runner.run_node_until_exit(|config| async move { - match config.role { - Role::Light => service::new_light(config), - _ => service::new_full(config), - } - .map_err(sc_cli::Error::Service) + service::new_full(config).map_err(sc_cli::Error::Service) }) }, } diff --git a/bin/millau/node/src/service.rs b/bin/millau/node/src/service.rs index b8d42f9c7ed3..b01c0bfca906 100644 --- a/bin/millau/node/src/service.rs +++ b/bin/millau/node/src/service.rs @@ -21,18 +21,19 @@ // ===================================================================================== // UPDATE GUIDE: // 1) replace everything with node-template/src/service.rs contents (found in main Substrate repo); -// 2) the only thing to keep from old code, is `rpc_extensions_builder` - we use our own custom -// RPCs; 3) fix compilation errors; -// 4) test :) +// 2) from old code keep `rpc_extensions_builder` - we use our own custom RPCs; +// 3) from old code keep the Beefy gadget; +// 4) fix compilation errors; +// 5) test :) // ===================================================================================== // ===================================================================================== // ===================================================================================== use millau_runtime::{self, opaque::Block, RuntimeApi}; -use sc_client_api::{ExecutorProvider, RemoteBackend}; +use sc_client_api::ExecutorProvider; use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams}; pub use sc_executor::NativeElseWasmExecutor; - +use sc_finality_grandpa::SharedVoterState; use sc_keystore::LocalKeystore; use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; use sc_telemetry::{Telemetry, TelemetryWorker}; @@ -40,13 +41,16 @@ use sp_consensus::SlotData; use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; use std::{sync::Arc, time::Duration}; -type Executor = NativeElseWasmExecutor; - // Our native executor instance. pub struct ExecutorDispatch; impl sc_executor::NativeExecutionDispatch for ExecutorDispatch { + /// Only enable the benchmarking host functions when we actually want to benchmark. + #[cfg(feature = "runtime-benchmarks")] type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; + /// Otherwise we only use the default Substrate host functions. 
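Before the hunk resumes below: the `ExecutorDispatch` change gates the benchmarking host functions behind the `runtime-benchmarks` Cargo feature, so ordinary builds never compile them in. A minimal standalone sketch of the same cfg-gated associated-type pattern, using a hypothetical `bench` feature and made-up types rather than the real Substrate traits:

```rust
trait ExecutionDispatch {
    // Chosen at compile time by a Cargo feature.
    type ExtendHostFunctions;
}

struct Dispatch;

impl ExecutionDispatch for Dispatch {
    // With the feature enabled, plug in the extra host functions...
    #[cfg(feature = "bench")]
    type ExtendHostFunctions = BenchHostFunctions;
    // ...otherwise fall back to the unit type, i.e. "nothing extra".
    #[cfg(not(feature = "bench"))]
    type ExtendHostFunctions = ();
}

#[cfg(feature = "bench")]
struct BenchHostFunctions;

fn main() {
    // Nothing to run; the point is that both definitions coexist in the
    // source and exactly one survives cfg evaluation, as in the patched
    // ExecutorDispatch above.
}
```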
+ #[cfg(not(feature = "runtime-benchmarks"))] + type ExtendHostFunctions = (); fn dispatch(method: &str, data: &[u8]) -> Option> { millau_runtime::api::dispatch(method, data) @@ -62,7 +66,6 @@ type FullClient = type FullBackend = sc_service::TFullBackend; type FullSelectChain = sc_consensus::LongestChain; -#[allow(clippy::type_complexity)] pub fn new_partial( config: &Configuration, ) -> Result< @@ -86,7 +89,7 @@ pub fn new_partial( ServiceError, > { if config.keystore_remote.is_some() { - return Err(ServiceError::Other("Remote Keystores are not supported.".to_string())) + return Err(ServiceError::Other(format!("Remote Keystores are not supported."))) } let telemetry = config @@ -107,15 +110,15 @@ pub fn new_partial( ); let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts::( - config, + sc_service::new_full_parts::( + &config, telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), executor, )?; let client = Arc::new(client); let telemetry = telemetry.map(|(worker, telemetry)| { - task_manager.spawn_handle().spawn("telemetry", worker.run()); + task_manager.spawn_handle().spawn("telemetry", None, worker.run()); telemetry }); @@ -175,7 +178,7 @@ pub fn new_partial( }) } -fn remote_keystore(_url: &str) -> Result, &'static str> { +fn remote_keystore(_url: &String) -> Result, &'static str> { // FIXME: here would the concrete keystore be built, // must return a concrete type (NOT `LocalKeystore`) that // implements `CryptoStore` and `SyncCryptoStore` @@ -207,10 +210,11 @@ pub fn new_full(mut config: Configuration) -> Result } config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config()); + config.network.extra_sets.push(beefy_gadget::beefy_peers_set_config()); let warp_sync = Arc::new(sc_finality_grandpa::warp_proof::NetworkProvider::new( backend.clone(), grandpa_link.shared_authority_set().clone(), - vec![], + Vec::default(), )); let (network, system_rpc_tx, network_starter) = @@ -220,7 +224,6 @@ pub fn new_full(mut config: Configuration) -> Result transaction_pool: transaction_pool.clone(), spawn_handle: task_manager.spawn_handle(), import_queue, - on_demand: None, block_announce_validator_builder: None, warp_sync: Some(warp_sync), })?; @@ -240,7 +243,9 @@ pub fn new_full(mut config: Configuration) -> Result let name = config.network.node_name.clone(); let enable_grandpa = !config.disable_grandpa; let prometheus_registry = config.prometheus_registry().cloned(); - let shared_voter_state = sc_finality_grandpa::SharedVoterState::empty(); + let shared_voter_state = SharedVoterState::empty(); + let (signed_commitment_sender, signed_commitment_stream) = + beefy_gadget::notification::BeefySignedCommitmentStream::channel(); let rpc_extensions_builder = { use sc_finality_grandpa::FinalityProofProvider as GrandpaFinalityProofProvider; @@ -263,7 +268,7 @@ pub fn new_full(mut config: Configuration) -> Result Some(shared_authority_set.clone()), ); - Box::new(move |_, subscription_executor| { + Box::new(move |_, subscription_executor: sc_rpc::SubscriptionTaskExecutor| { let mut io = jsonrpc_core::IoHandler::default(); io.extend_with(SystemApi::to_delegate(FullSystem::new( client.clone(), @@ -277,9 +282,18 @@ pub fn new_full(mut config: Configuration) -> Result shared_authority_set.clone(), shared_voter_state.clone(), justification_stream.clone(), - subscription_executor, + subscription_executor.clone(), finality_proof_provider.clone(), ))); + io.extend_with(beefy_gadget_rpc::BeefyApi::to_delegate( + beefy_gadget_rpc::BeefyRpcHandler::new( + 
signed_commitment_stream.clone(), + subscription_executor, + ), + )); + io.extend_with(pallet_mmr_rpc::MmrApi::to_delegate(pallet_mmr_rpc::Mmr::new( + client.clone(), + ))); Ok(io) }) }; @@ -291,9 +305,7 @@ pub fn new_full(mut config: Configuration) -> Result task_manager: &mut task_manager, transaction_pool: transaction_pool.clone(), rpc_extensions_builder, - on_demand: None, - remote_blockchain: None, - backend, + backend: backend.clone(), system_rpc_tx, config, telemetry: telemetry.as_mut(), @@ -317,17 +329,18 @@ pub fn new_full(mut config: Configuration) -> Result let aura = sc_consensus_aura::start_aura::( StartAuraParams { slot_duration, - client, + client: client.clone(), select_chain, block_import, proposer_factory, create_inherent_data_providers: move |_, ()| async move { let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - let slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration( - *timestamp, - raw_slot_duration, - ); + let slot = + sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration( + *timestamp, + raw_slot_duration, + ); Ok((timestamp, slot)) }, @@ -345,7 +358,9 @@ pub fn new_full(mut config: Configuration) -> Result // the AURA authoring task is considered essential, i.e. if it // fails we take down the service with it. - task_manager.spawn_essential_handle().spawn_blocking("aura", aura); + task_manager + .spawn_essential_handle() + .spawn_blocking("aura", Some("block-authoring"), aura); } // if the node isn't actively participating in consensus then it doesn't @@ -353,6 +368,23 @@ pub fn new_full(mut config: Configuration) -> Result let keystore = if role.is_authority() { Some(keystore_container.sync_keystore()) } else { None }; + let beefy_params = beefy_gadget::BeefyParams { + client, + backend, + key_store: keystore.clone(), + network: network.clone(), + signed_commitment_sender, + min_block_delta: 4, + prometheus_registry: prometheus_registry.clone(), + }; + + // Start the BEEFY bridge gadget. + task_manager.spawn_essential_handle().spawn_blocking( + "beefy-gadget", + None, + beefy_gadget::start_beefy_gadget::<_, _, _, _>(beefy_params), + ); + let grandpa_config = sc_finality_grandpa::Config { // FIXME #1578 make this available through chainspec gossip_duration: Duration::from_millis(333), @@ -385,6 +417,7 @@ pub fn new_full(mut config: Configuration) -> Result // if it fails we take down the service with it. task_manager.spawn_essential_handle().spawn_blocking( "grandpa-voter", + None, sc_finality_grandpa::run_grandpa_voter(grandpa_config)?, ); } @@ -392,144 +425,3 @@ pub fn new_full(mut config: Configuration) -> Result network_starter.start_network(); Ok(task_manager) } - -/// Builds a new service for a light client. 
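(The removed light-client service continues below.) On the BEEFY wiring above: `BeefySignedCommitmentStream::channel()` splits into a sender handed to the gadget and a stream handed to the RPC handler, and the gadget itself is spawned as an essential task. A rough single-process analogy using `std::sync::mpsc` — the real stream is a multi-consumer notification channel, so this is only an approximation of the producer/consumer split:

```rust
use std::sync::mpsc;
use std::thread;

fn main() {
    // Stand-in for the channel() call: sender to the gadget (producer),
    // receiver to the RPC layer (consumer).
    let (signed_commitment_sender, signed_commitment_stream) = mpsc::channel::<u64>();

    // The "gadget": an essential background task producing signed commitments.
    let gadget = thread::spawn(move || {
        for block in 1..=3 {
            signed_commitment_sender.send(block).expect("rpc side alive");
        }
    });

    // The "RPC handler": subscribes to the stream and serves it to clients.
    for commitment in signed_commitment_stream.iter().take(3) {
        println!("new BEEFY signed commitment at block {commitment}");
    }
    gadget.join().unwrap();
}
```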
-pub fn new_light(mut config: Configuration) -> Result { - let telemetry = config - .telemetry_endpoints - .clone() - .filter(|x| !x.is_empty()) - .map(|endpoints| -> Result<_, sc_telemetry::Error> { - let worker = TelemetryWorker::new(16)?; - let telemetry = worker.handle().new_telemetry(endpoints); - Ok((worker, telemetry)) - }) - .transpose()?; - - let executor = NativeElseWasmExecutor::::new( - config.wasm_method, - config.default_heap_pages, - config.max_runtime_instances, - ); - - let (client, backend, keystore_container, mut task_manager, on_demand) = - sc_service::new_light_parts::( - &config, - telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), - executor, - )?; - - let mut telemetry = telemetry.map(|(worker, telemetry)| { - task_manager.spawn_handle().spawn("telemetry", worker.run()); - telemetry - }); - - config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config()); - - let select_chain = sc_consensus::LongestChain::new(backend.clone()); - - let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( - config.transaction_pool.clone(), - config.prometheus_registry(), - task_manager.spawn_essential_handle(), - client.clone(), - on_demand.clone(), - )); - - let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import( - client.clone(), - &(client.clone() as Arc<_>), - select_chain, - telemetry.as_ref().map(|x| x.handle()), - )?; - - let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration(); - - let import_queue = - sc_consensus_aura::import_queue::(ImportQueueParams { - block_import: grandpa_block_import.clone(), - justification_import: Some(Box::new(grandpa_block_import)), - client: client.clone(), - create_inherent_data_providers: move |_, ()| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration( - *timestamp, - slot_duration, - ); - - Ok((timestamp, slot)) - }, - spawner: &task_manager.spawn_essential_handle(), - can_author_with: sp_consensus::NeverCanAuthor, - registry: config.prometheus_registry(), - check_for_equivocation: Default::default(), - telemetry: telemetry.as_ref().map(|x| x.handle()), - })?; - - let warp_sync = Arc::new(sc_finality_grandpa::warp_proof::NetworkProvider::new( - backend.clone(), - grandpa_link.shared_authority_set().clone(), - vec![], - )); - - let (network, system_rpc_tx, network_starter) = - sc_service::build_network(sc_service::BuildNetworkParams { - config: &config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue, - on_demand: Some(on_demand.clone()), - block_announce_validator_builder: None, - warp_sync: Some(warp_sync), - })?; - - if config.offchain_worker.enabled { - sc_service::build_offchain_workers( - &config, - task_manager.spawn_handle(), - client.clone(), - network.clone(), - ); - } - - let enable_grandpa = !config.disable_grandpa; - if enable_grandpa { - let name = config.network.node_name.clone(); - - let config = sc_finality_grandpa::Config { - gossip_duration: std::time::Duration::from_millis(333), - justification_period: 512, - name: Some(name), - observer_enabled: false, - keystore: None, - local_role: config.role.clone(), - telemetry: telemetry.as_ref().map(|x| x.handle()), - }; - - task_manager.spawn_handle().spawn_blocking( - "grandpa-observer", - sc_finality_grandpa::run_grandpa_observer(config, grandpa_link, network.clone())?, - ); - } - 
- sc_service::spawn_tasks(sc_service::SpawnTasksParams { - remote_blockchain: Some(backend.remote_blockchain()), - transaction_pool, - task_manager: &mut task_manager, - on_demand: Some(on_demand), - rpc_extensions_builder: Box::new(|_, _| Ok(())), - config, - client, - keystore: keystore_container.sync_keystore(), - backend, - network, - system_rpc_tx, - telemetry: telemetry.as_mut(), - })?; - - network_starter.start_network(); - Ok(task_manager) -} diff --git a/bin/millau/runtime/Cargo.toml b/bin/millau/runtime/Cargo.toml index c8d7f0a15958..13195b95194b 100644 --- a/bin/millau/runtime/Cargo.toml +++ b/bin/millau/runtime/Cargo.toml @@ -30,6 +30,7 @@ pallet-shift-session-manager = { path = "../../../modules/shift-session-manager" # Substrate Dependencies +beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } @@ -37,7 +38,11 @@ frame-system = { git = "https://github.com/paritytech/substrate", branch = "mast frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-aura = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-beefy = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-beefy-mmr = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-mmr = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-mmr-primitives = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } @@ -64,6 +69,7 @@ substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", bran [features] default = ["std"] std = [ + "beefy-primitives/std", "bp-header-chain/std", "bp-messages/std", "bp-millau/std", @@ -78,11 +84,14 @@ std = [ "frame-system/std", "pallet-aura/std", "pallet-balances/std", + "pallet-beefy/std", + "pallet-beefy-mmr/std", "pallet-bridge-dispatch/std", "pallet-bridge-grandpa/std", "pallet-bridge-messages/std", "pallet-bridge-token-swap/std", "pallet-grandpa/std", + "pallet-mmr/std", "pallet-randomness-collective-flip/std", "pallet-session/std", "pallet-shift-session-manager/std", diff --git a/bin/millau/runtime/src/lib.rs b/bin/millau/runtime/src/lib.rs index 4e486c267010..288ff9a47d60 100644 --- a/bin/millau/runtime/src/lib.rs +++ b/bin/millau/runtime/src/lib.rs @@ -34,19 +34,23 @@ pub mod rialto_messages; use crate::rialto_messages::{ToRialtoMessagePayload, 
WithRialtoMessageBridge}; +use beefy_primitives::{crypto::AuthorityId as BeefyId, mmr::MmrLeafVersion, ValidatorSet}; use bridge_runtime_common::messages::{ source::estimate_message_dispatch_and_delivery_fee, MessageBridge, }; use pallet_grandpa::{ fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, }; +use pallet_mmr_primitives::{ + DataOrHash, EncodableOpaqueLeaf, Error as MmrError, LeafDataProvider, Proof as MmrProof, +}; use pallet_transaction_payment::{FeeDetails, Multiplier, RuntimeDispatchInfo}; use sp_api::impl_runtime_apis; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{Block as BlockT, IdentityLookup, NumberFor, OpaqueKeys}, + traits::{Block as BlockT, IdentityLookup, Keccak256, NumberFor, OpaqueKeys}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, FixedPointNumber, MultiSignature, MultiSigner, Perquintill, }; @@ -100,9 +104,6 @@ pub type Hash = bp_millau::Hash; /// Hashing algorithm used by the chain. pub type Hashing = bp_millau::Hasher; -/// Digest item type. -pub type DigestItem = generic::DigestItem; - /// Opaque types. These are used by the CLI to instantiate machinery that don't need to know /// the specifics of the runtime. They can then be made to be agnostic over specific formats /// of data like extrinsics, allowing for them to continue syncing the network through upgrades @@ -123,6 +124,7 @@ pub mod opaque { impl_opaque_keys! { pub struct SessionKeys { pub aura: Aura, + pub beefy: Beefy, pub grandpa: Grandpa, } } @@ -215,6 +217,11 @@ impl pallet_aura::Config for Runtime { type MaxAuthorities = MaxAuthorities; type DisabledValidators = (); } + +impl pallet_beefy::Config for Runtime { + type BeefyId = BeefyId; +} + impl pallet_bridge_dispatch::Config for Runtime { type Event = Event; type BridgeMessageId = (bp_messages::LaneId, bp_messages::MessageNonce); @@ -243,6 +250,40 @@ impl pallet_grandpa::Config for Runtime { type MaxAuthorities = MaxAuthorities; } +type MmrHash = ::Output; + +impl pallet_mmr::Config for Runtime { + const INDEXING_PREFIX: &'static [u8] = b"mmr"; + type Hashing = Keccak256; + type Hash = MmrHash; + type OnNewRoot = pallet_beefy_mmr::DepositBeefyDigest; + type WeightInfo = (); + type LeafData = pallet_beefy_mmr::Pallet; +} + +parameter_types! { + /// Version of the produced MMR leaf. + /// + /// The version consists of two parts; + /// - `major` (3 bits) + /// - `minor` (5 bits) + /// + /// `major` should be updated only if decoding the previous MMR Leaf format from the payload + /// is not possible (i.e. backward incompatible change). + /// `minor` should be updated if fields are added to the previous MMR Leaf, which given SCALE + /// encoding does not prevent old leafs from being decoded. + /// + /// Hence we expect `major` to be changed really rarely (think never). + /// See [`MmrLeafVersion`] type documentation for more details. + pub LeafVersion: MmrLeafVersion = MmrLeafVersion::new(0, 0); +} + +impl pallet_beefy_mmr::Config for Runtime { + type LeafVersion = LeafVersion; + type BeefyAuthorityToMerkleLeaf = pallet_beefy_mmr::BeefyEcdsaToEthereum; + type ParachainHeads = (); +} + parameter_types! 
{ pub const MinimumPeriod: u64 = bp_millau::SLOT_DURATION / 2; } @@ -462,6 +503,11 @@ construct_runtime!( ShiftSessionManager: pallet_shift_session_manager::{Pallet}, RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Storage}, + // BEEFY Bridges support. + Beefy: pallet_beefy::{Pallet, Storage, Config<T>}, + Mmr: pallet_mmr::{Pallet, Storage}, + MmrLeaf: pallet_beefy_mmr::{Pallet, Storage}, + // Rialto bridge modules. BridgeRialtoGrandpa: pallet_bridge_grandpa::{Pallet, Call, Storage}, BridgeDispatch: pallet_bridge_dispatch::{Pallet, Event}, @@ -606,6 +652,45 @@ impl_runtime_apis! { } } + impl beefy_primitives::BeefyApi<Block> for Runtime { + fn validator_set() -> ValidatorSet<BeefyId> { + Beefy::validator_set() + } + } + + impl pallet_mmr_primitives::MmrApi<Block, MmrHash> for Runtime { + fn generate_proof(leaf_index: u64) + -> Result<(EncodableOpaqueLeaf, MmrProof<MmrHash>), MmrError> + { + Mmr::generate_proof(leaf_index) + .map(|(leaf, proof)| (EncodableOpaqueLeaf::from_leaf(&leaf), proof)) + } + + fn verify_proof(leaf: EncodableOpaqueLeaf, proof: MmrProof<MmrHash>) + -> Result<(), MmrError> + { + pub type Leaf = < + <Runtime as pallet_mmr::Config>::LeafData as LeafDataProvider + >::LeafData; + + let leaf: Leaf = leaf + .into_opaque_leaf() + .try_decode() + .ok_or(MmrError::Verify)?; + Mmr::verify_leaf(leaf, proof) + } + + fn verify_proof_stateless( + root: MmrHash, + leaf: EncodableOpaqueLeaf, + proof: MmrProof<MmrHash> + ) -> Result<(), MmrError> { + type MmrHashing = <Runtime as pallet_mmr::Config>::Hashing; + let node = DataOrHash::Data(leaf.into_opaque_leaf()); + pallet_mmr::verify_leaf_proof::<MmrHashing, _>(root, node, proof) + } + } + impl fg_primitives::GrandpaApi<Block> for Runtime { fn current_set_id() -> fg_primitives::SetId { Grandpa::current_set_id() diff --git a/bin/rialto-parachain/node/src/chain_spec.rs b/bin/rialto-parachain/node/src/chain_spec.rs index f93887a21e47..52012423fb71 100644 --- a/bin/rialto-parachain/node/src/chain_spec.rs +++ b/bin/rialto-parachain/node/src/chain_spec.rs @@ -151,7 +151,6 @@ fn testnet_genesis( code: rialto_parachain_runtime::WASM_BINARY .expect("WASM binary was not built, please build it!") .to_vec(), - changes_trie_config: Default::default(), }, balances: rialto_parachain_runtime::BalancesConfig { balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 60)).collect(), diff --git a/bin/rialto-parachain/node/src/cli.rs b/bin/rialto-parachain/node/src/cli.rs index bc2238e2fd44..78c05f90c880 100644 --- a/bin/rialto-parachain/node/src/cli.rs +++ b/bin/rialto-parachain/node/src/cli.rs @@ -103,6 +103,9 @@ pub struct Cli { #[structopt(subcommand)] pub subcommand: Option<Subcommand>, + #[structopt(long)] + pub parachain_id: Option<u32>, + #[structopt(flatten)] pub run: cumulus_client_cli::RunCmd, diff --git a/bin/rialto-parachain/node/src/command.rs b/bin/rialto-parachain/node/src/command.rs index eb9aba2c104b..e4f52cc026a7 100644 --- a/bin/rialto-parachain/node/src/command.rs +++ b/bin/rialto-parachain/node/src/command.rs @@ -77,7 +77,7 @@ impl SubstrateCli for Cli { } fn load_spec(&self, id: &str) -> std::result::Result<Box<dyn sc_service::ChainSpec>, String> { - load_spec(id, self.run.parachain_id.unwrap_or(2000).into()) + load_spec(id, self.parachain_id.unwrap_or(2000).into()) } fn native_runtime_version(_: &Box<dyn ChainSpec>) -> &'static RuntimeVersion { @@ -153,7 +153,7 @@ macro_rules! construct_async_run { /// Parse command line arguments into service configuration.
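An aside on the `LeafVersion` parameter configured for the Millau runtime above: the doc comment describes one version byte split into a 3-bit `major` and a 5-bit `minor`. The sketch below is illustrative only; the canonical encoding lives in `beefy_primitives::mmr::MmrLeafVersion`, and the `(major << 5) | minor` layout shown here is an assumption based on the stated bit widths.

```rust
/// Illustrative sketch of the 3-bit `major` / 5-bit `minor` split described
/// in the `LeafVersion` comment above. Assumes a `(major << 5) | minor`
/// layout; the canonical encoding is `MmrLeafVersion::new` in `beefy_primitives`.
fn pack_leaf_version(major: u8, minor: u8) -> u8 {
    assert!(major < 1 << 3, "`major` must fit in 3 bits");
    assert!(minor < 1 << 5, "`minor` must fit in 5 bits");
    (major << 5) | minor
}

fn main() {
    // The version both runtimes configure: major = 0, minor = 0.
    assert_eq!(pack_leaf_version(0, 0), 0b000_00000);
    // A backwards-compatible leaf change would only bump `minor`.
    assert_eq!(pack_leaf_version(0, 1), 0b000_00001);
}
```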
pub fn run() -> Result<()> { let cli = Cli::from_args(); - sp_core::crypto::set_default_ss58_version(sp_core::crypto::Ss58AddressFormat::Custom( + sp_core::crypto::set_default_ss58_version(sp_core::crypto::Ss58AddressFormat::custom( rialto_parachain_runtime::SS58Prefix::get() as u16, )); @@ -273,7 +273,7 @@ pub fn run() -> Result<()> { [RelayChainCli::executable_name()].iter().chain(cli.relaychain_args.iter()), ); - let id = ParaId::from(cli.run.parachain_id.or(para_id).expect("Missing ParaId")); + let id = ParaId::from(cli.parachain_id.or(para_id).expect("Missing ParaId")); let parachain_account = AccountIdConversion::::into_account(&id); diff --git a/bin/rialto-parachain/node/src/service.rs b/bin/rialto-parachain/node/src/service.rs index 65a8e7bb65c5..bd3afca30744 100644 --- a/bin/rialto-parachain/node/src/service.rs +++ b/bin/rialto-parachain/node/src/service.rs @@ -147,7 +147,7 @@ where let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle()); let telemetry = telemetry.map(|(worker, telemetry)| { - task_manager.spawn_handle().spawn("telemetry", worker.run()); + task_manager.spawn_handle().spawn("telemetry", None, worker.run()); telemetry }); @@ -283,7 +283,6 @@ where transaction_pool: transaction_pool.clone(), spawn_handle: task_manager.spawn_handle(), import_queue: import_queue.clone(), - on_demand: None, block_announce_validator_builder: Some(Box::new(|_| block_announce_validator)), warp_sync: None, })?; @@ -292,8 +291,6 @@ where let rpc_extensions_builder = Box::new(move |_, _| Ok(rpc_ext_builder(rpc_client.clone()))); sc_service::spawn_tasks(sc_service::SpawnTasksParams { - on_demand: None, - remote_blockchain: None, rpc_extensions_builder, client: client.clone(), transaction_pool: transaction_pool.clone(), diff --git a/bin/rialto/node/Cargo.toml b/bin/rialto/node/Cargo.toml index 75be9bcd9fb7..2795f2eecaec 100644 --- a/bin/rialto/node/Cargo.toml +++ b/bin/rialto/node/Cargo.toml @@ -28,10 +28,15 @@ rialto-runtime = { path = "../runtime" } # Substrate Dependencies +beefy-gadget = { git = "https://github.com/paritytech/substrate", branch = "master" } +beefy-gadget-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } +beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "master" } frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" } frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" } node-inspect = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-mmr-primitives = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-mmr-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master" } @@ -70,6 +75,10 @@ sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "mast substrate-frame-rpc-system = { git = "https://github.com/paritytech/substrate", branch = "master" } substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "master" } +# Polkadot Dependencies + +polkadot-client = { git = 
"https://github.com/paritytech/polkadot", branch = "master" } + # Polkadot (parachain) Dependencies polkadot-approval-distribution = { git = "https://github.com/paritytech/polkadot", branch = "master" } @@ -88,7 +97,6 @@ polkadot-node-core-bitfield-signing = { git = "https://github.com/paritytech/pol polkadot-node-core-candidate-validation = { git = "https://github.com/paritytech/polkadot", branch = "master" } polkadot-node-core-chain-api = { git = "https://github.com/paritytech/polkadot", branch = "master" } polkadot-node-core-chain-selection = { git = "https://github.com/paritytech/polkadot", branch = "master" } -polkadot-node-core-dispute-participation = { git = "https://github.com/paritytech/polkadot", branch = "master" } polkadot-node-core-parachains-inherent = { git = "https://github.com/paritytech/polkadot", branch = "master" } polkadot-node-core-provisioner = { git = "https://github.com/paritytech/polkadot", branch = "master" } polkadot-node-core-pvf = { git = "https://github.com/paritytech/polkadot", branch = "master" } diff --git a/bin/rialto/node/src/chain_spec.rs b/bin/rialto/node/src/chain_spec.rs index 3ccfa13e74ac..fb18a35a6af0 100644 --- a/bin/rialto/node/src/chain_spec.rs +++ b/bin/rialto/node/src/chain_spec.rs @@ -14,12 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . +use beefy_primitives::crypto::AuthorityId as BeefyId; use bp_rialto::derive_account_from_millau_id; use polkadot_primitives::v1::{AssignmentId, ValidatorId}; use rialto_runtime::{ - AccountId, BabeConfig, BalancesConfig, BridgeKovanConfig, BridgeMillauMessagesConfig, - BridgeRialtoPoaConfig, ConfigurationConfig, GenesisConfig, GrandpaConfig, SessionConfig, - SessionKeys, Signature, SudoConfig, SystemConfig, WASM_BINARY, + AccountId, BabeConfig, BalancesConfig, BeefyConfig, BridgeMillauMessagesConfig, + ConfigurationConfig, GenesisConfig, GrandpaConfig, SessionConfig, SessionKeys, Signature, + SudoConfig, SystemConfig, WASM_BINARY, }; use serde_json::json; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; @@ -62,10 +63,11 @@ where /// Helper function to generate authority keys. 
pub fn get_authority_keys_from_seed( s: &str, -) -> (AccountId, BabeId, GrandpaId, ValidatorId, AssignmentId, AuthorityDiscoveryId) { +) -> (AccountId, BabeId, BeefyId, GrandpaId, ValidatorId, AssignmentId, AuthorityDiscoveryId) { ( get_account_id_from_seed::<sr25519::Public>(s), get_from_seed::<BabeId>(s), + get_from_seed::<BeefyId>(s), get_from_seed::<GrandpaId>(s), get_from_seed::<ValidatorId>(s), get_from_seed::<AssignmentId>(s), @@ -183,18 +185,20 @@ impl Alternative { fn session_keys( babe: BabeId, + beefy: BeefyId, grandpa: GrandpaId, para_validator: ValidatorId, para_assignment: AssignmentId, authority_discovery: AuthorityDiscoveryId, ) -> SessionKeys { - SessionKeys { babe, grandpa, para_validator, para_assignment, authority_discovery } + SessionKeys { babe, beefy, grandpa, para_validator, para_assignment, authority_discovery } } fn testnet_genesis( initial_authorities: Vec<( AccountId, BabeId, + BeefyId, GrandpaId, ValidatorId, AssignmentId, @@ -207,7 +211,6 @@ fn testnet_genesis( GenesisConfig { system: SystemConfig { code: WASM_BINARY.expect("Rialto development WASM not available").to_vec(), - changes_trie_config: Default::default(), }, balances: BalancesConfig { balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 50)).collect(), @@ -216,8 +219,7 @@ fn testnet_genesis( authorities: Vec::new(), epoch_config: Some(rialto_runtime::BABE_GENESIS_EPOCH_CONFIG), }, - bridge_rialto_poa: load_rialto_poa_bridge_config(), - bridge_kovan: load_kovan_bridge_config(), + beefy: BeefyConfig { authorities: Vec::new() }, grandpa: GrandpaConfig { authorities: Vec::new() }, sudo: SudoConfig { key: root_key }, session: SessionConfig { @@ -233,6 +235,7 @@ fn testnet_genesis( x.3.clone(), x.4.clone(), x.5.clone(), + x.6.clone(), ), ) }) @@ -291,22 +294,6 @@ fn testnet_genesis( } } -fn load_rialto_poa_bridge_config() -> BridgeRialtoPoaConfig { - BridgeRialtoPoaConfig { - initial_header: rialto_runtime::rialto_poa::genesis_header(), - initial_difficulty: 0.into(), - initial_validators: rialto_runtime::rialto_poa::genesis_validators(), - } -} - -fn load_kovan_bridge_config() -> BridgeKovanConfig { - BridgeKovanConfig { - initial_header: rialto_runtime::kovan::genesis_header(), - initial_difficulty: 0.into(), - initial_validators: rialto_runtime::kovan::genesis_validators(), - } -} - #[test] fn derived_dave_account_is_as_expected() { let dave = get_account_id_from_seed::<sr25519::Public>("Dave"); diff --git a/bin/rialto/node/src/command.rs b/bin/rialto/node/src/command.rs index 6f841a9d67f1..7be615a57760 100644 --- a/bin/rialto/node/src/command.rs +++ b/bin/rialto/node/src/command.rs @@ -70,7 +70,7 @@ impl SubstrateCli for Cli { /// Parse and run command line arguments pub fn run() -> sc_cli::Result<()> { let cli = Cli::from_args(); - sp_core::crypto::set_default_ss58_version(sp_core::crypto::Ss58AddressFormat::Custom( + sp_core::crypto::set_default_ss58_version(sp_core::crypto::Ss58AddressFormat::custom( rialto_runtime::SS58Prefix::get() as u16, )); diff --git a/bin/rialto/node/src/overseer.rs b/bin/rialto/node/src/overseer.rs index 17f7edce2a31..9a7025e77c9b 100644 --- a/bin/rialto/node/src/overseer.rs +++ b/bin/rialto/node/src/overseer.rs @@ -63,8 +63,7 @@ pub use polkadot_node_core_candidate_validation::CandidateValidationSubsystem; pub use polkadot_node_core_chain_api::ChainApiSubsystem; pub use polkadot_node_core_chain_selection::ChainSelectionSubsystem; pub use polkadot_node_core_dispute_coordinator::DisputeCoordinatorSubsystem; -pub use polkadot_node_core_dispute_participation::DisputeParticipationSubsystem; -pub use polkadot_node_core_provisioner::ProvisioningSubsystem as
ProvisionerSubsystem; +pub use polkadot_node_core_provisioner::ProvisionerSubsystem; pub use polkadot_node_core_runtime_api::RuntimeApiSubsystem; pub use polkadot_statement_distribution::StatementDistribution as StatementDistributionSubsystem; @@ -160,7 +159,6 @@ pub fn prepared_overseer_builder( ApprovalVotingSubsystem, GossipSupportSubsystem, DisputeCoordinatorSubsystem, - DisputeParticipationSubsystem, DisputeDistributionSubsystem, ChainSelectionSubsystem, >, @@ -249,7 +247,6 @@ where keystore.clone(), Metrics::register(registry)?, )) - .dispute_participation(DisputeParticipationSubsystem::new()) .dispute_distribution(DisputeDistributionSubsystem::new( keystore, dispute_req_receiver, diff --git a/bin/rialto/node/src/service.rs b/bin/rialto/node/src/service.rs index e2e811eaa67f..3349b09edb9f 100644 --- a/bin/rialto/node/src/service.rs +++ b/bin/rialto/node/src/service.rs @@ -17,16 +17,11 @@ //! Rialto chain node service. //! //! The code is mostly copy of `service/src/lib.rs` file from Polkadot repository -//! without optional functions. - -// this warning comes from Error enum (sc_cli::Error in particular) && it isn't easy to use box -// there -#![allow(clippy::large_enum_variant)] -// this warning comes from `sc_service::PartialComponents` type -#![allow(clippy::type_complexity)] +//! without optional functions, and with BEEFY added on top. use crate::overseer::{OverseerGen, OverseerGenArgs}; +use polkadot_client::RuntimeApiCollection; use polkadot_node_core_approval_voting::Config as ApprovalVotingConfig; use polkadot_node_core_av_store::Config as AvailabilityConfig; use polkadot_node_core_candidate_validation::Config as CandidateValidationConfig; @@ -43,7 +38,7 @@ use sc_service::{config::PrometheusConfig, Configuration, TaskManager}; use sc_telemetry::{Telemetry, TelemetryWorker}; use sp_api::{ConstructRuntimeApi, HeaderT}; use sp_consensus::SelectChain; -use sp_runtime::traits::{BlakeTwo256, Block as BlockT}; +use sp_runtime::traits::Block as BlockT; use std::{sync::Arc, time::Duration}; use substrate_prometheus_endpoint::Registry; @@ -115,52 +110,6 @@ type FullBabeBlockImport = type FullBabeLink = sc_consensus_babe::BabeLink; type FullGrandpaLink = sc_finality_grandpa::LinkHalf; -/// A set of APIs that polkadot-like runtimes must implement. -/// -/// This is the copy of `polkadot_service::RuntimeApiCollection` with some APIs removed -/// (right now - MMR and BEEFY). 
-pub trait RequiredApiCollection: - sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::ApiExt - + sp_consensus_babe::BabeApi - + sp_finality_grandpa::GrandpaApi - + polkadot_primitives::v1::ParachainHost - + sp_block_builder::BlockBuilder - + frame_system_rpc_runtime_api::AccountNonceApi< - Block, - bp_rialto::AccountId, - rialto_runtime::Index, - > + pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi - + sp_api::Metadata - + sp_offchain::OffchainWorkerApi - + sp_session::SessionKeys - + sp_authority_discovery::AuthorityDiscoveryApi -where - >::StateBackend: sp_api::StateBackend, -{ -} - -impl RequiredApiCollection for Api -where - Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::ApiExt - + sp_consensus_babe::BabeApi - + sp_finality_grandpa::GrandpaApi - + polkadot_primitives::v1::ParachainHost - + sp_block_builder::BlockBuilder - + frame_system_rpc_runtime_api::AccountNonceApi< - Block, - bp_rialto::AccountId, - rialto_runtime::Index, - > + pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi - + sp_api::Metadata - + sp_offchain::OffchainWorkerApi - + sp_session::SessionKeys - + sp_authority_discovery::AuthorityDiscoveryApi, - >::StateBackend: sp_api::StateBackend, -{ -} - // If we're using prometheus, use a registry with a prefix of `polkadot`. fn set_prometheus_registry(config: &mut Configuration) -> Result<(), Error> { if let Some(PrometheusConfig { registry, .. }) = config.prometheus_config.as_mut() { @@ -170,6 +119,8 @@ fn set_prometheus_registry(config: &mut Configuration) -> Result<(), Error> { Ok(()) } +// Needed here for complex return type while `impl Trait` in type aliases is unstable. +#[allow(clippy::type_complexity)] pub fn new_partial( config: &mut Configuration, ) -> Result< @@ -184,7 +135,12 @@ pub fn new_partial( sc_rpc::DenyUnsafe, sc_rpc::SubscriptionTaskExecutor, ) -> Result, sc_service::Error>, - (FullBabeBlockImport, FullGrandpaLink, FullBabeLink), + ( + FullBabeBlockImport, + FullGrandpaLink, + FullBabeLink, + beefy_gadget::notification::BeefySignedCommitmentSender, + ), sc_finality_grandpa::SharedVoterState, std::time::Duration, Option, @@ -195,7 +151,7 @@ pub fn new_partial( where RuntimeApi: ConstructRuntimeApi + Send + Sync + 'static, >::RuntimeApi: - RequiredApiCollection>, + RuntimeApiCollection>, ExecutorDispatch: NativeExecutionDispatch + 'static, { set_prometheus_registry(config)?; @@ -226,7 +182,7 @@ where let client = Arc::new(client); let telemetry = telemetry.map(|(worker, telemetry)| { - task_manager.spawn_handle().spawn("telemetry", worker.run()); + task_manager.spawn_handle().spawn("telemetry", None, worker.run()); telemetry }); @@ -282,7 +238,10 @@ where let shared_authority_set = grandpa_link.shared_authority_set().clone(); let shared_voter_state = sc_finality_grandpa::SharedVoterState::empty(); - let import_setup = (block_import, grandpa_link, babe_link); + let (signed_commitment_sender, signed_commitment_stream) = + beefy_gadget::notification::BeefySignedCommitmentStream::channel(); + + let import_setup = (block_import, grandpa_link, babe_link, signed_commitment_sender); let rpc_setup = shared_voter_state.clone(); let slot_duration = babe_config.slot_duration(); @@ -316,14 +275,23 @@ where pool, deny_unsafe, ))); - io.extend_with(TransactionPaymentApi::to_delegate(TransactionPayment::new(client))); + io.extend_with(TransactionPaymentApi::to_delegate(TransactionPayment::new( + client.clone(), + ))); io.extend_with(GrandpaApi::to_delegate(GrandpaRpcHandler::new( 
shared_authority_set.clone(), shared_voter_state, justification_stream.clone(), - subscription_executor, + subscription_executor.clone(), finality_proof_provider, ))); + io.extend_with(beefy_gadget_rpc::BeefyApi::to_delegate( + beefy_gadget_rpc::BeefyRpcHandler::new( + signed_commitment_stream.clone(), + subscription_executor, + ), + )); + io.extend_with(pallet_mmr_rpc::MmrApi::to_delegate(pallet_mmr_rpc::Mmr::new(client))); Ok(io) } @@ -361,7 +329,7 @@ async fn active_leaves( where RuntimeApi: ConstructRuntimeApi + Send + Sync + 'static, >::RuntimeApi: - RequiredApiCollection>, + RuntimeApiCollection>, ExecutorDispatch: NativeExecutionDispatch + 'static, { let best_block = select_chain.best_chain().await?; @@ -406,7 +374,7 @@ pub fn new_full( where RuntimeApi: ConstructRuntimeApi + Send + Sync + 'static, >::RuntimeApi: - RequiredApiCollection>, + RuntimeApiCollection>, ExecutorDispatch: NativeExecutionDispatch + 'static, { let is_collator = false; @@ -442,6 +410,8 @@ where // Substrate nodes. config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config()); + config.network.extra_sets.push(beefy_gadget::beefy_peers_set_config()); + { use polkadot_network_bridge::{peer_sets_info, IsAuthority}; let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No }; @@ -474,7 +444,6 @@ where transaction_pool: transaction_pool.clone(), spawn_handle: task_manager.spawn_handle(), import_queue, - on_demand: None, block_announce_validator_builder: None, warp_sync: Some(warp_sync), })?; @@ -533,13 +502,11 @@ where rpc_extensions_builder: Box::new(rpc_extensions_builder), transaction_pool: transaction_pool.clone(), task_manager: &mut task_manager, - on_demand: None, - remote_blockchain: None, system_rpc_tx, telemetry: telemetry.as_mut(), })?; - let (block_import, link_half, babe_link) = import_setup; + let (block_import, link_half, babe_link, signed_commitment_sender) = import_setup; let overseer_client = client.clone(); let spawner = task_manager.spawn_handle(); @@ -574,7 +541,9 @@ where prometheus_registry.clone(), ); - task_manager.spawn_handle().spawn("authority-discovery-worker", worker.run()); + task_manager + .spawn_handle() + .spawn("authority-discovery-worker", None, worker.run()); Some(service) } else { None @@ -619,6 +588,7 @@ where let handle = handle.clone(); task_manager.spawn_essential_handle().spawn_blocking( "overseer", + None, Box::pin(async move { use futures::{pin_mut, select, FutureExt}; @@ -705,7 +675,7 @@ where }; let babe = sc_consensus_babe::start_babe(babe_config)?; - task_manager.spawn_essential_handle().spawn_blocking("babe", babe); + task_manager.spawn_essential_handle().spawn_blocking("babe", None, babe); } // if the node isn't actively participating in consensus then it doesn't @@ -713,6 +683,23 @@ where let keystore_opt = if role.is_authority() { Some(keystore_container.sync_keystore()) } else { None }; + let beefy_params = beefy_gadget::BeefyParams { + client: client.clone(), + backend: backend.clone(), + key_store: keystore_opt.clone(), + network: network.clone(), + signed_commitment_sender, + min_block_delta: 2, + prometheus_registry: prometheus_registry.clone(), + }; + + // Start the BEEFY bridge gadget. 
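+ // It is spawned below as an *essential* task: if the gadget ever exits, the
+ // task manager shuts the whole node down rather than leaving it running
+ // without BEEFY voting.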
+ task_manager.spawn_essential_handle().spawn_blocking( + "beefy-gadget", + None, + beefy_gadget::start_beefy_gadget::<_, _, _, _>(beefy_params), + ); + let config = sc_finality_grandpa::Config { // FIXME substrate#1578 make this available through chainspec gossip_duration: Duration::from_millis(1000), @@ -751,6 +738,7 @@ where task_manager.spawn_essential_handle().spawn_blocking( "grandpa-voter", + None, sc_finality_grandpa::run_grandpa_voter(grandpa_config)?, ); } diff --git a/bin/rialto/runtime/Cargo.toml b/bin/rialto/runtime/Cargo.toml index c0be917c2bcc..3c4ec1ebce1c 100644 --- a/bin/rialto/runtime/Cargo.toml +++ b/bin/rialto/runtime/Cargo.toml @@ -17,8 +17,6 @@ serde = { version = "1.0", optional = true, features = ["derive"] } # Bridge dependencies -bp-currency-exchange = { path = "../../../primitives/currency-exchange", default-features = false } -bp-eth-poa = { path = "../../../primitives/ethereum-poa", default-features = false } bp-header-chain = { path = "../../../primitives/header-chain", default-features = false } bp-message-dispatch = { path = "../../../primitives/message-dispatch", default-features = false } bp-messages = { path = "../../../primitives/messages", default-features = false } @@ -26,15 +24,14 @@ bp-millau = { path = "../../../primitives/chain-millau", default-features = fals bp-rialto = { path = "../../../primitives/chain-rialto", default-features = false } bp-runtime = { path = "../../../primitives/runtime", default-features = false } bridge-runtime-common = { path = "../../runtime-common", default-features = false } -pallet-bridge-currency-exchange = { path = "../../../modules/currency-exchange", default-features = false } pallet-bridge-dispatch = { path = "../../../modules/dispatch", default-features = false } -pallet-bridge-eth-poa = { path = "../../../modules/ethereum", default-features = false } pallet-bridge-grandpa = { path = "../../../modules/grandpa", default-features = false } pallet-bridge-messages = { path = "../../../modules/messages", default-features = false } pallet-shift-session-manager = { path = "../../../modules/shift-session-manager", default-features = false } # Substrate Dependencies +beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } @@ -43,7 +40,11 @@ frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate" pallet-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-beefy = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-beefy-mmr = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-mmr = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = 
false } +pallet-mmr-primitives = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } @@ -80,8 +81,7 @@ substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", bran [features] default = ["std"] std = [ - "bp-currency-exchange/std", - "bp-eth-poa/std", + "beefy-primitives/std", "bp-header-chain/std", "bp-message-dispatch/std", "bp-messages/std", @@ -99,12 +99,14 @@ std = [ "pallet-authority-discovery/std", "pallet-babe/std", "pallet-balances/std", - "pallet-bridge-currency-exchange/std", + "pallet-beefy/std", + "pallet-beefy-mmr/std", "pallet-bridge-dispatch/std", - "pallet-bridge-eth-poa/std", "pallet-bridge-grandpa/std", "pallet-bridge-messages/std", "pallet-grandpa/std", + "pallet-mmr/std", + "pallet-mmr-primitives/std", "pallet-shift-session-manager/std", "pallet-sudo/std", "pallet-timestamp/std", @@ -137,8 +139,6 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "libsecp256k1", - "pallet-bridge-currency-exchange/runtime-benchmarks", - "pallet-bridge-eth-poa/runtime-benchmarks", "pallet-bridge-messages/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] diff --git a/bin/rialto/runtime/src/benches.rs b/bin/rialto/runtime/src/benches.rs deleted file mode 100644 index ce3f84069795..000000000000 --- a/bin/rialto/runtime/src/benches.rs +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2020-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! We want to use a different validator configuration for benchmarking than what's used in Kovan -//! or in our Rialto test network. However, we can't configure a new validator set on the fly which -//! means we need to wire the runtime together like this - -use pallet_bridge_eth_poa::{ValidatorsConfiguration, ValidatorsSource}; -use sp_std::vec; - -pub use crate::kovan::{ - genesis_header, genesis_validators, BridgeAuraConfiguration, FinalityVotesCachingInterval, - PruningStrategy, -}; - -frame_support::parameter_types! 
{ - pub BridgeValidatorsConfiguration: pallet_bridge_eth_poa::ValidatorsConfiguration = bench_validator_config(); -} - -fn bench_validator_config() -> ValidatorsConfiguration { - ValidatorsConfiguration::Multi(vec![ - (0, ValidatorsSource::List(vec![[1; 20].into()])), - (1, ValidatorsSource::Contract([3; 20].into(), vec![[1; 20].into()])), - ]) -} diff --git a/bin/rialto/runtime/src/exchange.rs b/bin/rialto/runtime/src/exchange.rs deleted file mode 100644 index 4e18053e52e3..000000000000 --- a/bin/rialto/runtime/src/exchange.rs +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Support for PoA -> Substrate native tokens exchange. -//! -//! If you want to exchange native PoA tokens for native Substrate -//! chain tokens, you need to: -//! 1) send some PoA tokens to `LOCK_FUNDS_ADDRESS` address on PoA chain. Data field of -//! the transaction must be SCALE-encoded id of Substrate account that will receive -//! funds on Substrate chain; -//! 2) wait until the 'lock funds' transaction is mined on PoA chain; -//! 3) wait until the block containing the 'lock funds' transaction is finalized on PoA chain; -//! 4) wait until the required PoA header and its finality are provided -//! to the PoA -> Substrate bridge module (it can be provided by you); -//! 5) receive tokens by providing proof-of-inclusion of PoA transaction. - -use bp_currency_exchange::{ - Error as ExchangeError, LockFundsTransaction, MaybeLockFundsTransaction, - Result as ExchangeResult, -}; -use bp_eth_poa::{transaction_decode_rlp, RawTransaction, RawTransactionReceipt}; -use codec::{Decode, Encode}; -use frame_support::RuntimeDebug; -use hex_literal::hex; -use scale_info::TypeInfo; -use sp_std::vec::Vec; - -/// Ethereum address where locked PoA funds must be sent to. -pub const LOCK_FUNDS_ADDRESS: [u8; 20] = hex!("DEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEF"); - -/// Ethereum transaction inclusion proof. -#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo)] -pub struct EthereumTransactionInclusionProof { - /// Hash of the block with transaction. - pub block: sp_core::H256, - /// Index of the transaction within the block. - pub index: u64, - /// The proof itself (right now it is all RLP-encoded transactions of the block + - /// RLP-encoded receipts of all transactions of the block). - pub proof: Vec<(RawTransaction, RawTransactionReceipt)>, -} - -/// We uniquely identify transfer by the pair (sender, nonce). -/// -/// The assumption is that this pair will never appear more than once in -/// transactions included into finalized blocks. This is obviously true -/// for any existing eth-like chain (that keep current TX format), because -/// otherwise transaction can be replayed over and over. 
-#[derive(Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] -pub struct EthereumTransactionTag { - /// Account that has locked funds. - pub account: [u8; 20], - /// Lock transaction nonce. - pub nonce: sp_core::U256, -} - -/// Ethereum transaction from runtime perspective. -pub struct EthTransaction; - -impl MaybeLockFundsTransaction for EthTransaction { - type Transaction = RawTransaction; - type Id = EthereumTransactionTag; - type Recipient = crate::AccountId; - type Amount = crate::Balance; - - fn parse( - raw_tx: &Self::Transaction, - ) -> ExchangeResult> { - let tx = transaction_decode_rlp(raw_tx).map_err(|_| ExchangeError::InvalidTransaction)?; - - // we only accept transactions sending funds directly to the pre-configured address - if tx.unsigned.to != Some(LOCK_FUNDS_ADDRESS.into()) { - log::trace!( - target: "runtime", - "Failed to parse fund locks transaction. Invalid peer recipient: {:?}", - tx.unsigned.to, - ); - - return Err(ExchangeError::InvalidTransaction) - } - - let mut recipient_raw = sp_core::H256::default(); - match tx.unsigned.payload.len() { - 32 => recipient_raw.as_fixed_bytes_mut().copy_from_slice(&tx.unsigned.payload), - len => { - log::trace!( - target: "runtime", - "Failed to parse fund locks transaction. Invalid recipient length: {}", - len, - ); - - return Err(ExchangeError::InvalidRecipient) - }, - } - let amount = tx.unsigned.value.low_u128(); - - if tx.unsigned.value != amount.into() { - log::trace!( - target: "runtime", - "Failed to parse fund locks transaction. Invalid amount: {}", - tx.unsigned.value, - ); - - return Err(ExchangeError::InvalidAmount) - } - - Ok(LockFundsTransaction { - id: EthereumTransactionTag { - account: *tx.sender.as_fixed_bytes(), - nonce: tx.unsigned.nonce, - }, - recipient: crate::AccountId::from(*recipient_raw.as_fixed_bytes()), - amount, - }) - } -} - -/// Prepares everything required to bench claim of funds locked by given transaction. -#[cfg(feature = "runtime-benchmarks")] -pub(crate) fn prepare_environment_for_claim, I: 'static>( - transactions: &[(RawTransaction, RawTransactionReceipt)], -) -> bp_eth_poa::H256 { - use bp_eth_poa::compute_merkle_root; - use pallet_bridge_eth_poa::{ - test_utils::{insert_dummy_header, validator_utils::validator, HeaderBuilder}, - BridgeStorage, Storage, - }; - - let mut storage = BridgeStorage::::new(); - let header = HeaderBuilder::with_parent_number_on_runtime::(0) - .transactions_root(compute_merkle_root(transactions.iter().map(|(tx, _)| tx))) - .receipts_root(compute_merkle_root(transactions.iter().map(|(_, receipt)| receipt))) - .sign_by(&validator(0)); - let header_id = header.compute_id(); - insert_dummy_header(&mut storage, header); - storage.finalize_and_prune_headers(Some(header_id), 0); - - header_id.hash -} - -/// Prepare signed ethereum lock-funds transaction. 
-#[cfg(any(feature = "runtime-benchmarks", test))] -pub(crate) fn prepare_ethereum_transaction( - recipient: &crate::AccountId, - editor: impl Fn(&mut bp_eth_poa::UnsignedTransaction), -) -> (RawTransaction, RawTransactionReceipt) { - use bp_eth_poa::{signatures::SignTransaction, Receipt, TransactionOutcome}; - - // prepare tx for OpenEthereum private dev chain: - // chain id is 0x11 - // sender secret is 0x4d5db4107d237df6a3d58ee5f70ae63d73d7658d4026f2eefd2f204c81682cb7 - let chain_id = 0x11; - let signer = libsecp256k1::SecretKey::parse(&hex!( - "4d5db4107d237df6a3d58ee5f70ae63d73d7658d4026f2eefd2f204c81682cb7" - )) - .unwrap(); - let recipient_raw: &[u8; 32] = recipient.as_ref(); - let mut eth_tx = bp_eth_poa::UnsignedTransaction { - nonce: 0.into(), - to: Some(LOCK_FUNDS_ADDRESS.into()), - value: 100.into(), - gas: 100_000.into(), - gas_price: 100_000.into(), - payload: recipient_raw.to_vec(), - }; - editor(&mut eth_tx); - ( - eth_tx.sign_by(&signer, Some(chain_id)), - Receipt { - outcome: TransactionOutcome::StatusCode(1), - gas_used: Default::default(), - log_bloom: Default::default(), - logs: Vec::new(), - } - .rlp(), - ) -} - -#[cfg(test)] -mod tests { - use super::*; - use hex_literal::hex; - - fn ferdie() -> crate::AccountId { - hex!("1cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c").into() - } - - #[test] - fn valid_transaction_accepted() { - assert_eq!( - EthTransaction::parse(&prepare_ethereum_transaction(&ferdie(), |_| {}).0), - Ok(LockFundsTransaction { - id: EthereumTransactionTag { - account: hex!("00a329c0648769a73afac7f9381e08fb43dbea72"), - nonce: 0.into(), - }, - recipient: ferdie(), - amount: 100, - }), - ); - } - - #[test] - fn invalid_transaction_rejected() { - assert_eq!(EthTransaction::parse(&Vec::new()), Err(ExchangeError::InvalidTransaction),); - } - - #[test] - fn transaction_with_invalid_peer_recipient_rejected() { - assert_eq!( - EthTransaction::parse( - &prepare_ethereum_transaction(&ferdie(), |tx| { - tx.to = None; - }) - .0 - ), - Err(ExchangeError::InvalidTransaction), - ); - } - - #[test] - fn transaction_with_invalid_recipient_rejected() { - assert_eq!( - EthTransaction::parse( - &prepare_ethereum_transaction(&ferdie(), |tx| { - tx.payload.clear(); - }) - .0 - ), - Err(ExchangeError::InvalidRecipient), - ); - } - - #[test] - fn transaction_with_invalid_amount_rejected() { - assert_eq!( - EthTransaction::parse( - &prepare_ethereum_transaction(&ferdie(), |tx| { - tx.value = sp_core::U256::from(u128::max_value()) + sp_core::U256::from(1); - }) - .0 - ), - Err(ExchangeError::InvalidAmount), - ); - } -} diff --git a/bin/rialto/runtime/src/kovan.rs b/bin/rialto/runtime/src/kovan.rs deleted file mode 100644 index 95b4f8c42f03..000000000000 --- a/bin/rialto/runtime/src/kovan.rs +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. 
If not, see . - -use crate::exchange::EthereumTransactionInclusionProof; - -use bp_eth_poa::{Address, AuraHeader, RawTransaction, U256}; -use bp_header_chain::InclusionProofVerifier; -use frame_support::RuntimeDebug; -use hex_literal::hex; -use pallet_bridge_eth_poa::{ - AuraConfiguration, ChainTime as TChainTime, PruningStrategy as BridgePruningStrategy, - ValidatorsConfiguration, ValidatorsSource, -}; -use sp_std::prelude::*; - -frame_support::parameter_types! { - pub const FinalityVotesCachingInterval: Option = Some(16); - pub BridgeAuraConfiguration: AuraConfiguration = - kovan_aura_configuration(); - pub BridgeValidatorsConfiguration: ValidatorsConfiguration = - kovan_validators_configuration(); -} - -/// Max number of finalized headers to keep. It is equivalent of around 24 hours of -/// finalized blocks on current Kovan chain. -const FINALIZED_HEADERS_TO_KEEP: u64 = 20_000; - -/// Aura engine configuration for Kovan chain. -pub fn kovan_aura_configuration() -> AuraConfiguration { - AuraConfiguration { - empty_steps_transition: u64::max_value(), - strict_empty_steps_transition: 0, - validate_step_transition: 0x16e360, - validate_score_transition: 0x41a3c4, - two_thirds_majority_transition: u64::max_value(), - min_gas_limit: 0x1388.into(), - max_gas_limit: U256::max_value(), - maximum_extra_data_size: 0x20, - } -} - -/// Validators configuration for Kovan chain. -pub fn kovan_validators_configuration() -> ValidatorsConfiguration { - ValidatorsConfiguration::Multi(vec![ - (0, ValidatorsSource::List(genesis_validators())), - ( - 10960440, - ValidatorsSource::List(vec![ - hex!("00D6Cc1BA9cf89BD2e58009741f4F7325BAdc0ED").into(), - hex!("0010f94b296a852aaac52ea6c5ac72e03afd032d").into(), - hex!("00a0a24b9f0e5ec7aa4c7389b8302fd0123194de").into(), - ]), - ), - ( - 10960500, - ValidatorsSource::Contract( - hex!("aE71807C1B0a093cB1547b682DC78316D945c9B8").into(), - vec![ - hex!("d05f7478c6aa10781258c5cc8b4f385fc8fa989c").into(), - hex!("03801efb0efe2a25ede5dd3a003ae880c0292e4d").into(), - hex!("a4df255ecf08bbf2c28055c65225c9a9847abd94").into(), - hex!("596e8221a30bfe6e7eff67fee664a01c73ba3c56").into(), - hex!("faadface3fbd81ce37b0e19c0b65ff4234148132").into(), - ], - ), - ), - ]) -} - -/// Genesis validators set of Kovan chain. -pub fn genesis_validators() -> Vec
{ - vec![ - hex!("00D6Cc1BA9cf89BD2e58009741f4F7325BAdc0ED").into(), - hex!("00427feae2419c15b89d1c21af10d1b6650a4d3d").into(), - hex!("4Ed9B08e6354C70fE6F8CB0411b0d3246b424d6c").into(), - hex!("0020ee4Be0e2027d76603cB751eE069519bA81A1").into(), - hex!("0010f94b296a852aaac52ea6c5ac72e03afd032d").into(), - hex!("007733a1FE69CF3f2CF989F81C7b4cAc1693387A").into(), - hex!("00E6d2b931F55a3f1701c7389d592a7778897879").into(), - hex!("00e4a10650e5a6D6001C38ff8E64F97016a1645c").into(), - hex!("00a0a24b9f0e5ec7aa4c7389b8302fd0123194de").into(), - ] -} - -/// Genesis header of the Kovan chain. -pub fn genesis_header() -> AuraHeader { - AuraHeader { - parent_hash: Default::default(), - timestamp: 0, - number: 0, - author: Default::default(), - transactions_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - .into(), - uncles_hash: hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347") - .into(), - extra_data: vec![], - state_root: hex!("2480155b48a1cea17d67dbfdfaafe821c1d19cdd478c5358e8ec56dec24502b2").into(), - receipts_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - .into(), - log_bloom: Default::default(), - gas_used: Default::default(), - gas_limit: 6000000.into(), - difficulty: 131072.into(), - seal: vec![ - vec![128], - vec![ - 184, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - ], - } -} - -/// Kovan headers pruning strategy. -/// -/// We do not prune unfinalized headers because exchange module only accepts -/// claims from finalized headers. And if we're pruning unfinalized headers, then -/// some claims may never be accepted. -#[derive(Default, RuntimeDebug)] -pub struct PruningStrategy; - -impl BridgePruningStrategy for PruningStrategy { - fn pruning_upper_bound(&mut self, _best_number: u64, best_finalized_number: u64) -> u64 { - best_finalized_number.saturating_sub(FINALIZED_HEADERS_TO_KEEP) - } -} - -/// PoA Header timestamp verification against `Timestamp` pallet. -#[derive(Default, RuntimeDebug)] -pub struct ChainTime; - -impl TChainTime for ChainTime { - fn is_timestamp_ahead(&self, timestamp: u64) -> bool { - let now = super::Timestamp::now(); - timestamp > now - } -} - -/// The Kovan Blockchain as seen by the runtime. 
-pub struct KovanBlockchain; - -impl InclusionProofVerifier for KovanBlockchain { - type Transaction = RawTransaction; - type TransactionInclusionProof = EthereumTransactionInclusionProof; - - fn verify_transaction_inclusion_proof( - proof: &Self::TransactionInclusionProof, - ) -> Option { - let is_transaction_finalized = crate::BridgeKovan::verify_transaction_finalized( - proof.block, - proof.index, - &proof.proof, - ); - - if !is_transaction_finalized { - return None - } - - proof.proof.get(proof.index as usize).map(|(tx, _)| tx.clone()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn pruning_strategy_keeps_enough_headers() { - assert_eq!( - PruningStrategy::default().pruning_upper_bound(100_000, 10_000), - 0, - "10_000 <= 20_000 => nothing should be pruned yet", - ); - - assert_eq!( - PruningStrategy::default().pruning_upper_bound(100_000, 20_000), - 0, - "20_000 <= 20_000 => nothing should be pruned yet", - ); - - assert_eq!( - PruningStrategy::default().pruning_upper_bound(100_000, 30_000), - 10_000, - "20_000 <= 30_000 => we're ready to prune first 10_000 headers", - ); - } -} diff --git a/bin/rialto/runtime/src/lib.rs b/bin/rialto/runtime/src/lib.rs index bf5a8b36ec19..0987184c73aa 100644 --- a/bin/rialto/runtime/src/lib.rs +++ b/bin/rialto/runtime/src/lib.rs @@ -30,30 +30,28 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -pub mod exchange; - -#[cfg(feature = "runtime-benchmarks")] -pub mod benches; -pub mod kovan; pub mod millau_messages; pub mod parachains; -pub mod rialto_poa; use crate::millau_messages::{ToMillauMessagePayload, WithMillauMessageBridge}; +use beefy_primitives::{crypto::AuthorityId as BeefyId, mmr::MmrLeafVersion, ValidatorSet}; use bridge_runtime_common::messages::{ source::estimate_message_dispatch_and_delivery_fee, MessageBridge, }; use pallet_grandpa::{ fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, }; +use pallet_mmr_primitives::{ + DataOrHash, EncodableOpaqueLeaf, Error as MmrError, LeafDataProvider, Proof as MmrProof, +}; use pallet_transaction_payment::{FeeDetails, Multiplier, RuntimeDispatchInfo}; use sp_api::impl_runtime_apis; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, Block as BlockT, NumberFor, OpaqueKeys}, + traits::{AccountIdLookup, Block as BlockT, Keccak256, NumberFor, OpaqueKeys}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, FixedPointNumber, MultiSignature, MultiSigner, Perquintill, }; @@ -72,8 +70,6 @@ pub use frame_support::{ pub use frame_system::Call as SystemCall; pub use pallet_balances::Call as BalancesCall; -pub use pallet_bridge_currency_exchange::Call as BridgeCurrencyExchangeCall; -pub use pallet_bridge_eth_poa::Call as BridgeEthPoACall; pub use pallet_bridge_grandpa::Call as BridgeGrandpaMillauCall; pub use pallet_bridge_messages::Call as MessagesCall; pub use pallet_sudo::Call as SudoCall; @@ -109,9 +105,6 @@ pub type Hash = bp_rialto::Hash; /// Hashing algorithm used by the chain. pub type Hashing = bp_rialto::Hasher; -/// Digest item type. -pub type DigestItem = generic::DigestItem; - /// Opaque types. These are used by the CLI to instantiate machinery that don't need to know /// the specifics of the runtime. 
They can then be made to be agnostic over specific formats /// of data like extrinsics, allowing for them to continue syncing the network through upgrades @@ -133,6 +126,7 @@ impl_opaque_keys! { pub struct SessionKeys { pub babe: Babe, pub grandpa: Grandpa, + pub beefy: Beefy, pub para_validator: Initializer, pub para_assignment: SessionInfo, pub authority_discovery: AuthorityDiscovery, @@ -253,46 +247,8 @@ impl pallet_babe::Config for Runtime { type WeightInfo = (); } -type RialtoPoA = pallet_bridge_eth_poa::Instance1; -impl pallet_bridge_eth_poa::Config for Runtime { - type AuraConfiguration = rialto_poa::BridgeAuraConfiguration; - type FinalityVotesCachingInterval = rialto_poa::FinalityVotesCachingInterval; - type ValidatorsConfiguration = rialto_poa::BridgeValidatorsConfiguration; - type PruningStrategy = rialto_poa::PruningStrategy; - type ChainTime = rialto_poa::ChainTime; - type OnHeadersSubmitted = (); -} - -type Kovan = pallet_bridge_eth_poa::Instance2; -impl pallet_bridge_eth_poa::Config for Runtime { - type AuraConfiguration = kovan::BridgeAuraConfiguration; - type FinalityVotesCachingInterval = kovan::FinalityVotesCachingInterval; - type ValidatorsConfiguration = kovan::BridgeValidatorsConfiguration; - type PruningStrategy = kovan::PruningStrategy; - type ChainTime = kovan::ChainTime; - type OnHeadersSubmitted = (); -} - -type RialtoCurrencyExchange = pallet_bridge_currency_exchange::Instance1; -impl pallet_bridge_currency_exchange::Config for Runtime { - type OnTransactionSubmitted = (); - type PeerBlockchain = rialto_poa::RialtoBlockchain; - type PeerMaybeLockFundsTransaction = exchange::EthTransaction; - type RecipientsMap = bp_currency_exchange::IdentityRecipients; - type Amount = Balance; - type CurrencyConverter = bp_currency_exchange::IdentityCurrencyConverter; - type DepositInto = DepositInto; -} - -type KovanCurrencyExchange = pallet_bridge_currency_exchange::Instance2; -impl pallet_bridge_currency_exchange::Config for Runtime { - type OnTransactionSubmitted = (); - type PeerBlockchain = kovan::KovanBlockchain; - type PeerMaybeLockFundsTransaction = exchange::EthTransaction; - type RecipientsMap = bp_currency_exchange::IdentityRecipients; - type Amount = Balance; - type CurrencyConverter = bp_currency_exchange::IdentityCurrencyConverter; - type DepositInto = DepositInto; +impl pallet_beefy::Config for Runtime { + type BeefyId = BeefyId; } impl pallet_bridge_dispatch::Config for Runtime { @@ -307,68 +263,6 @@ impl pallet_bridge_dispatch::Config for Runtime { type AccountIdConverter = bp_rialto::AccountIdConverter; } -pub struct DepositInto; - -impl bp_currency_exchange::DepositInto for DepositInto { - type Recipient = AccountId; - type Amount = Balance; - - fn deposit_into( - recipient: Self::Recipient, - amount: Self::Amount, - ) -> bp_currency_exchange::Result<()> { - // let balances module make all checks for us (it won't allow depositing lower than - // existential deposit, balance overflow, ...) 
- let deposited = <pallet_balances::Pallet<Runtime> as Currency<AccountId>>::deposit_creating( - &recipient, amount, - ); - - // I'm dropping deposited here explicitly to illustrate the fact that it'll update - // `TotalIssuance` on drop - let deposited_amount = deposited.peek(); - drop(deposited); - - // we have 3 cases here: - // - deposited == amount: success - // - deposited == 0: deposit has failed and no changes to storage were made - // - deposited != 0: (should never happen in practice) deposit has been partially completed - match deposited_amount { - _ if deposited_amount == amount => { - log::trace!( - target: "runtime", - "Deposited {} to {:?}", - amount, - recipient, - ); - - Ok(()) - }, - _ if deposited_amount == 0 => { - log::error!( - target: "runtime", - "Deposit of {} to {:?} has failed", - amount, - recipient, - ); - - Err(bp_currency_exchange::Error::DepositFailed) - }, - _ => { - log::error!( - target: "runtime", - "Deposit of {} to {:?} has partially completed. {} has been deposited", - amount, - recipient, - deposited_amount, - ); - - // we can't return DepositFailed error here, because storage changes were made - Err(bp_currency_exchange::Error::DepositPartiallyFailed) - }, - } - } -} - impl pallet_grandpa::Config for Runtime { type Event = Event; type Call = Call; @@ -385,6 +279,38 @@ impl pallet_grandpa::Config for Runtime { type WeightInfo = (); } +impl pallet_mmr::Config for Runtime { + const INDEXING_PREFIX: &'static [u8] = b"mmr"; + type Hashing = Keccak256; + type Hash = <Keccak256 as sp_runtime::traits::Hash>::Output; + type OnNewRoot = pallet_beefy_mmr::DepositBeefyDigest<Runtime>; + type WeightInfo = (); + type LeafData = pallet_beefy_mmr::Pallet<Runtime>; +} + +parameter_types! { + /// Version of the produced MMR leaf. + /// + /// The version consists of two parts: + /// - `major` (3 bits) + /// - `minor` (5 bits) + /// + /// `major` should be updated only if decoding the previous MMR Leaf format from the payload + /// is not possible (i.e. a backward incompatible change). + /// `minor` should be updated if fields are added to the previous MMR Leaf, which, given SCALE + /// encoding, does not prevent old leaves from being decoded. + /// + /// Hence we expect `major` to be changed really rarely (think never). + /// See [`MmrLeafVersion`] type documentation for more details. + pub LeafVersion: MmrLeafVersion = MmrLeafVersion::new(0, 0); +} + +impl pallet_beefy_mmr::Config for Runtime { + type LeafVersion = LeafVersion; + type BeefyAuthorityToMerkleLeaf = pallet_beefy_mmr::BeefyEcdsaToEthereum; + type ParachainHeads = (); +} + parameter_types! { pub const MinimumPeriod: u64 = bp_rialto::SLOT_DURATION / 2; } @@ -578,11 +504,10 @@ construct_runtime!( Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event}, ShiftSessionManager: pallet_shift_session_manager::{Pallet}, - // Eth-PoA chains bridge modules. - BridgeRialtoPoa: pallet_bridge_eth_poa::<Instance1>::{Pallet, Call, Config, Storage, ValidateUnsigned}, - BridgeKovan: pallet_bridge_eth_poa::<Instance2>::{Pallet, Call, Config, Storage, ValidateUnsigned}, - BridgeRialtoCurrencyExchange: pallet_bridge_currency_exchange::<Instance1>::{Pallet, Call}, - BridgeKovanCurrencyExchange: pallet_bridge_currency_exchange::<Instance2>::{Pallet, Call}, + // BEEFY Bridges support. + Beefy: pallet_beefy::{Pallet, Storage, Config<T>}, + Mmr: pallet_mmr::{Pallet, Storage}, + MmrLeaf: pallet_beefy_mmr::{Pallet, Storage}, // Millau bridge modules. BridgeMillauGrandpa: pallet_bridge_grandpa::{Pallet, Call, Storage}, @@ -693,43 +618,42 @@ impl_runtime_apis!
{ } } - impl bp_eth_poa::RialtoPoAHeaderApi<Block> for Runtime { - fn best_block() -> (u64, bp_eth_poa::H256) { - let best_block = BridgeRialtoPoa::best_block(); - (best_block.number, best_block.hash) - } - - fn finalized_block() -> (u64, bp_eth_poa::H256) { - let finalized_block = BridgeRialtoPoa::finalized_block(); - (finalized_block.number, finalized_block.hash) - } - - fn is_import_requires_receipts(header: bp_eth_poa::AuraHeader) -> bool { - BridgeRialtoPoa::is_import_requires_receipts(header) - } - - fn is_known_block(hash: bp_eth_poa::H256) -> bool { - BridgeRialtoPoa::is_known_block(hash) + impl beefy_primitives::BeefyApi<Block> for Runtime { + fn validator_set() -> ValidatorSet<BeefyId> { + Beefy::validator_set() } } - impl bp_eth_poa::KovanHeaderApi<Block> for Runtime { - fn best_block() -> (u64, bp_eth_poa::H256) { - let best_block = BridgeKovan::best_block(); - (best_block.number, best_block.hash) + impl pallet_mmr_primitives::MmrApi<Block, Hash> for Runtime { + fn generate_proof(leaf_index: u64) + -> Result<(EncodableOpaqueLeaf, MmrProof<Hash>), MmrError> + { + Mmr::generate_proof(leaf_index) + .map(|(leaf, proof)| (EncodableOpaqueLeaf::from_leaf(&leaf), proof)) } - fn finalized_block() -> (u64, bp_eth_poa::H256) { - let finalized_block = BridgeKovan::finalized_block(); - (finalized_block.number, finalized_block.hash) + fn verify_proof(leaf: EncodableOpaqueLeaf, proof: MmrProof<Hash>) + -> Result<(), MmrError> + { + pub type Leaf = < + <Runtime as pallet_mmr::Config>::LeafData as LeafDataProvider + >::LeafData; + + let leaf: Leaf = leaf + .into_opaque_leaf() + .try_decode() + .ok_or(MmrError::Verify)?; + Mmr::verify_leaf(leaf, proof) } - fn is_import_requires_receipts(header: bp_eth_poa::AuraHeader) -> bool { - BridgeKovan::is_import_requires_receipts(header) - } - - fn is_known_block(hash: bp_eth_poa::H256) -> bool { - BridgeKovan::is_known_block(hash) + fn verify_proof_stateless( + root: Hash, + leaf: EncodableOpaqueLeaf, + proof: MmrProof<Hash> + ) -> Result<(), MmrError> { + type MmrHashing = <Runtime as pallet_mmr::Config>::Hashing; + let node = DataOrHash::Data(leaf.into_opaque_leaf()); + pallet_mmr::verify_leaf_proof::<MmrHashing, _>(root, node, proof) } } @@ -744,18 +668,6 @@ impl_runtime_apis! { } } - impl bp_currency_exchange::RialtoCurrencyExchangeApi<Block, exchange::EthereumTransactionInclusionProof> for Runtime { - fn filter_transaction_proof(proof: exchange::EthereumTransactionInclusionProof) -> bool { - BridgeRialtoCurrencyExchange::filter_transaction_proof(&proof) - } - } - - impl bp_currency_exchange::KovanCurrencyExchangeApi<Block, exchange::EthereumTransactionInclusionProof> for Runtime { - fn filter_transaction_proof(proof: exchange::EthereumTransactionInclusionProof) -> bool { - BridgeKovanCurrencyExchange::filter_transaction_proof(&proof) - } - } - impl sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block> for Runtime { fn validate_transaction( source: TransactionSource, @@ -845,6 +757,13 @@ polkadot_runtime_parachains::runtime_api_impl::v1::persisted_validation_data::<Runtime>(para_id, assumption) } + fn assumed_validation_data( + para_id: polkadot_primitives::v1::Id, + expected_persisted_validation_data_hash: Hash, + ) -> Option<(polkadot_primitives::v1::PersistedValidationData<Hash, BlockNumber>, polkadot_primitives::v1::ValidationCodeHash)> { + polkadot_runtime_parachains::runtime_api_impl::v1::assumed_validation_data::<Runtime>(para_id, expected_persisted_validation_data_hash) + } + fn check_validation_outputs( para_id: polkadot_primitives::v1::Id, outputs: polkadot_primitives::v1::CandidateCommitments, @@ -1028,17 +947,10 @@ impl_runtime_apis!
{ use frame_benchmarking::{list_benchmark, Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; - use pallet_bridge_currency_exchange::benchmarking::Pallet as BridgeCurrencyExchangeBench; use pallet_bridge_messages::benchmarking::Pallet as MessagesBench; let mut list = Vec::::new(); - list_benchmark!(list, extra, pallet_bridge_eth_poa, BridgeRialtoPoa); - list_benchmark!( - list, - extra, - pallet_bridge_currency_exchange, BridgeCurrencyExchangeBench:: - ); list_benchmark!(list, extra, pallet_bridge_messages, MessagesBench::); list_benchmark!(list, extra, pallet_bridge_grandpa, BridgeMillauGrandpa); @@ -1068,46 +980,6 @@ impl_runtime_apis! { let mut batches = Vec::::new(); let params = (&config, &whitelist); - use pallet_bridge_currency_exchange::benchmarking::{ - Pallet as BridgeCurrencyExchangeBench, - Config as BridgeCurrencyExchangeConfig, - ProofParams as BridgeCurrencyExchangeProofParams, - }; - - impl BridgeCurrencyExchangeConfig for Runtime { - fn make_proof( - proof_params: BridgeCurrencyExchangeProofParams, - ) -> crate::exchange::EthereumTransactionInclusionProof { - use bp_currency_exchange::DepositInto; - - if proof_params.recipient_exists { - >::DepositInto::deposit_into( - proof_params.recipient.clone(), - ExistentialDeposit::get(), - ).unwrap(); - } - - let (transaction, receipt) = crate::exchange::prepare_ethereum_transaction( - &proof_params.recipient, - |tx| { - // our runtime only supports transactions where data is exactly 32 bytes long - // (receiver key) - // => we are ignoring `transaction_size_factor` here - tx.value = (ExistentialDeposit::get() * 10).into(); - }, - ); - let transactions = sp_std::iter::repeat((transaction, receipt)) - .take(1 + proof_params.proof_size_factor as usize) - .collect::>(); - let block_hash = crate::exchange::prepare_environment_for_claim::(&transactions); - crate::exchange::EthereumTransactionInclusionProof { - block: block_hash, - index: 0, - proof: transactions, - } - } - } - use crate::millau_messages::{ToMillauMessagePayload, WithMillauMessageBridge}; use bp_runtime::messages::DispatchFeePayment; use bridge_runtime_common::messages; @@ -1274,13 +1146,6 @@ impl_runtime_apis! 
{ } } - add_benchmark!(params, batches, pallet_bridge_eth_poa, BridgeRialtoPoa); - add_benchmark!( - params, - batches, - pallet_bridge_currency_exchange, - BridgeCurrencyExchangeBench::<Runtime, KovanCurrencyExchangeInstance> - ); add_benchmark!( params, batches, @@ -1322,48 +1187,8 @@ where #[cfg(test)] mod tests { use super::*; - use bp_currency_exchange::DepositInto; use bridge_runtime_common::messages; - fn run_deposit_into_test(test: impl Fn(AccountId) -> Balance) { - let mut ext: sp_io::TestExternalities = - SystemConfig::default().build_storage::<Runtime>().unwrap().into(); - ext.execute_with(|| { - // initially issuance is zero - assert_eq!( - <pallet_balances::Pallet<Runtime> as Currency<AccountId>>::total_issuance(), - 0, - ); - - // create account - let account: AccountId = [1u8; 32].into(); - let initial_amount = ExistentialDeposit::get(); - let deposited = - <pallet_balances::Pallet<Runtime> as Currency<AccountId>>::deposit_creating( - &account, - initial_amount, - ); - drop(deposited); - assert_eq!( - <pallet_balances::Pallet<Runtime> as Currency<AccountId>>::total_issuance(), - initial_amount, - ); - assert_eq!( - <pallet_balances::Pallet<Runtime> as Currency<AccountId>>::free_balance(&account), - initial_amount, - ); - - // run test - let total_issuance_change = test(account); - - // check that total issuance has changed by `total_issuance_change` - assert_eq!( - <pallet_balances::Pallet<Runtime> as Currency<AccountId>>::total_issuance(), - initial_amount + total_issuance_change, - ); - }); - } - #[test] fn ensure_rialto_message_lane_weights_are_correct() { type Weights = pallet_bridge_messages::weights::RialtoWeight<Runtime>; @@ -1405,53 +1230,12 @@ mod tests { ); } - #[test] - fn deposit_into_existing_account_works() { - run_deposit_into_test(|existing_account| { - let initial_amount = - <pallet_balances::Pallet<Runtime> as Currency<AccountId>>::free_balance( - &existing_account, - ); - let additional_amount = 10_000; - <Runtime as pallet_bridge_currency_exchange::Config<KovanCurrencyExchangeInstance>>::DepositInto::deposit_into( - existing_account.clone(), - additional_amount, - ) - .unwrap(); - assert_eq!( - <pallet_balances::Pallet<Runtime> as Currency<AccountId>>::free_balance( - &existing_account - ), - initial_amount + additional_amount, - ); - additional_amount - }); - } - - #[test] - fn deposit_into_new_account_works() { - run_deposit_into_test(|_| { - let initial_amount = 0; - let additional_amount = ExistentialDeposit::get() + 10_000; - let new_account: AccountId = [42u8; 32].into(); - <Runtime as pallet_bridge_currency_exchange::Config<KovanCurrencyExchangeInstance>>::DepositInto::deposit_into( - new_account.clone(), - additional_amount, - ) - .unwrap(); - assert_eq!( - <pallet_balances::Pallet<Runtime> as Currency<AccountId>>::free_balance( - &new_account - ), - initial_amount + additional_amount, - ); - additional_amount - }); - } - #[test] fn call_size() { - const MAX_CALL_SIZE: usize = 230; // value from polkadot-runtime tests - assert!(core::mem::size_of::<Call>() <= MAX_CALL_SIZE); + const DOT_MAX_CALL_SZ: usize = 230; + assert!(core::mem::size_of::>() <= DOT_MAX_CALL_SZ); + // FIXME: get this down to 230.
 https://github.com/paritytech/grandpa-bridge-gadget/issues/359 + const BEEFY_MAX_CALL_SZ: usize = 232; + assert!(core::mem::size_of::>() <= BEEFY_MAX_CALL_SZ); } } diff --git a/bin/rialto/runtime/src/parachains.rs b/bin/rialto/runtime/src/parachains.rs index 9a2f85460153..332a3387ac69 100644 --- a/bin/rialto/runtime/src/parachains.rs +++ b/bin/rialto/runtime/src/parachains.rs @@ -71,7 +71,9 @@ impl parachains_paras::Config for Runtime { type WeightInfo = parachains_paras::TestWeightInfo; } -impl parachains_paras_inherent::Config for Runtime {} +impl parachains_paras_inherent::Config for Runtime { + type WeightInfo = parachains_paras_inherent::TestWeightInfo; +} impl parachains_scheduler::Config for Runtime {} diff --git a/bin/rialto/runtime/src/rialto_poa.rs b/bin/rialto/runtime/src/rialto_poa.rs deleted file mode 100644 index 865ef387d1b4..000000000000 --- a/bin/rialto/runtime/src/rialto_poa.rs +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright 2020-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>. - -//! Configuration parameters for the Rialto PoA chain. - -use crate::exchange::EthereumTransactionInclusionProof; - -use bp_eth_poa::{Address, AuraHeader, RawTransaction, U256}; -use bp_header_chain::InclusionProofVerifier; -use frame_support::RuntimeDebug; -use hex_literal::hex; -use pallet_bridge_eth_poa::{ - AuraConfiguration, ChainTime as TChainTime, PruningStrategy as TPruningStrategy, - ValidatorsConfiguration, ValidatorsSource, -}; -use sp_std::prelude::*; - -frame_support::parameter_types! { - pub const FinalityVotesCachingInterval: Option<u64> = Some(8); - pub BridgeAuraConfiguration: AuraConfiguration = - aura_configuration(); - pub BridgeValidatorsConfiguration: ValidatorsConfiguration = - validators_configuration(); -} - -/// Max number of finalized headers to keep. -const FINALIZED_HEADERS_TO_KEEP: u64 = 5_000; - -/// Aura engine configuration for Rialto chain. -pub fn aura_configuration() -> AuraConfiguration { - AuraConfiguration { - empty_steps_transition: 0xfffffffff, - strict_empty_steps_transition: 0, - validate_step_transition: 0, - validate_score_transition: 0, - two_thirds_majority_transition: u64::max_value(), - min_gas_limit: 0x1388.into(), - max_gas_limit: U256::max_value(), - maximum_extra_data_size: 0x20, - } -} - -/// Validators configuration for Rialto PoA chain. -pub fn validators_configuration() -> ValidatorsConfiguration { - ValidatorsConfiguration::Single(ValidatorsSource::List(genesis_validators())) -} - -/// Genesis validators set of Rialto PoA chain. -pub fn genesis_validators() -> Vec<Address>
{ - vec![ - hex!("005e714f896a8b7cede9d38688c1a81de72a58e4").into(), - hex!("007594304039c2937a12220338aab821d819f5a4").into(), - hex!("004e7a39907f090e19b0b80a277e77b72b22e269").into(), - ] -} - -/// Genesis header of the Rialto PoA chain. -/// -/// To obtain genesis header from a running node, invoke: -/// ```bash -/// $ http localhost:8545 jsonrpc=2.0 id=1 method=eth_getBlockByNumber params:='["earliest", false]' -v -/// ``` -pub fn genesis_header() -> AuraHeader { - AuraHeader { - parent_hash: Default::default(), - timestamp: 0, - number: 0, - author: Default::default(), - transactions_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - .into(), - uncles_hash: hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347") - .into(), - extra_data: vec![], - state_root: hex!("a992d04c791620ed7ed96555a80cf0568355bb4bee2656f46899a4372f25f248").into(), - receipts_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - .into(), - log_bloom: Default::default(), - gas_used: Default::default(), - gas_limit: 0x222222.into(), - difficulty: 0x20000.into(), - seal: vec![vec![0x80], { - let mut vec = vec![0xb8, 0x41]; - vec.resize(67, 0); - vec - }], - } -} - -/// Rialto PoA headers pruning strategy. -/// -/// We do not prune unfinalized headers because exchange module only accepts -/// claims from finalized headers. And if we're pruning unfinalized headers, then -/// some claims may never be accepted. -#[derive(Default, RuntimeDebug)] -pub struct PruningStrategy; - -impl TPruningStrategy for PruningStrategy { - fn pruning_upper_bound(&mut self, _best_number: u64, best_finalized_number: u64) -> u64 { - best_finalized_number.saturating_sub(FINALIZED_HEADERS_TO_KEEP) - } -} - -/// ChainTime provider -#[derive(Default)] -pub struct ChainTime; - -impl TChainTime for ChainTime { - fn is_timestamp_ahead(&self, timestamp: u64) -> bool { - let now = super::Timestamp::now(); - timestamp > now - } -} - -/// The Rialto PoA Blockchain as seen by the runtime. 
-pub struct RialtoBlockchain; - -impl InclusionProofVerifier for RialtoBlockchain { - type Transaction = RawTransaction; - type TransactionInclusionProof = EthereumTransactionInclusionProof; - - fn verify_transaction_inclusion_proof( - proof: &Self::TransactionInclusionProof, - ) -> Option<Self::Transaction> { - let is_transaction_finalized = crate::BridgeRialtoPoa::verify_transaction_finalized( - proof.block, - proof.index, - &proof.proof, - ); - - if !is_transaction_finalized { - return None - } - - proof.proof.get(proof.index as usize).map(|(tx, _)| tx.clone()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn genesis_hash_matches() { - assert_eq!( - genesis_header().compute_hash(), - hex!("1468e1a0fa20d30025a5a0f87e1cced4fdc393b84b7d2850b11ca5863db482cb").into(), - ); - } - - #[test] - fn pruning_strategy_keeps_enough_headers() { - assert_eq!( - PruningStrategy::default().pruning_upper_bound(100_000, 1_000), - 0, - "1_000 <= 5_000 => nothing should be pruned yet", - ); - - assert_eq!( - PruningStrategy::default().pruning_upper_bound(100_000, 5_000), - 0, - "5_000 <= 5_000 => nothing should be pruned yet", - ); - - assert_eq!( - PruningStrategy::default().pruning_upper_bound(100_000, 10_000), - 5_000, - "5_000 <= 10_000 => we're ready to prune first 5_000 headers", - ); - } -} diff --git a/ci.Dockerfile b/ci.Dockerfile index 0bd2bc4dae83..eec619a79831 100644 --- a/ci.Dockerfile +++ b/ci.Dockerfile @@ -24,7 +24,7 @@ USER user WORKDIR /home/user -ARG PROJECT=ethereum-poa-relay +ARG PROJECT=substrate-relay COPY --chown=user:user ./${PROJECT} ./ COPY --chown=user:user ./bridge-entrypoint.sh ./ diff --git a/deployments/README.md b/deployments/README.md index f9207e6d683e..920935d5fdb8 100644 --- a/deployments/README.md +++ b/deployments/README.md @@ -44,16 +44,16 @@ the monitoring Compose file is _not_ optional, and must be included for bridge d ### Running and Updating Deployments We currently support two bridge deployments -1. Ethereum PoA to Rialto Substrate -2. Rialto Substrate to Millau Substrate +1. Rialto Substrate to Millau Substrate +2. Westend Substrate to Millau Substrate These bridges can be deployed using our [`./run.sh`](./run.sh) script. The first argument it takes is the name of the bridge you want to run. Right now we only support two -bridges: `poa-rialto` and `rialto-millau`. +bridges: `rialto-millau` and `westend-millau`. ```bash -./run.sh poa-rialto +./run.sh rialto-millau ``` If you add a second `update` argument to the script it will pull the latest images from Docker Hub @@ -66,7 +66,7 @@ and restart the deployment. You can also bring down a deployment using the script with the `stop` argument. ```bash -./run.sh poa-rialto stop +./run.sh rialto-millau stop ``` ### Adding Deployments @@ -80,7 +80,6 @@ not strictly required. ## General Notes Rialto authorities are named: `Alice`, `Bob`, `Charlie`, `Dave`, `Eve`. -Rialto-PoA authorities are named: `Arthur`, `Bertha`, `Carlos`. Millau authorities are named: `Alice`, `Bob`, `Charlie`, `Dave`, `Eve`. Both authorities and following accounts have enough funds (for test purposes) on corresponding Substrate chains: @@ -89,8 +88,8 @@ Both authorities and following accounts have enough funds (for test purposes) on - on Millau: `Ferdie`, `George`, `Harry`. Names of accounts on Substrate (Rialto and Millau) chains may be prefixed with `//` and used as -seeds for the `sr25519` keys.
This seed may also be used in the signer argument in Substrate relays. +Example: ```bash ./substrate-relay relay-headers rialto-to-millau \ @@ -105,13 +104,6 @@ and PoA relays. Example: Some accounts are used by bridge components. Using these accounts to sign other transactions is not recommended, because this may lead to nonce conflicts. -Following accounts are used when `poa-rialto` bridge is running: - -- Rialto's `Alice` signs relay transactions with new Rialto-PoA headers; -- Rialto's `Bob` signs relay transactions with Rialto-PoA -> Rialto currency exchange proofs. -- Rialto-PoA's `Arthur`: signs relay transactions with new Rialto headers; -- Rialto-PoA's `Bertha`: signs currency exchange transactions. - Following accounts are used when `rialto-millau` bridge is running: - Millau's `Charlie` signs complex headers+messages relay transactions on Millau chain; @@ -133,10 +125,10 @@ Following accounts are used when `westend-millau` bridge is running: When the network is running you can query logs from individual nodes using: ```bash -docker logs rialto_poa-node-bertha_1 -f +docker logs rialto_millau-node-charlie_1 -f ``` -To kill all left over containers and start the network from scratch next time: +To kill all leftover containers and start the network from scratch next time: ```bash docker ps -a --format "{{.ID}}" | xargs docker rm # This removes all containers! ``` @@ -190,7 +182,6 @@ Here are the arguments currently supported: - `PROJECT`: Project to build within bridges repo. Can be one of: - `rialto-bridge-node` - `millau-bridge-node` - - `ethereum-poa-relay` - `substrate-relay` ### GitHub Actions diff --git a/deployments/bridges/poa-rialto/Front-end.Dockerfile b/deployments/bridges/poa-rialto/Front-end.Dockerfile deleted file mode 100644 index 427f0504e57d..000000000000 --- a/deployments/bridges/poa-rialto/Front-end.Dockerfile +++ /dev/null @@ -1,26 +0,0 @@ -FROM node:12 as build-deps - -# install tools and dependencies -RUN set -eux; \ - apt-get install -y git - -# clone UI repo -RUN cd /usr/src/ && git clone https://github.com/paritytech/bridge-ui.git -WORKDIR /usr/src/bridge-ui -RUN yarn -ARG SUBSTRATE_PROVIDER -ARG ETHEREUM_PROVIDER -ARG EXPECTED_ETHEREUM_NETWORK_ID - -ENV SUBSTRATE_PROVIDER $SUBSTRATE_PROVIDER -ENV ETHEREUM_PROVIDER $ETHEREUM_PROVIDER -ENV EXPECTED_ETHEREUM_NETWORK_ID $EXPECTED_ETHEREUM_NETWORK_ID - -RUN yarn build:docker - -# Stage 2 - the production environment -FROM nginx:1.12 -COPY --from=build-deps /usr/src/bridge-ui/nginx/*.conf /etc/nginx/conf.d/ -COPY --from=build-deps /usr/src/bridge-ui/dist /usr/share/nginx/html -EXPOSE 80 -CMD ["nginx", "-g", "daemon off;"] diff --git a/deployments/bridges/poa-rialto/dashboard/grafana/relay-poa-to-rialto-exchange-dashboard.json b/deployments/bridges/poa-rialto/dashboard/grafana/relay-poa-to-rialto-exchange-dashboard.json deleted file mode 100644 index 7e197bb882f8..000000000000 --- a/deployments/bridges/poa-rialto/dashboard/grafana/relay-poa-to-rialto-exchange-dashboard.json +++ /dev/null @@ -1,474 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "links": [], - "panels": [ - { - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green",
- "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 7, - "x": 0, - "y": 0 - }, - "id": 2, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Ethereum_to_Substrate_Exchange_best_block_numbers", - "instant": true, - "interval": "", - "legendFormat": "Best {{type}} block", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Best finalized blocks", - "type": "stat" - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 5, - "x": 7, - "y": 0 - }, - "id": 12, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Ethereum_to_Substrate_Exchange_processed_blocks", - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Number of processed blocks since last restart", - "type": "stat" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 6, - "x": 12, - "y": 0 - }, - "hiddenSeries": false, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Ethereum_to_Substrate_Exchange_system_average_load", - "interval": "", - "legendFormat": "Average system load in last {{over}}", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": null - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Average System Load", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 9, - "w": 6, - 
"x": 18, - "y": 0 - }, - "id": 8, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Ethereum_to_Substrate_Exchange_process_cpu_usage_percentage", - "interval": "", - "legendFormat": "1 CPU = 100", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Relay Process CPU Usage", - "type": "gauge" - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 12, - "x": 0, - "y": 7 - }, - "id": 14, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Ethereum_to_Substrate_Exchange_processed_transactions", - "instant": true, - "interval": "", - "legendFormat": "{{type}} transactions", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Number of processed transactions since last restart", - "type": "stat" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 9 - }, - "hiddenSeries": false, - "id": 10, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Ethereum_to_Substrate_Exchange_process_memory_usage_bytes / 1024 / 1024", - "interval": "", - "legendFormat": "Process memory, MB", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Memory Usage for Relay Process", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "5s", - "schemaVersion": 26, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-5m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ] - }, - "timezone": "", - "title": "Ethereum PoA to Rialto Exchange Dashboard", - "uid": "relay-poa-to-rialto-exchange", - "version": 1 -} diff --git a/deployments/bridges/poa-rialto/dashboard/grafana/relay-poa-to-rialto-headers-dashboard.json 
b/deployments/bridges/poa-rialto/dashboard/grafana/relay-poa-to-rialto-headers-dashboard.json deleted file mode 100644 index 05d06e949819..000000000000 --- a/deployments/bridges/poa-rialto/dashboard/grafana/relay-poa-to-rialto-headers-dashboard.json +++ /dev/null @@ -1,694 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "links": [], - "panels": [ - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 5 - ], - "type": "gt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "5m", - "handler": 1, - "message": "", - "name": "Synced Header Difference is Over 5 (Ethereum PoA to Rialto)", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "Shows how many headers behind the target chain is from the source chain.", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 0 - }, - "hiddenSeries": false, - "id": 14, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max(Ethereum_to_Substrate_Sync_best_block_numbers{node=\"source\"}) - max(Ethereum_to_Substrate_Sync_best_block_numbers{node=\"target\"})", - "format": "table", - "instant": false, - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 5 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Difference Between Source and Target Headers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 5 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "2m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "3m", - "frequency": "5m", - "handler": 1, - "name": "No New Headers (Ethereum PoA to Rialto)", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - 
"description": "How many headers has the relay synced from the source node in the last 2 mins?", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 0 - }, - "hiddenSeries": false, - "id": 16, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max_over_time(Ethereum_to_Substrate_Sync_best_block_numbers{node=\"source\"}[2m])-min_over_time(Ethereum_to_Substrate_Sync_best_block_numbers{node=\"source\"}[2m])", - "interval": "", - "legendFormat": "Number of new Headers on Ethereum PoA (Last 2 Mins)", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "lt", - "value": 5 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Headers Synced on Rialto (Last 2 Mins)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": { - "align": null - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 8 - }, - "id": 2, - "interval": "5s", - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Ethereum_to_Substrate_Sync_best_block_numbers", - "format": "time_series", - "instant": true, - "interval": "", - "intervalFactor": 1, - "legendFormat": "Best Known Header on {{node}} Node", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Best Blocks according to Relay", - "type": "stat" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 6, - "x": 12, - "y": 8 - }, - "hiddenSeries": false, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": 
"Ethereum_to_Substrate_Sync_system_average_load", - "interval": "", - "legendFormat": "Average system load in last {{over}}", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": null - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Average System Load", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 6, - "x": 18, - "y": 8 - }, - "id": 12, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "avg_over_time(Ethereum_to_Substrate_Sync_process_cpu_usage_percentage[1m])", - "instant": true, - "interval": "", - "legendFormat": "1 CPU = 100", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Relay Process CPU Usage ", - "type": "gauge" - }, - { - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 12, - "x": 0, - "y": 14 - }, - "id": 4, - "options": { - "displayMode": "gradient", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showUnfilled": true - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Ethereum_to_Substrate_Sync_blocks_in_state", - "instant": true, - "interval": "", - "legendFormat": "{{state}}", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Queued Headers in Relay", - "type": "bargauge" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 16 - }, - "hiddenSeries": false, - "id": 10, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Ethereum_to_Substrate_Sync_process_memory_usage_bytes / 1024 / 1024", - "interval": "", - "legendFormat": "Process memory, MB", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": 
null, - "timeRegions": [], - "timeShift": null, - "title": "Memory Usage for Relay Process", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "5s", - "schemaVersion": 26, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-5m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ] - }, - "timezone": "", - "title": "Ethereum PoA to Rialto Header Sync Dashboard", - "uid": "relay-poa-to-rialto-headers", - "version": 1 -} diff --git a/deployments/bridges/poa-rialto/dashboard/grafana/relay-rialto-to-poa-headers-dashboard.json b/deployments/bridges/poa-rialto/dashboard/grafana/relay-rialto-to-poa-headers-dashboard.json deleted file mode 100644 index 149c637fcb15..000000000000 --- a/deployments/bridges/poa-rialto/dashboard/grafana/relay-rialto-to-poa-headers-dashboard.json +++ /dev/null @@ -1,694 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "links": [], - "panels": [ - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 5 - ], - "type": "gt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "5m", - "handler": 1, - "message": "", - "name": "Synced Header Difference is Over 5 (Rialto to Ethereum PoA)", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "Shows how many headers behind the target chain is from the source chain.", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 0 - }, - "hiddenSeries": false, - "id": 14, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max(Substrate_to_Ethereum_Sync_best_block_numbers{node=\"source\"}) - max(Substrate_to_Ethereum_Sync_best_block_numbers{node=\"target\"})", - "format": "table", - "instant": false, - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 5 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Difference Between Source 
and Target Headers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 5 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "2m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "3m", - "frequency": "5m", - "handler": 1, - "name": "No New Headers (Rialto to Ethereum PoA)", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "How many headers has the relay synced from the source node in the last 2 mins?", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 0 - }, - "hiddenSeries": false, - "id": 16, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max_over_time(Substrate_to_Ethereum_Sync_best_block_numbers{node=\"source\"}[2m])-min_over_time(Substrate_to_Ethereum_Sync_best_block_numbers{node=\"source\"}[2m])", - "interval": "", - "legendFormat": "Number of new Headers on Rialto (Last 2 Mins)", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "lt", - "value": 5 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Headers Synced on Ethereum PoA (Last 2 Mins)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": { - "align": null - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 8 - }, - "id": 2, - "interval": "5s", - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": 
"Substrate_to_Ethereum_Sync_best_block_numbers", - "format": "time_series", - "instant": true, - "interval": "", - "intervalFactor": 1, - "legendFormat": "Best Known Header on {{node}} Node", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Best Blocks according to Relay", - "type": "stat" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 6, - "x": 12, - "y": 8 - }, - "hiddenSeries": false, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Substrate_to_Ethereum_Sync_system_average_load", - "interval": "", - "legendFormat": "Average system load in last {{over}}", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": null - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Average System Load", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 6, - "x": 18, - "y": 8 - }, - "id": 12, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "avg_over_time(Substrate_to_Ethereum_Sync_process_cpu_usage_percentage[1m])", - "instant": true, - "interval": "", - "legendFormat": "1 CPU = 100", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Relay Process CPU Usage ", - "type": "gauge" - }, - { - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 12, - "x": 0, - "y": 14 - }, - "id": 4, - "options": { - "displayMode": "gradient", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showUnfilled": true - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Substrate_to_Ethereum_Sync_blocks_in_state", - "instant": true, - "interval": "", - "legendFormat": 
"{{state}}", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Queued Headers in Relay", - "type": "bargauge" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 16 - }, - "hiddenSeries": false, - "id": 10, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Substrate_to_Ethereum_Sync_process_memory_usage_bytes / 1024 / 1024", - "interval": "", - "legendFormat": "Process memory, MB", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Memory Usage for Relay Process", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "5s", - "schemaVersion": 26, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-5m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ] - }, - "timezone": "", - "title": "Rialto to Ethereum PoA Header Sync Dashboard", - "uid": "relay-rialto-to-poa-headers", - "version": 1 -} diff --git a/deployments/bridges/poa-rialto/dashboard/prometheus/targets.yml b/deployments/bridges/poa-rialto/dashboard/prometheus/targets.yml deleted file mode 100644 index b0038008ef6d..000000000000 --- a/deployments/bridges/poa-rialto/dashboard/prometheus/targets.yml +++ /dev/null @@ -1,4 +0,0 @@ -- targets: - - relay-headers-poa-to-rialto:9616 - - relay-poa-exchange-rialto:9616 - - relay-headers-rialto-to-poa:9616 diff --git a/deployments/bridges/poa-rialto/docker-compose.yml b/deployments/bridges/poa-rialto/docker-compose.yml deleted file mode 100644 index 6bdcb2301242..000000000000 --- a/deployments/bridges/poa-rialto/docker-compose.yml +++ /dev/null @@ -1,94 +0,0 @@ -# This Compose file should be built using the Rialto and Eth-PoA node -# compose files. Otherwise it won't work. -# -# Exposed ports: 9616, 9716, 9816, 9916, 8080 - -version: '3.5' -services: - # We override these nodes to make sure we have the correct chain config for this network. - poa-node-arthur: &poa-node - volumes: - - ./bridges/poa-rialto/poa-config:/config - poa-node-bertha: - <<: *poa-node - poa-node-carlos: - <<: *poa-node - - # We provide an override for this particular node since this is a public facing - # node which we use to connect from things like Polkadot JS Apps. 
- rialto-node-charlie: - environment: - VIRTUAL_HOST: rialto.bridges.test-installations.parity.io,wss.rialto.brucke.link - VIRTUAL_PORT: 9944 - LETSENCRYPT_HOST: rialto.bridges.test-installations.parity.io,wss.rialto.brucke.link - LETSENCRYPT_EMAIL: admin@parity.io - - relay-headers-poa-to-rialto: &eth-poa-relay - image: paritytech/ethereum-poa-relay - entrypoint: /entrypoints/relay-headers-poa-to-rialto-entrypoint.sh - volumes: - - ./bridges/poa-rialto/entrypoints:/entrypoints - environment: - RUST_LOG: rpc=trace,bridge=trace - ports: - - "9616:9616" - depends_on: &all-nodes - - poa-node-arthur - - poa-node-bertha - - poa-node-carlos - - rialto-node-alice - - rialto-node-bob - - rialto-node-charlie - - rialto-node-dave - - rialto-node-eve - - relay-poa-exchange-rialto: - <<: *eth-poa-relay - entrypoint: /entrypoints/relay-poa-exchange-rialto-entrypoint.sh - ports: - - "9716:9616" - - relay-headers-rialto-to-poa: - <<: *eth-poa-relay - entrypoint: /entrypoints/relay-headers-rialto-to-poa-entrypoint.sh - ports: - - "9816:9616" - - poa-exchange-tx-generator: - <<: *eth-poa-relay - entrypoint: /entrypoints/poa-exchange-tx-generator-entrypoint.sh - environment: - EXCHANGE_GEN_MIN_AMOUNT_FINNEY: ${EXCHANGE_GEN_MIN_AMOUNT_FINNEY:-1} - EXCHANGE_GEN_MAX_AMOUNT_FINNEY: ${EXCHANGE_GEN_MAX_AMOUNT_FINNEY:-100000} - EXCHANGE_GEN_MAX_SUBMIT_DELAY_S: ${EXCHANGE_GEN_MAX_SUBMIT_DELAY_S:-60} - ports: - - "9916:9616" - depends_on: - - relay-headers-poa-to-rialto - - relay-headers-rialto-to-poa - - front-end: - build: - context: . - dockerfile: ./bridges/poa-rialto/Front-end.Dockerfile - args: - SUBSTRATE_PROVIDER: ${UI_SUBSTRATE_PROVIDER:-ws://localhost:9944} - ETHEREUM_PROVIDER: ${UI_ETHEREUM_PROVIDER:-http://localhost:8545} - EXPECTED_ETHEREUM_NETWORK_ID: ${UI_EXPECTED_ETHEREUM_NETWORK_ID:-105} - ports: - - "8080:80" - - # Note: These are being overridden from the top level `monitoring` compose file. - prometheus-metrics: - volumes: - - ./bridges/poa-rialto/dashboard/prometheus/targets.yml:/etc/prometheus/targets-poa-rialto.yml - depends_on: *all-nodes - - grafana-dashboard: - volumes: - - ./bridges/poa-rialto/dashboard/grafana:/etc/grafana/dashboards/poa-rialto:ro - environment: - VIRTUAL_HOST: dashboard.rialto.bridges.test-installations.parity.io,grafana.rialto.brucke.link - VIRTUAL_PORT: 3000 - LETSENCRYPT_HOST: dashboard.rialto.bridges.test-installations.parity.io,grafana.rialto.brucke.link - LETSENCRYPT_EMAIL: admin@parity.io diff --git a/deployments/bridges/poa-rialto/entrypoints/poa-exchange-tx-generator-entrypoint.sh b/deployments/bridges/poa-rialto/entrypoints/poa-exchange-tx-generator-entrypoint.sh deleted file mode 100755 index 9af373b0216f..000000000000 --- a/deployments/bridges/poa-rialto/entrypoints/poa-exchange-tx-generator-entrypoint.sh +++ /dev/null @@ -1,99 +0,0 @@ -#!/bin/bash - -# THIS SCRIPT IS NOT INTENDED FOR USE IN PRODUCTION ENVIRONMENT -# -# This script periodically calls the relay binary to generate PoA -> Substrate -# exchange transactions from hardcoded PoA senders (assuming they have -# enough funds) to hardcoded Substrate recipients. - -set -eu - -# Path to relay binary -RELAY_BINARY_PATH=${RELAY_BINARY_PATH:-./ethereum-poa-relay} -# Ethereum node host -ETH_HOST=${ETH_HOST:-poa-node-arthur} -# Ethereum node websocket port -ETH_PORT=${ETH_PORT:-8546} -# Ethereum chain id -ETH_CHAIN_ID=${ETH_CHAIN_ID:-105} - -# All possible Substrate recipients (hex-encoded public keys) -SUB_RECIPIENTS=( - # Alice (5GrwvaEF...)
- "d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d"\ - # Bob (5FHneW46...) - "8eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48"\ - # Charlie (5FLSigC9...) - "90b5ab205c6974c9ea841be688864633dc9ca8a357843eeacf2314649965fe22"\ - # Dave (5DAAnrj7...) - "306721211d5404bd9da88e0204360a1a9ab8b87c66c1bc2fcdd37f3c2222cc20"\ - # Eve (5HGjWAeF...) - "e659a7a1628cdd93febc04a4e0646ea20e9f5f0ce097d9a05290d4a9e054df4e"\ - # Ferdie (5CiPPseX...) - "1cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c" -) -# All possible Ethereum signers (hex-encoded private keys) -# (note that we're tracking nonce here => sender must not send concurrent transactions) -ETH_SIGNERS=( - # Bertha account (0x007594304039c2937a12220338aab821d819f5a4) and its current nonce (unknown by default) - "bc10e0f21e33456ade82182dd1ebdbdd89bca923d4e4adbd90fb5b44d7098cbe" "" -) -# Minimal exchange amount (in finney) -MIN_EXCHANGE_AMOUNT_FINNEY=${EXCHANGE_GEN_MIN_AMOUNT_FINNEY:-1} # 0.1 ETH -# Maximal exchange amount (in finney) -MAX_EXCHANGE_AMOUNT_FINNEY=${EXCHANGE_GEN_MAX_AMOUNT_FINNEY:-100000} # 100 ETH -# Max delay before submitting transactions (s) -MAX_SUBMIT_DELAY_S=${EXCHANGE_GEN_MAX_SUBMIT_DELAY_S:-60} - -while true -do - # sleep some time - SUBMIT_DELAY_S=`shuf -i 0-$MAX_SUBMIT_DELAY_S -n 1` - echo "Sleeping $SUBMIT_DELAY_S seconds..." - sleep $SUBMIT_DELAY_S - - # select recipient - SUB_RECIPIENTS_MAX_INDEX=$((${#SUB_RECIPIENTS[@]} - 1)) - SUB_RECIPIENT_INDEX=`shuf -i 0-$SUB_RECIPIENTS_MAX_INDEX -n 1` - SUB_RECIPIENT=${SUB_RECIPIENTS[$SUB_RECIPIENT_INDEX]} - - # select signer - ETH_SIGNERS_MAX_INDEX=$(((${#ETH_SIGNERS[@]} - 1) / 2)) - ETH_SIGNERS_INDEX=`shuf -i 0-$ETH_SIGNERS_MAX_INDEX -n 1` - ETH_SIGNER_INDEX=$(($ETH_SIGNERS_INDEX * 2)) - ETH_SIGNER_NONCE_INDEX=$(($ETH_SIGNER_INDEX + 1)) - ETH_SIGNER=${ETH_SIGNERS[$ETH_SIGNER_INDEX]} - ETH_SIGNER_NONCE=${ETH_SIGNERS[$ETH_SIGNER_NONCE_INDEX]} - if [ -z $ETH_SIGNER_NONCE ]; then - ETH_SIGNER_NONCE_ARG= - else - ETH_SIGNER_NONCE_ARG=`printf -- "--eth-nonce=%s" $ETH_SIGNER_NONCE` - fi - - # select amount - EXCHANGE_AMOUNT_FINNEY=`shuf -i $MIN_EXCHANGE_AMOUNT_FINNEY-$MAX_EXCHANGE_AMOUNT_FINNEY -n 1` - EXCHANGE_AMOUNT_ETH=`printf "%s000" $EXCHANGE_AMOUNT_FINNEY` - - # submit transaction - echo "Sending $EXCHANGE_AMOUNT_ETH from PoA:$ETH_SIGNER to Substrate:$SUB_RECIPIENT. 
Nonce: $ETH_SIGNER_NONCE" - set -x - SUBMIT_OUTPUT=`$RELAY_BINARY_PATH 2>&1 eth-submit-exchange-tx \ - --sub-recipient=$SUB_RECIPIENT \ - --eth-host=$ETH_HOST \ - --eth-port=$ETH_PORT \ - --eth-chain-id=$ETH_CHAIN_ID \ - --eth-signer=$ETH_SIGNER \ - --eth-amount=$EXCHANGE_AMOUNT_ETH \ - $ETH_SIGNER_NONCE_ARG` - set +x - - # update sender nonce - SUBMIT_OUTPUT_RE='nonce: ([0-9]+)' - if [[ $SUBMIT_OUTPUT =~ $SUBMIT_OUTPUT_RE ]]; then - ETH_SIGNER_NONCE=${BASH_REMATCH[1]} - ETH_SIGNERS[$ETH_SIGNER_NONCE_INDEX]=$(($ETH_SIGNER_NONCE + 1)) - else - echo "Missing nonce in relay response: $SUBMIT_OUTPUT" - exit 1 - fi -done diff --git a/deployments/bridges/poa-rialto/entrypoints/relay-headers-poa-to-rialto-entrypoint.sh b/deployments/bridges/poa-rialto/entrypoints/relay-headers-poa-to-rialto-entrypoint.sh deleted file mode 100755 index b49362c03cdd..000000000000 --- a/deployments/bridges/poa-rialto/entrypoints/relay-headers-poa-to-rialto-entrypoint.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -set -xeu - -sleep 60 -curl -v http://poa-node-arthur:8545/api/health -curl -v http://poa-node-bertha:8545/api/health -curl -v http://poa-node-carlos:8545/api/health -curl -v http://rialto-node-alice:9933/health -curl -v http://rialto-node-bob:9933/health -curl -v http://rialto-node-charlie:9933/health - -/home/user/ethereum-poa-relay eth-to-sub \ - --sub-host rialto-node-alice \ - --eth-host poa-node-arthur \ - --prometheus-host=0.0.0.0 diff --git a/deployments/bridges/poa-rialto/entrypoints/relay-headers-rialto-to-poa-entrypoint.sh b/deployments/bridges/poa-rialto/entrypoints/relay-headers-rialto-to-poa-entrypoint.sh deleted file mode 100755 index 8fbf9ac0d97c..000000000000 --- a/deployments/bridges/poa-rialto/entrypoints/relay-headers-rialto-to-poa-entrypoint.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -set -xeu - -sleep 60 - -curl -v http://rialto-node-bob:9933/health -curl -v http://poa-node-bertha:8545/api/health - -# Try to deploy contracts first -# networkID = 0x69 -# Arthur's key. -/home/user/ethereum-poa-relay eth-deploy-contract \ - --eth-chain-id 105 \ - --eth-signer 0399dbd15cf6ee8250895a1f3873eb1e10e23ca18e8ed0726c63c4aea356e87d \ - --sub-host rialto-node-bob \ - --eth-host poa-node-bertha || echo "Failed to deploy contracts." 
- -sleep 10 -echo "Starting SUB -> ETH relay" -/home/user/ethereum-poa-relay sub-to-eth \ - --eth-contract c9a61fb29e971d1dabfd98657969882ef5d0beee \ - --eth-chain-id 105 \ - --eth-signer 0399dbd15cf6ee8250895a1f3873eb1e10e23ca18e8ed0726c63c4aea356e87d \ - --sub-host rialto-node-bob \ - --eth-host poa-node-bertha \ - --prometheus-host=0.0.0.0 diff --git a/deployments/bridges/poa-rialto/entrypoints/relay-poa-exchange-rialto-entrypoint.sh b/deployments/bridges/poa-rialto/entrypoints/relay-poa-exchange-rialto-entrypoint.sh deleted file mode 100755 index 54d6baeebb06..000000000000 --- a/deployments/bridges/poa-rialto/entrypoints/relay-poa-exchange-rialto-entrypoint.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -set -xeu - -sleep 60 -curl -v http://poa-node-arthur:8545/api/health -curl -v http://poa-node-bertha:8545/api/health -curl -v http://poa-node-carlos:8545/api/health -curl -v http://rialto-node-alice:9933/health -curl -v http://rialto-node-bob:9933/health -curl -v http://rialto-node-charlie:9933/health - -/home/user/ethereum-poa-relay eth-exchange-sub \ - --sub-host rialto-node-alice \ - --sub-signer //Bob \ - --eth-host poa-node-arthur \ - --prometheus-host=0.0.0.0 diff --git a/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/address_book.json b/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/address_book.json deleted file mode 100644 index 9e26dfeeb6e6..000000000000 --- a/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/address_book.json +++ /dev/null @@ -1 +0,0 @@ -{} \ No newline at end of file diff --git a/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/arthur.json b/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/arthur.json deleted file mode 100644 index fa59a46480c2..000000000000 --- a/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/arthur.json +++ /dev/null @@ -1 +0,0 @@ -{"id":"dd04f316-bc9d-2deb-4a34-51014cd5f34f","version":3,"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"aa91e6f0e6cf48208be4a1bcf15c6f30"},"ciphertext":"6e057599b13a87e8181bb39a40e14848fdc97958d493ddfa6bb1260350f69328","kdf":"pbkdf2","kdfparams":{"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"79dd8c09c5c066b830179a2558a51efca6d97c0db2c4128090a01835786823c5"},"mac":"8f8b8e2c9de29ec8eefc54a60055e30ae7ff4dd4a367eaf38880edb887da771e"},"address":"005e714f896a8b7cede9d38688c1a81de72a58e4","name":"","meta":"{}"} \ No newline at end of file diff --git a/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/bertha.json b/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/bertha.json deleted file mode 100644 index 7168ec4f71f7..000000000000 --- a/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/bertha.json +++ /dev/null @@ -1 +0,0 @@ -{"id":"6d1e690f-0b52-35f7-989b-46100e7c65ed","version":3,"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"a5b4d0466834e75c9fd29c6cbbac57ad"},"ciphertext":"102ac328cbe66d8cb8515c42e3268776a9be4419a5cb7b79852860b1e691c15b","kdf":"pbkdf2","kdfparams":{"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"e8daf2e70086b0cacf925d368fd3f60cada1285e39a42c4cc73c135368cfdbef"},"mac":"1bc3b750900a1143c64ba9e677d69e1093aab47cb003ba09f3cd595a3b422db5"},"address":"007594304039c2937a12220338aab821d819f5a4","name":"","meta":"{}"} \ No newline at end of file diff --git a/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/carlos.json b/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/carlos.json deleted file mode 100644 index 2f9759f7bdfe..000000000000 --- 
a/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/carlos.json +++ /dev/null @@ -1 +0,0 @@ -{"id":"ffaebba1-f1b9-8758-7034-0314040b1396","version":3,"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"97f124bc8a7bf55d00eb2755c2b50364"},"ciphertext":"b87827816f33d2bef2dc3102a8a7744b86912f8ace10e45cb282a13487769ed2","kdf":"pbkdf2","kdfparams":{"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"3114c67a05bff7831d112083f566b176bfc874aea160eebadbe5564e406ee85c"},"mac":"e9bfe8fd6f612bc036bb57659297fc03db022264f5086a1b5726972d3ab6f64a"},"address":"004e7a39907f090e19b0b80a277e77b72b22e269","name":"","meta":"{}"} \ No newline at end of file diff --git a/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/diego.json b/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/diego.json deleted file mode 100644 index f1df56b84136..000000000000 --- a/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/diego.json +++ /dev/null @@ -1 +0,0 @@ -{"id":"ef9eb431-dc73-cf31-357e-736f64febe68","version":3,"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"7077f1c4170d9fc2e05c5956be32fb51"},"ciphertext":"a053be448768d984257aeb8f9c7913e3f54c6e6e741accad9f09dd70c2d9828c","kdf":"pbkdf2","kdfparams":{"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"12580aa4624040970301e7474d3f9b2a93552bfe9ea2517f7119ccf8e91ebd0d"},"mac":"796dbb48adcfc09041fe39121632801d9f950d3c73dd47105180d8097d4f4491"},"address":"00eed42bf93b498f28acd21d207427a14074defe","name":"","meta":"{}"} \ No newline at end of file diff --git a/deployments/bridges/poa-rialto/poa-config/pass b/deployments/bridges/poa-rialto/poa-config/pass deleted file mode 100644 index f3097ab13082..000000000000 --- a/deployments/bridges/poa-rialto/poa-config/pass +++ /dev/null @@ -1 +0,0 @@ -password diff --git a/deployments/bridges/poa-rialto/poa-config/poa-node-config b/deployments/bridges/poa-rialto/poa-config/poa-node-config deleted file mode 100644 index 2b3c56453d7b..000000000000 --- a/deployments/bridges/poa-rialto/poa-config/poa-node-config +++ /dev/null @@ -1,20 +0,0 @@ -[parity] -chain = "/config/poa.json" -keys_path = "/config/keys" -no_persistent_txqueue = true - -[account] -password = ["/config/pass"] - -[network] -reserved_peers = "/config/reserved" - -[rpc] -apis = ["all"] -cors = ["moz-extension://*", "chrome-extension://*"] - -[mining] -force_sealing = true - -[misc] -unsafe_expose = true diff --git a/deployments/bridges/poa-rialto/poa-config/poa.json b/deployments/bridges/poa-rialto/poa-config/poa.json deleted file mode 100644 index 12a8a58f263b..000000000000 --- a/deployments/bridges/poa-rialto/poa-config/poa.json +++ /dev/null @@ -1,184 +0,0 @@ -{ - "name": "BridgePoa", - "engine": { - "authorityRound": { - "params": { - "stepDuration": 10, - "validators": { - "list": [ - "0x005e714f896a8b7cede9d38688c1a81de72a58e4", - "0x007594304039c2937a12220338aab821d819f5a4", - "0x004e7a39907f090e19b0b80a277e77b72b22e269" - ] - }, - "validateScoreTransition": 0, - "validateStepTransition": 0, - "maximumUncleCountTransition": 0, - "maximumUncleCount": 0, - "emptyStepsTransition": "0xfffffffff", - "maximumEmptySteps": 1 - } - } - }, - "params": { - "accountStartNonce": "0x0", - "eip1014Transition": "0x0", - "eip1052Transition": "0x0", - "eip140Transition": "0x0", - "eip145Transition": "0x0", - "eip150Transition": "0x0", - "eip155Transition": "0x0", - "eip160Transition": "0x0", - "eip161abcTransition": "0x0", - "eip161dTransition": "0x0", - "eip211Transition": "0x0", - "eip214Transition": "0x0", - "eip658Transition": "0x0", - "eip98Transition": 
"0x7fffffffffffff", - "gasLimitBoundDivisor": "0x0400", - "maxCodeSize": 24576, - "maxCodeSizeTransition": "0x0", - "maximumExtraDataSize": "0x20", - "minGasLimit": "0x1388", - "networkID" : "0x69", - "validateChainIdTransition": "0x0", - "validateReceiptsTransition": "0x0" - }, - "genesis": { - "seal": { - "authorityRound": { - "step": "0x0", - "signature": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" - } - }, - "difficulty": "0x20000", - "author": "0x0000000000000000000000000000000000000000", - "timestamp": "0x00", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "extraData": "0x", - "gasLimit": "0x222222" - }, - "accounts": { - "0000000000000000000000000000000000000001": { "balance": "1", "nonce": "1048576", "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } }, - "0000000000000000000000000000000000000002": { "balance": "1", "nonce": "1048576", "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } }, - "0000000000000000000000000000000000000003": { "balance": "1", "nonce": "1048576", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } }, - "0000000000000000000000000000000000000004": { "balance": "1", "nonce": "1048576", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } }, - "0000000000000000000000000000000000000005": { "balance": "1", "builtin": { "name": "modexp", "activate_at": 0, "pricing": { "modexp": { "divisor": 20 } } } }, - "0000000000000000000000000000000000000006": { - "balance": "1", - "builtin": { - "name": "alt_bn128_add", - "pricing": { - "0": { - "price": { "alt_bn128_const_operations": { "price": 500 }} - }, - "0x7fffffffffffff": { - "info": "EIP 1108 transition", - "price": { "alt_bn128_const_operations": { "price": 150 }} - } - } - } - }, - "0000000000000000000000000000000000000007": { - "balance": "1", - "builtin": { - "name": "alt_bn128_mul", - "pricing": { - "0": { - "price": { "alt_bn128_const_operations": { "price": 40000 }} - }, - "0x7fffffffffffff": { - "info": "EIP 1108 transition", - "price": { "alt_bn128_const_operations": { "price": 6000 }} - } - } - } - }, - "0000000000000000000000000000000000000008": { - "balance": "1", - "builtin": { - "name": "alt_bn128_pairing", - "pricing": { - "0": { - "price": { "alt_bn128_pairing": { "base": 100000, "pair": 80000 }} - }, - "0x7fffffffffffff": { - "info": "EIP 1108 transition", - "price": { "alt_bn128_pairing": { "base": 45000, "pair": 34000 }} - } - } - } - }, - "0x0000000000000000000000000000000000000009": { - "builtin": { - "name": "blake2_f", - "activate_at": "0xd751a5", - "pricing": { - "blake2_f": { - "gas_per_round": 1 - } - } - } - }, - "0x0000000000000000000000000000000000000010": { - "builtin": { - "name": "parse_substrate_header", - "pricing": { - "linear": { - "base": 3000, - "word": 0 - } - } - } - }, - "0x0000000000000000000000000000000000000011": { - "builtin": { - "name": "get_substrate_header_signal", - "pricing": { - "linear": { - "base": 3000, - "word": 0 - } - } - } - }, - "0x0000000000000000000000000000000000000012": { - "builtin": { - "name": "verify_substrate_finality_proof", - "pricing": { - "linear": { - "base": 3000, - "word": 0 - } - } - } - }, - "0x0000000000000000000000000000000000000013": { - "builtin": { - "name": "my_test", - "pricing": { - "linear": { - "base": 3000, - "word": 0 - } - } - } - }, - 
"0x005e714f896a8b7cede9d38688c1a81de72a58e4": { - "balance": "1606938044258990275541962092341162602522202993782792835301376", - "nonce": "0x1" - }, - "0x007594304039c2937a12220338aab821d819f5a4": { - "balance": "1606938044258990275541962092341162602522202993782792835301376", - "nonce": "0x1" - }, - "0x004e7a39907f090e19b0b80a277e77b72b22e269": { - "balance": "1606938044258990275541962092341162602522202993782792835301376", - "nonce": "0x1" - }, - "0x00eed42bf93b498f28acd21d207427a14074defe": { - "balance": "1606938044258990275541962092341162602522202993782792835301376", - "nonce": "0x1" - } - } -} diff --git a/deployments/bridges/poa-rialto/poa-config/reserved b/deployments/bridges/poa-rialto/poa-config/reserved deleted file mode 100644 index 209d71b7fb30..000000000000 --- a/deployments/bridges/poa-rialto/poa-config/reserved +++ /dev/null @@ -1,3 +0,0 @@ -enode://543d0874df46dff238d62547160f9d11e3d21897d7041bbbe46a04d2ee56d9eaf108f2133c0403159624f7647198e224d0755d23ad0e1a50c0912973af6e8a8a@poa-node-arthur:30303 -enode://710de70733e88a24032e53054985f7239e37351f5f3335a468a1a78a3026e9f090356973b00262c346a6608403df2c7107fc4def2cfe4995ea18a41292b9384f@poa-node-bertha:30303 -enode://943525f415b9482f1c49bd39eb979e4e2b406f4137450b0553bffa5cba2928e25ff89ef70f7325aad8a75dbb5955eaecc1aee7ac55d66bcaaa07c8ea58adb23a@poa-node-carlos:30303 diff --git a/deployments/bridges/rialto-millau/dashboard/grafana/relay-millau-to-rialto-messages-dashboard.json b/deployments/bridges/rialto-millau/dashboard/grafana/relay-millau-to-rialto-messages-dashboard.json index 6fd0fb4ebc7d..32f3e53d6671 100644 --- a/deployments/bridges/rialto-millau/dashboard/grafana/relay-millau-to-rialto-messages-dashboard.json +++ b/deployments/bridges/rialto-millau/dashboard/grafana/relay-millau-to-rialto-messages-dashboard.json @@ -1441,7 +1441,7 @@ "pluginVersion": "7.1.3", "targets": [ { - "expr": "avg_over_time(Millau_to_Rialto_MessageLane_00000000_process_cpu_usage_percentage[1m])", + "expr": "avg_over_time(process_cpu_usage_percentage{instance='relay-millau-rialto:9616'}[1m])", "instant": true, "interval": "", "legendFormat": "1 CPU = 100", @@ -1499,7 +1499,7 @@ "steppedLine": false, "targets": [ { - "expr": "Millau_to_Rialto_MessageLane_00000000_system_average_load", + "expr": "system_average_load{instance='relay-millau-rialto:9616'}", "interval": "", "legendFormat": "Average system load in last {{over}}", "refId": "A" @@ -1592,7 +1592,7 @@ "steppedLine": false, "targets": [ { - "expr": "Millau_to_Rialto_MessageLane_00000000_process_memory_usage_bytes / 1024 / 1024", + "expr": "process_memory_usage_bytes{instance='relay-millau-rialto:9616'} / 1024 / 1024", "interval": "", "legendFormat": "Process memory, MB", "refId": "A" diff --git a/deployments/bridges/rialto-millau/dashboard/grafana/relay-rialto-to-millau-messages-dashboard.json b/deployments/bridges/rialto-millau/dashboard/grafana/relay-rialto-to-millau-messages-dashboard.json index 33725dc60961..eaca8610aec7 100644 --- a/deployments/bridges/rialto-millau/dashboard/grafana/relay-rialto-to-millau-messages-dashboard.json +++ b/deployments/bridges/rialto-millau/dashboard/grafana/relay-rialto-to-millau-messages-dashboard.json @@ -1190,7 +1190,7 @@ "pluginVersion": "7.1.3", "targets": [ { - "expr": "avg_over_time(Rialto_to_Millau_MessageLane_00000000_process_cpu_usage_percentage[1m])", + "expr": "avg_over_time(process_cpu_usage_percentage{instance='relay-millau-rialto:9616'}[1m])", "instant": true, "interval": "", "legendFormat": "1 CPU = 100", @@ -1248,7 +1248,7 @@ "steppedLine": 
false, "targets": [ { - "expr": "Rialto_to_Millau_MessageLane_00000000_system_average_load", + "expr": "system_average_load{instance='relay-millau-rialto:9616'}", "interval": "", "legendFormat": "Average system load in last {{over}}", "refId": "A" @@ -1341,7 +1341,7 @@ "steppedLine": false, "targets": [ { - "expr": "Rialto_to_Millau_MessageLane_00000000_process_memory_usage_bytes / 1024 / 1024", + "expr": "process_memory_usage_bytes{instance='relay-millau-rialto:9616'} / 1024 / 1024", "interval": "", "legendFormat": "Process memory, MB", "refId": "A" diff --git a/deployments/bridges/rialto-millau/dashboard/grafana/rialto-millau-maintenance-dashboard.json b/deployments/bridges/rialto-millau/dashboard/grafana/rialto-millau-maintenance-dashboard.json index 61ff281cc2a9..5280da748502 100644 --- a/deployments/bridges/rialto-millau/dashboard/grafana/rialto-millau-maintenance-dashboard.json +++ b/deployments/bridges/rialto-millau/dashboard/grafana/rialto-millau-maintenance-dashboard.json @@ -65,7 +65,7 @@ "targets": [ { "exemplar": true, - "expr": "Rialto_to_Millau_MessageLane_00000000_rialto_storage_proof_overhead", + "expr": "rialto_storage_proof_overhead{instance='relay-millau-rialto:9616'}", "interval": "", "legendFormat": "Actual overhead", "refId": "A" @@ -169,14 +169,14 @@ "targets": [ { "exemplar": true, - "expr": "Westend_to_Millau_Sync_kusama_to_base_conversion_rate / Westend_to_Millau_Sync_polkadot_to_base_conversion_rate", + "expr": "kusama_to_base_conversion_rate{instance='relay-millau-rialto:9616'} / polkadot_to_base_conversion_rate{instance='relay-millau-rialto:9616'}", "interval": "", "legendFormat": "Outside of runtime (actually Polkadot -> Kusama)", "refId": "A" }, { "exemplar": true, - "expr": "Rialto_to_Millau_MessageLane_00000000_rialto_millau_to_rialto_conversion_rate", + "expr": "Millau_Rialto_to_Millau_conversion_rate{instance='relay-millau-rialto:9616'}", "hide": false, "interval": "", "legendFormat": "At runtime", @@ -187,7 +187,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Rialto: Millau -> Rialto conversion rate", + "title": "Millau: Rialto -> Millau conversion rate", "tooltip": { "shared": true, "sort": 0, @@ -273,7 +273,7 @@ "targets": [ { "exemplar": true, - "expr": "Millau_to_Rialto_MessageLane_00000000_millau_storage_proof_overhead", + "expr": "millau_storage_proof_overhead{instance='relay-millau-rialto:9616'}", "interval": "", "legendFormat": "Actual overhead", "refId": "A" @@ -377,14 +377,14 @@ "targets": [ { "exemplar": true, - "expr": "Westend_to_Millau_Sync_polkadot_to_base_conversion_rate / Westend_to_Millau_Sync_kusama_to_base_conversion_rate", + "expr": "polkadot_to_base_conversion_rate{instance='relay-millau-rialto:9616'} / kusama_to_base_conversion_rate{instance='relay-millau-rialto:9616'}", "interval": "", "legendFormat": "Outside of runtime (actually Kusama -> Polkadot)", "refId": "A" }, { "exemplar": true, - "expr": "Millau_to_Rialto_MessageLane_00000000_millau_rialto_to_millau_conversion_rate", + "expr": "Rialto_Millau_to_Rialto_conversion_rate{instance='relay-millau-rialto:9616'}", "hide": false, "interval": "", "legendFormat": "At runtime", @@ -395,7 +395,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Millau: Rialto -> Millau conversion rate", + "title": "Rialto: Millau -> Rialto conversion rate", "tooltip": { "shared": true, "sort": 0, diff --git a/deployments/bridges/westend-millau/dashboard/grafana/relay-westend-to-millau-headers-dashboard.json 
b/deployments/bridges/westend-millau/dashboard/grafana/relay-westend-to-millau-headers-dashboard.json index 6003604fa531..1a3603512fdf 100644 --- a/deployments/bridges/westend-millau/dashboard/grafana/relay-westend-to-millau-headers-dashboard.json +++ b/deployments/bridges/westend-millau/dashboard/grafana/relay-westend-to-millau-headers-dashboard.json @@ -401,7 +401,7 @@ "steppedLine": false, "targets": [ { - "expr": "Westend_to_Millau_Sync_system_average_load", + "expr": "system_average_load{instance='relay-headers-westend-to-millau:9616'}", "interval": "", "legendFormat": "Average system load in last {{over}}", "refId": "A" @@ -500,7 +500,7 @@ "pluginVersion": "7.1.3", "targets": [ { - "expr": "avg_over_time(Westend_to_Millau_Sync_process_cpu_usage_percentage[1m])", + "expr": "avg_over_time(process_cpu_usage_percentage{instance='relay-headers-westend-to-millau:9616'}[1m])", "instant": true, "interval": "", "legendFormat": "1 CPU = 100", @@ -615,7 +615,7 @@ "steppedLine": false, "targets": [ { - "expr": "Westend_to_Millau_Sync_process_memory_usage_bytes / 1024 / 1024", + "expr": "process_memory_usage_bytes{instance='relay-headers-westend-to-millau:9616'} / 1024 / 1024", "interval": "", "legendFormat": "Process memory, MB", "refId": "A" diff --git a/deployments/dev/poa-config/keys/BridgePoa/address_book.json b/deployments/dev/poa-config/keys/BridgePoa/address_book.json deleted file mode 100644 index 9e26dfeeb6e6..000000000000 --- a/deployments/dev/poa-config/keys/BridgePoa/address_book.json +++ /dev/null @@ -1 +0,0 @@ -{} \ No newline at end of file diff --git a/deployments/dev/poa-config/keys/BridgePoa/arthur.json b/deployments/dev/poa-config/keys/BridgePoa/arthur.json deleted file mode 100644 index fa59a46480c2..000000000000 --- a/deployments/dev/poa-config/keys/BridgePoa/arthur.json +++ /dev/null @@ -1 +0,0 @@ -{"id":"dd04f316-bc9d-2deb-4a34-51014cd5f34f","version":3,"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"aa91e6f0e6cf48208be4a1bcf15c6f30"},"ciphertext":"6e057599b13a87e8181bb39a40e14848fdc97958d493ddfa6bb1260350f69328","kdf":"pbkdf2","kdfparams":{"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"79dd8c09c5c066b830179a2558a51efca6d97c0db2c4128090a01835786823c5"},"mac":"8f8b8e2c9de29ec8eefc54a60055e30ae7ff4dd4a367eaf38880edb887da771e"},"address":"005e714f896a8b7cede9d38688c1a81de72a58e4","name":"","meta":"{}"} \ No newline at end of file diff --git a/deployments/dev/poa-config/pass b/deployments/dev/poa-config/pass deleted file mode 100644 index f3097ab13082..000000000000 --- a/deployments/dev/poa-config/pass +++ /dev/null @@ -1 +0,0 @@ -password diff --git a/deployments/dev/poa-config/poa-node-config b/deployments/dev/poa-config/poa-node-config deleted file mode 100644 index 146bbac17cf9..000000000000 --- a/deployments/dev/poa-config/poa-node-config +++ /dev/null @@ -1,17 +0,0 @@ -[parity] -chain = "./deployments/dev/poa-config/poa.json" -keys_path = "./deployments/dev/poa-config/keys" -no_persistent_txqueue = true - -[account] -password = ["./deployments/dev/poa-config/pass"] - -[rpc] -apis = ["all"] -cors = ["moz-extension://*", "chrome-extension://*"] - -[mining] -force_sealing = true - -[misc] -unsafe_expose = true diff --git a/deployments/dev/poa-config/poa.json b/deployments/dev/poa-config/poa.json deleted file mode 100644 index ecc21766b035..000000000000 --- a/deployments/dev/poa-config/poa.json +++ /dev/null @@ -1,178 +0,0 @@ -{ - "name": "BridgePoa", - "engine": { - "authorityRound": { - "params": { - "stepDuration": 10, - "validators": { - "list": [ - 
"0x005e714f896a8b7cede9d38688c1a81de72a58e4" - ] - }, - "validateScoreTransition": 0, - "validateStepTransition": 0, - "maximumUncleCountTransition": 0, - "maximumUncleCount": 0, - "emptyStepsTransition": "0xfffffffff", - "maximumEmptySteps": 1 - } - } - }, - "params": { - "accountStartNonce": "0x0", - "eip1014Transition": "0x0", - "eip1052Transition": "0x0", - "eip140Transition": "0x0", - "eip145Transition": "0x0", - "eip150Transition": "0x0", - "eip155Transition": "0x0", - "eip160Transition": "0x0", - "eip161abcTransition": "0x0", - "eip161dTransition": "0x0", - "eip211Transition": "0x0", - "eip214Transition": "0x0", - "eip658Transition": "0x0", - "eip98Transition": "0x7fffffffffffff", - "gasLimitBoundDivisor": "0x0400", - "maxCodeSize": 24576, - "maxCodeSizeTransition": "0x0", - "maximumExtraDataSize": "0x20", - "minGasLimit": "0x1388", - "networkID" : "0x69", - "validateChainIdTransition": "0x0", - "validateReceiptsTransition": "0x0" - }, - "genesis": { - "seal": { - "authorityRound": { - "step": "0x0", - "signature": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" - } - }, - "difficulty": "0x20000", - "author": "0x0000000000000000000000000000000000000000", - "timestamp": "0x00", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "extraData": "0x", - "gasLimit": "0x222222" - }, - "accounts": { - "0000000000000000000000000000000000000001": { "balance": "1", "nonce": "1048576", "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } }, - "0000000000000000000000000000000000000002": { "balance": "1", "nonce": "1048576", "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } }, - "0000000000000000000000000000000000000003": { "balance": "1", "nonce": "1048576", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } }, - "0000000000000000000000000000000000000004": { "balance": "1", "nonce": "1048576", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } }, - "0000000000000000000000000000000000000005": { "balance": "1", "builtin": { "name": "modexp", "activate_at": 0, "pricing": { "modexp": { "divisor": 20 } } } }, - "0000000000000000000000000000000000000006": { - "balance": "1", - "builtin": { - "name": "alt_bn128_add", - "pricing": { - "0": { - "price": { "alt_bn128_const_operations": { "price": 500 }} - }, - "0x7fffffffffffff": { - "info": "EIP 1108 transition", - "price": { "alt_bn128_const_operations": { "price": 150 }} - } - } - } - }, - "0000000000000000000000000000000000000007": { - "balance": "1", - "builtin": { - "name": "alt_bn128_mul", - "pricing": { - "0": { - "price": { "alt_bn128_const_operations": { "price": 40000 }} - }, - "0x7fffffffffffff": { - "info": "EIP 1108 transition", - "price": { "alt_bn128_const_operations": { "price": 6000 }} - } - } - } - }, - "0000000000000000000000000000000000000008": { - "balance": "1", - "builtin": { - "name": "alt_bn128_pairing", - "pricing": { - "0": { - "price": { "alt_bn128_pairing": { "base": 100000, "pair": 80000 }} - }, - "0x7fffffffffffff": { - "info": "EIP 1108 transition", - "price": { "alt_bn128_pairing": { "base": 45000, "pair": 34000 }} - } - } - } - }, - "0x0000000000000000000000000000000000000009": { - "builtin": { - "name": "blake2_f", - "activate_at": "0xd751a5", - "pricing": { - "blake2_f": { - "gas_per_round": 1 - } - } - } - }, - 
"0x0000000000000000000000000000000000000010": { - "builtin": { - "name": "parse_substrate_header", - "pricing": { - "linear": { - "base": 3000, - "word": 0 - } - } - } - }, - "0x0000000000000000000000000000000000000011": { - "builtin": { - "name": "get_substrate_header_signal", - "pricing": { - "linear": { - "base": 3000, - "word": 0 - } - } - } - }, - "0x0000000000000000000000000000000000000012": { - "builtin": { - "name": "verify_substrate_finality_proof", - "pricing": { - "linear": { - "base": 3000, - "word": 0 - } - } - } - }, - "0x0000000000000000000000000000000000000013": { - "builtin": { - "name": "my_test", - "pricing": { - "linear": { - "base": 3000, - "word": 0 - } - } - } - }, - "0x005e714f896a8b7cede9d38688c1a81de72a58e4": { - "balance": "1606938044258990275541962092341162602522202993782792835301376", - "nonce": "0x1" - }, - "0x007594304039c2937a12220338aab821d819f5a4": { - "balance": "1606938044258990275541962092341162602522202993782792835301376", - "nonce": "0x1" - }, - "0x004e7a39907f090e19b0b80a277e77b72b22e269": { - "balance": "1606938044258990275541962092341162602522202993782792835301376", - "nonce": "0x1" - } - } -} diff --git a/deployments/networks/OpenEthereum.Dockerfile b/deployments/networks/OpenEthereum.Dockerfile deleted file mode 100644 index d47708ca29bf..000000000000 --- a/deployments/networks/OpenEthereum.Dockerfile +++ /dev/null @@ -1,91 +0,0 @@ -FROM ubuntu:xenial AS builder - -# show backtraces -ENV RUST_BACKTRACE 1 - -ENV LAST_DEPS_UPDATE 2020-06-19 - -# install tools and dependencies -RUN set -eux; \ - apt-get update && \ - apt-get install -y file curl jq ca-certificates && \ - apt-get install -y cmake pkg-config libssl-dev git clang libclang-dev - -ENV LAST_CERTS_UPDATE 2020-06-19 - -RUN update-ca-certificates && \ - curl https://sh.rustup.rs -sSf | sh -s -- -y - -ENV PATH="/root/.cargo/bin:${PATH}" -ENV LAST_RUST_UPDATE="2020-09-09" -RUN rustup update stable && \ - rustup install nightly && \ - rustup target add wasm32-unknown-unknown --toolchain nightly - -RUN rustc -vV && \ - cargo -V && \ - gcc -v && \ - g++ -v && \ - cmake --version - -WORKDIR /openethereum - -### Build from the repo -ARG ETHEREUM_REPO=https://github.com/paritytech/openethereum.git -ARG ETHEREUM_HASH=344991dbba2bc8657b00916f0e4b029c66f159e8 -RUN git clone $ETHEREUM_REPO /openethereum && git checkout $ETHEREUM_HASH - -### Build locally. Make sure to set the CONTEXT to main directory of the repo. -# ADD openethereum /openethereum - -WORKDIR /parity-bridges-common - -### Build from the repo -# Build using `master` initially. -ARG BRIDGE_REPO=https://github.com/paritytech/parity-bridges-common -RUN git clone $BRIDGE_REPO /parity-bridges-common && git checkout master - -WORKDIR /openethereum -RUN cargo build --release --verbose || true - -# Then rebuild by switching to a different branch to only incrementally -# build the changes. -WORKDIR /parity-bridges-common -ARG BRIDGE_HASH=master -RUN git checkout . && git fetch && git checkout $BRIDGE_HASH -### Build locally. Make sure to set the CONTEXT to main directory of the repo. -# ADD . 
/parity-bridges-common - -WORKDIR /openethereum -RUN cargo build --release --verbose -RUN strip ./target/release/openethereum - -FROM ubuntu:xenial - -# show backtraces -ENV RUST_BACKTRACE 1 - -RUN set -eux; \ - apt-get update && \ - apt-get install -y curl - -RUN groupadd -g 1000 openethereum \ - && useradd -u 1000 -g openethereum -s /bin/sh -m openethereum - -# switch to user openethereum here -USER openethereum - -WORKDIR /home/openethereum - -COPY --chown=openethereum:openethereum --from=builder /openethereum/target/release/openethereum ./ -# Solve issues with custom --keys-path -RUN mkdir -p ~/.local/share/io.parity.ethereum/keys/ -# check if executable works in this container -RUN ./openethereum --version - -EXPOSE 8545 8546 30303/tcp 30303/udp - -HEALTHCHECK --interval=2m --timeout=5s \ - CMD curl -f http://localhost:8545/api/health || exit 1 - -ENTRYPOINT ["/home/openethereum/openethereum"] diff --git a/deployments/networks/eth-poa.yml b/deployments/networks/eth-poa.yml deleted file mode 100644 index 7291a2ccfd70..000000000000 --- a/deployments/networks/eth-poa.yml +++ /dev/null @@ -1,46 +0,0 @@ -# Compose file for quickly spinning up a local instance of an Ethereum PoA network. -# -# Note that this PoA network is only used for testing, so the configuration settings you see here -# are *not* recommended for a production environment. -# -# For example, do *not* keep your account key in version control, and unless you're _really_ sure -# you want to provide public access to your nodes do *not* publicly expose RPC methods. -version: '3.5' -services: - poa-node-arthur: &poa-node - image: hcastano/openethereum-bridge-builtins - entrypoint: - - /home/openethereum/openethereum - - --config=/config/poa-node-config - - --node-key=arthur - - --engine-signer=0x005e714f896a8b7cede9d38688c1a81de72a58e4 - environment: - RUST_LOG: rpc=trace,txqueue=trace,bridge-builtin=trace - ports: - - "8545:8545" - - "8546:8546" - - "30303:30303" - - poa-node-bertha: - <<: *poa-node - entrypoint: - - /home/openethereum/openethereum - - --config=/config/poa-node-config - - --node-key=bertha - - --engine-signer=0x007594304039c2937a12220338aab821d819f5a4 - ports: - - "8645:8545" - - "8646:8546" - - "31303:30303" - - poa-node-carlos: - <<: *poa-node - entrypoint: - - /home/openethereum/openethereum - - --config=/config/poa-node-config - - --node-key=carlos - - --engine-signer=0x004e7a39907f090e19b0b80a277e77b72b22e269 - ports: - - "8745:8545" - - "8746:8546" - - "32303:30303" diff --git a/deployments/networks/millau.yml b/deployments/networks/millau.yml index 001f3a060947..d42c1d7d07cb 100644 --- a/deployments/networks/millau.yml +++ b/deployments/networks/millau.yml @@ -20,7 +20,7 @@ services: - --unsafe-rpc-external - --unsafe-ws-external environment: - RUST_LOG: runtime=trace,rpc=debug,txpool=trace,runtime::bridge=trace,sc_basic_authorship=trace + RUST_LOG: runtime=trace,rpc=debug,txpool=trace,runtime::bridge=trace,sc_basic_authorship=trace,beefy=debug ports: - "19933:9933" - "19944:9944" diff --git a/deployments/networks/rialto.yml b/deployments/networks/rialto.yml index 9b902a1ca28a..0a484b2dad75 100644 --- a/deployments/networks/rialto.yml +++ b/deployments/networks/rialto.yml @@ -20,7 +20,7 @@ services: - --unsafe-rpc-external - --unsafe-ws-external environment: - RUST_LOG: runtime=trace,rpc=debug,txpool=trace,runtime::bridge=trace + RUST_LOG: runtime=trace,rpc=debug,txpool=trace,runtime::bridge=trace,beefy=debug ports: - "9933:9933" - "9944:9944" diff --git a/deployments/run.sh b/deployments/run.sh index 
a82e0985c5e5..5c1cded1e832 100755 --- a/deployments/run.sh +++ b/deployments/run.sh @@ -4,7 +4,7 @@ # # To deploy a network you can run this script with the name of the bridge (or multiple bridges) you want to run. # -# `./run.sh poa-rialto rialto-millau` +# `./run.sh westend-millau rialto-millau` # # To update a deployment to use the latest images available from the Docker Hub add the `update` # argument after the bridge name. @@ -30,7 +30,6 @@ function show_help () { echo Error: $1 echo " " echo "Usage:" - echo " ./run.sh poa-rialto [stop|update] Run PoA <> Rialto Networks & Bridge" echo " ./run.sh rialto-millau [stop|update] Run Rialto <> Millau Networks & Bridge" echo " ./run.sh westend-millau [stop|update] Run Westend -> Millau Networks & Bridge" echo " " @@ -39,13 +38,12 @@ function show_help () { echo " --no-ui Disable UI" echo " " echo "You can start multiple bridges at once by passing several bridge names:" - echo " ./run.sh poa-rialto rialto-millau westend-millau [stop|update]" + echo " ./run.sh rialto-millau westend-millau [stop|update]" exit 1 } RIALTO=' -f ./networks/rialto.yml -f ./networks/rialto-parachain.yml' MILLAU=' -f ./networks/millau.yml' -ETH_POA=' -f ./networks/eth-poa.yml' MONITORING=' -f ./monitoring/docker-compose.yml' UI=' -f ./ui/docker-compose.yml' @@ -65,14 +63,6 @@ do shift continue ;; - poa-rialto) - BRIDGES+=($i) - NETWORKS+=${RIALTO} - RIALTO='' - NETWORKS+=${ETH_POA} - ETH_POA='' - shift - ;; rialto-millau) BRIDGES+=($i) NETWORKS+=${RIALTO} diff --git a/deployments/ui/docker-compose.yml b/deployments/ui/docker-compose.yml index 0a35fa98e234..8b3f8178c36e 100644 --- a/deployments/ui/docker-compose.yml +++ b/deployments/ui/docker-compose.yml @@ -7,7 +7,7 @@ services: VIRTUAL_PORT: 80 LETSENCRYPT_HOST: ui.brucke.link LETSENCRYPT_EMAIL: admin@parity.io - CHAIN_1_SUBSTRATE_PROVIDER: ws://localhost:9944 - CHAIN_2_SUBSTRATE_PROVIDER: ws://localhost:19944 + CHAIN_1_SUBSTRATE_PROVIDER: ${UI_CHAIN_1:-ws://localhost:9944} + CHAIN_2_SUBSTRATE_PROVIDER: ${UI_CHAIN_2:-ws://localhost:19944} ports: - "8080:80" diff --git a/diagrams/ARCHITECTURE.md b/diagrams/ARCHITECTURE.md deleted file mode 100644 index 6da88c448c95..000000000000 --- a/diagrams/ARCHITECTURE.md +++ /dev/null @@ -1,13 +0,0 @@ -# Bridge Architecture Diagrams - -## Bridge Relay -![General Overview](general-overview.svg) -![Bridge Relay Node](bridge-relay.svg) - -## Runtime Modules -![Ethereum Pallet](ethereum-pallet.svg) -![Currency Exchange Pallet](currency-exchange-pallet.svg) - -## Usage -![Cross Chain Fund Transfer](cross-chain-fund-transfer.svg) -![Parachain](parachain.svg) diff --git a/diagrams/bridge-architecture-diagrams.drawio b/diagrams/bridge-architecture-diagrams.drawio deleted file mode 100644 index bf073129c297..000000000000 --- a/diagrams/bridge-architecture-diagrams.drawio +++ /dev/null @@ -1 +0,0 @@ 
-[compressed draw.io XML payload omitted] diff --git a/diagrams/bridge-relay.svg b/diagrams/bridge-relay.svg deleted file mode 100644 index 2907a7c7fce9..000000000000 --- a/diagrams/bridge-relay.svg +++ /dev/null @@ -1,3 +0,0 @@ -[SVG markup omitted; diagram text: Source, Target, Sync Loop, Source Client, Target Client, RPC, run(source=sub, target=eth), run(source=eth, target=sub), Substrate Sync Loop, Ethereum Sync Loop, Process Method Results, Update Target Methods, Update Source Methods]
diff --git a/diagrams/cross-chain-fund-transfer.svg b/diagrams/cross-chain-fund-transfer.svg deleted file mode 100644 index 5fd9ced1d436..000000000000 --- a/diagrams/cross-chain-fund-transfer.svg +++ /dev/null @@ -1,3 +0,0 @@ -[SVG markup omitted; diagram text: Ethereum, Substrate, Actor, Bridge Relay, 1. Send Lock Tx, 2. Emit Event, 3. Read Event, 4. Send Tx Proof, 5. Grant Funds]
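The deleted fund-transfer diagram enumerated five steps: the actor sends a lock transaction, the source chain emits an event, the relay reads the event, the relay submits a transaction proof, and the target chain grants funds. A toy end-to-end sketch of that sequence follows; all types and the depth-of-finality check are illustrative stand-ins, not the pallet's real data structures.

```rust
#[derive(Clone, Debug)]
struct LockEvent {
    transfer_id: u64,
    recipient: u64,
    amount: u64,
}

/// Stand-in for an inclusion proof of the lock transaction.
#[derive(Clone, Debug)]
struct TxProof {
    finalized: bool,
    event: LockEvent,
}

// Steps 1-2: the actor sends the lock tx; the source chain emits an event.
fn send_lock_tx(recipient: u64, amount: u64) -> LockEvent {
    LockEvent { transfer_id: 1, recipient, amount }
}

// Steps 3-4: the relay reads the event and builds a proof for the target chain.
fn relay(event: LockEvent) -> TxProof {
    TxProof { finalized: true, event }
}

// Step 5: the target chain verifies the proof and grants funds.
fn grant_funds(proof: TxProof) -> Result<(), &'static str> {
    if !proof.finalized {
        return Err("lock tx is not in a finalized block");
    }
    println!(
        "transfer {}: deposit {} to account {}",
        proof.event.transfer_id, proof.event.amount, proof.event.recipient
    );
    Ok(())
}

fn main() {
    let event = send_lock_tx(42, 1_000);
    grant_funds(relay(event)).unwrap();
}
```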
diff --git a/diagrams/currency-exchange-pallet.svg b/diagrams/currency-exchange-pallet.svg deleted file mode 100644 index 1f1b2ef7b5ce..000000000000 --- a/diagrams/currency-exchange-pallet.svg +++ /dev/null @@ -1,3 +0,0 @@ -[SVG markup omitted; diagram text: Transaction, Parse Transaction, Is part of a finalized block?, Have funds already been claimed?, Convert from foreign currency into local currency (a price feed would be needed for this), Deposit into recipient account, Reward Submitter, End, plus Yes/No branch labels]
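The conversion step in the deleted flowchart noted that a price feed would be needed to convert foreign currency into local currency; the pallet delegates this to its `CurrencyConverter` trait. A minimal sketch of one way such a converter could be backed by a rate source, using integer fixed-point arithmetic; the `RateSource` trait and parts-per-million representation are assumptions for the example, not the pallet's API.

```rust
/// Conversion rate expressed as parts-per-million, to stay in integer arithmetic.
trait RateSource {
    fn foreign_to_local_ppm(&self) -> u128;
}

/// A fixed rate; a production feed would update this from an oracle.
struct StaticFeed {
    ppm: u128,
}

impl RateSource for StaticFeed {
    fn foreign_to_local_ppm(&self) -> u128 {
        self.ppm
    }
}

/// Convert a foreign amount into local currency, failing on overflow.
fn convert<R: RateSource>(feed: &R, foreign: u128) -> Option<u128> {
    foreign
        .checked_mul(feed.foreign_to_local_ppm())?
        .checked_div(1_000_000)
}

fn main() {
    // 1 foreign unit = 2.5 local units.
    let feed = StaticFeed { ppm: 2_500_000 };
    assert_eq!(convert(&feed, 100), Some(250));
    println!("converted: {:?}", convert(&feed, 100));
}
```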
diff --git a/diagrams/ethereum-pallet.svg b/diagrams/ethereum-pallet.svg deleted file mode 100644 index 934255be2260..000000000000 --- a/diagrams/ethereum-pallet.svg +++ /dev/null @@ -1,3 +0,0 @@ -[SVG markup omitted; diagram text: Header, Import Signed Header, Import Unsigned Header, Is Signed, Verify Header, Check for Authority Set Changes, Check if new header finalizes old headers, Insert Header into Storage, Mark Headers as Finalized, Prune Old Headers, Did we finalize any headers, Did we receive valid headers?, Count Valid and Invalid Headers, Track Good Submitter, Punish Bad Submitter, Reward Submitter, Imported Block Hash + Finalized Headers, New Header, End, plus Yes/No branch labels]
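The deleted ethereum-pallet flowchart walked each incoming header through verification, a finality check, storage insertion, and pruning. A condensed sketch of that pipeline over an in-memory map follows; the depth-2 finality rule, `PRUNE_DEPTH`, and the trivial verification check are toy stand-ins for the pallet's AuRa seal and validator-set logic, and the authority-set-change step is omitted.

```rust
use std::collections::BTreeMap;

#[derive(Default)]
struct Headers {
    /// block number -> finalized?
    by_number: BTreeMap<u64, bool>,
    finalized_up_to: u64,
}

const PRUNE_DEPTH: u64 = 10;

impl Headers {
    /// Import one header; returns the new best-finalized number.
    fn import_header(&mut self, number: u64) -> Result<u64, &'static str> {
        // "Verify Header": toy integrity check standing in for the PoA seal checks.
        if number <= self.finalized_up_to {
            return Err("header is not ahead of best finalized");
        }
        // "Check if new header finalizes old headers" (toy rule: depth-2 finality).
        let newly_finalized = number.saturating_sub(2).max(self.finalized_up_to);
        // "Mark Headers as Finalized".
        for (_, finalized) in self.by_number.range_mut(..=newly_finalized) {
            *finalized = true;
        }
        self.finalized_up_to = newly_finalized;
        // "Insert Header into Storage".
        self.by_number.insert(number, false);
        // "Prune Old Headers" below a fixed depth under best finalized.
        let cutoff = self.finalized_up_to.saturating_sub(PRUNE_DEPTH);
        self.by_number.retain(|n, _| *n >= cutoff);
        Ok(newly_finalized)
    }
}

fn main() {
    let mut store = Headers::default();
    for n in 1..=5 {
        let finalized = store.import_header(n).unwrap();
        println!("imported #{n}; finalized up to #{finalized}");
    }
}
```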
diff --git a/diagrams/general-overview.svg b/diagrams/general-overview.svg deleted file mode 100644 index d7706893ab9d..000000000000 --- a/diagrams/general-overview.svg +++ /dev/null @@ -1,3 +0,0 @@ -[SVG markup omitted; diagram text: Bridge Relay, Solidity Smart Contract, Grandpa Built-In, Ethereum PoA Network, Substrate Node, Ethereum Runtime Module, Substrate Runtime Module, Currency Exchange Runtime Module]
diff --git a/diagrams/parachain.svg b/diagrams/parachain.svg deleted file mode 100644 index a1a15f172cf0..000000000000 --- a/diagrams/parachain.svg +++ /dev/null @@ -1,3 +0,0 @@ -[SVG markup omitted; diagram text: Polkadot, Substrate Based Chain A, Substrate Based Chain B, Ethereum PoA Chain, Bridge Relays, Ethereum Runtime Module, Substrate Runtime Module, Currency Exchange Runtime Module]
diff --git a/docs/poa-eth.md b/docs/poa-eth.md deleted file mode 100644 index 43b30f8bb737..000000000000 --- a/docs/poa-eth.md +++ /dev/null @@ -1,71 +0,0 @@ -# PoA Ethereum High Level Documentation - -NOTE: This is from the old README - -### Ethereum Bridge Runtime Module -The main job of this runtime module is to keep track of useful information about an Ethereum PoA chain -which has been submitted by a bridge relayer. This includes: - - Ethereum headers and their status (e.g. are they the best header, are they finalized, etc.) - - Current validator set, and upcoming validator sets - -This runtime module has more responsibilities than simply storing headers and validator sets. It is -able to perform checks on the incoming headers to verify their general integrity, as well as whether -or not they've been finalized by the authorities on the PoA chain. - -This module is laid out as so: - -``` -├── ethereum -│   └── src -│       ├── error.rs        // Runtime error handling -│       ├── finality.rs     // Manage finality operations -│       ├── import.rs       // Import new Ethereum headers -│       ├── lib.rs          // Store headers and validator set info -│       ├── validators.rs   // Track current and future PoA validator sets -│       └── verification.rs // Verify validity of incoming Ethereum headers -``` - -### Currency Exchange Runtime Module -The currency exchange module is used to facilitate cross-chain funds transfers. It works by accepting -a transaction which proves that funds were locked on one chain, and releases a corresponding amount -of funds on the receiving chain. - -For example: Alice would like to send funds from chain A to chain B. What she would do is send a -transaction to chain A indicating that she would like to send funds to an address on chain B. This -transaction would contain the amount of funds she would like to send, as well as the address of the -recipient on chain B. These funds would now be locked on chain A. Once the block containing this -"locked-funds" transaction is finalized it can be relayed to chain B. Chain B will verify that this -transaction was included in a finalized block on chain A, and if successful deposit funds into the -recipient account on chain B. - -Chain B would need a way to convert from a foreign currency to its local currency. How this is done -is left to the runtime developer for chain B. - -This module is one example of how an on-chain light client can be used to prove a particular action -was taken on a foreign chain. In particular it enables transfers of the foreign chain's native -currency, but more sophisticated modules such as ERC20 token transfers or arbitrary message transfers -are being worked on as well. - -## Ethereum Node -On the Ethereum side of things, we require two things: first, a Solidity smart contract to track the -Substrate headers which have been submitted to the bridge (by the relay); and second, a built-in contract to -be able to verify that headers have been finalized by the GRANDPA finality gadget. Together these -allow the Ethereum PoA chain to verify the integrity and finality of incoming Substrate headers. - -The Solidity smart contract is not part of this repo, but can be found [here](https://github.com/svyatonik/substrate-bridge-sol/blob/master/substrate-bridge.sol) if you're -curious. We have the contract ABI in the `ethereum/relays/res` directory. - -## Rialto Runtime -The node runtime consists of several runtime modules; however, not all of them are used at the same -time.
When running an Ethereum PoA to Substrate bridge, the required modules are the Ethereum module -and the currency exchange module. When running a Substrate to Substrate bridge, the Substrate and -currency exchange modules are required. - -Below is a brief description of each of the runtime modules. - -## Bridge Relay -The bridge relay is responsible for syncing the chains which are being bridged, and passing messages -between them. The current implementation of the relay supports syncing and interacting with -Ethereum PoA and Substrate chains. diff --git a/modules/currency-exchange/Cargo.toml b/modules/currency-exchange/Cargo.toml deleted file mode 100644 index dc06a342cc8e..000000000000 --- a/modules/currency-exchange/Cargo.toml +++ /dev/null @@ -1,50 +0,0 @@ -[package] -name = "pallet-bridge-currency-exchange" -description = "A Substrate Runtime module that accepts 'lock funds' transactions from a peer chain and grants an equivalent amount to the appropriate Substrate account." -version = "0.1.0" -authors = ["Parity Technologies <admin@parity.io>"] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false } -log = { version = "0.4.14", default-features = false } -scale-info = { version = "1.0", default-features = false, features = ["derive"] } -serde = { version = "1.0", optional = true } - -# Bridge dependencies - -bp-currency-exchange = { path = "../../primitives/currency-exchange", default-features = false } -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } - -# Substrate Dependencies - -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } - -[dev-dependencies] -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } - -[features] -default = ["std"] -std = [ - "bp-currency-exchange/std", - "bp-header-chain/std", - "codec/std", - "frame-benchmarking/std", - "frame-support/std", - "frame-system/std", - "log/std", - "scale-info/std", - "serde", - "sp-runtime/std", - "sp-std/std", -] -runtime-benchmarks = [ - "frame-benchmarking", - "sp-std", -] diff --git a/modules/currency-exchange/src/benchmarking.rs b/modules/currency-exchange/src/benchmarking.rs deleted file mode 100644 index 813c1bfe884d..000000000000 --- a/modules/currency-exchange/src/benchmarking.rs +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version.
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>. - -//! Exchange module complexity is mostly determined by callbacks, defined by the runtime. -//! So we are giving the runtime an opportunity to prepare the environment and construct the proof -//! before invoking module calls. - -use super::{ - Call, Config as CurrencyExchangeConfig, InclusionProofVerifier, - Pallet as CurrencyExchangePallet, -}; -use sp_std::prelude::*; - -use frame_benchmarking::{account, benchmarks_instance_pallet}; -use frame_system::RawOrigin; - -const SEED: u32 = 0; -const WORST_TX_SIZE_FACTOR: u32 = 1000; -const WORST_PROOF_SIZE_FACTOR: u32 = 1000; - -/// Pallet we're benchmarking here. -pub struct Pallet<T: Config<I>, I: 'static>(CurrencyExchangePallet<T, I>); - -/// Proof benchmarking parameters. -pub struct ProofParams<Recipient> { - /// Funds recipient. - pub recipient: Recipient, - /// When true, recipient must exist before import. - pub recipient_exists: bool, - /// When 0, transaction should have minimal possible size. When this value has non-zero value - /// n, transaction size should be (if possible) near to MIN_SIZE + n * SIZE_FACTOR. - pub transaction_size_factor: u32, - /// When 0, proof should have minimal possible size. When this value has non-zero value n, - /// proof size should be (if possible) near to MIN_SIZE + n * SIZE_FACTOR. - pub proof_size_factor: u32, -} - -/// Config that must be implemented by runtime. -pub trait Config<I: 'static>: CurrencyExchangeConfig<I> { - /// Prepare proof for importing exchange transaction. - fn make_proof( - proof_params: ProofParams<Self::AccountId>, - ) -> <<Self as CurrencyExchangeConfig<I>>::PeerBlockchain as InclusionProofVerifier>::TransactionInclusionProof; -} - -benchmarks_instance_pallet! { - // Benchmark `import_peer_transaction` extrinsic with the best possible conditions: - // * Proof is the transaction itself. - // * Transaction has minimal size. - // * Recipient account exists. - import_peer_transaction_best_case { - let i in 1..100; - - let recipient: T::AccountId = account("recipient", i, SEED); - let proof = T::make_proof(ProofParams { - recipient: recipient.clone(), - recipient_exists: true, - transaction_size_factor: 0, - proof_size_factor: 0, - }); - }: import_peer_transaction(RawOrigin::Signed(recipient), proof) - - // Benchmark `import_peer_transaction` extrinsic when the recipient account does not exist. - import_peer_transaction_when_recipient_does_not_exists { - let i in 1..100; - - let recipient: T::AccountId = account("recipient", i, SEED); - let proof = T::make_proof(ProofParams { - recipient: recipient.clone(), - recipient_exists: false, - transaction_size_factor: 0, - proof_size_factor: 0, - }); - }: import_peer_transaction(RawOrigin::Signed(recipient), proof) - - // Benchmark `import_peer_transaction` when transaction size increases. - import_peer_transaction_when_transaction_size_increases { - let i in 1..100; - let n in 1..WORST_TX_SIZE_FACTOR; - - let recipient: T::AccountId = account("recipient", i, SEED); - let proof = T::make_proof(ProofParams { - recipient: recipient.clone(), - recipient_exists: true, - transaction_size_factor: n, - proof_size_factor: 0, - }); - }: import_peer_transaction(RawOrigin::Signed(recipient), proof) - - // Benchmark `import_peer_transaction` when proof size increases.
- import_peer_transaction_when_proof_size_increases { - let i in 1..100; - let n in 1..WORST_PROOF_SIZE_FACTOR; - - let recipient: T::AccountId = account("recipient", i, SEED); - let proof = T::make_proof(ProofParams { - recipient: recipient.clone(), - recipient_exists: true, - transaction_size_factor: 0, - proof_size_factor: n, - }); - }: import_peer_transaction(RawOrigin::Signed(recipient), proof) - - // Benchmark `import_peer_transaction` extrinsic with the worst possible conditions: - // * Proof is large. - // * Transaction has large size. - // * Recipient account does not exist. - import_peer_transaction_worst_case { - let i in 1..100; - let m in WORST_TX_SIZE_FACTOR..WORST_TX_SIZE_FACTOR+1; - let n in WORST_PROOF_SIZE_FACTOR..WORST_PROOF_SIZE_FACTOR+1; - - let recipient: T::AccountId = account("recipient", i, SEED); - let proof = T::make_proof(ProofParams { - recipient: recipient.clone(), - recipient_exists: false, - transaction_size_factor: m, - proof_size_factor: n, - }); - }: import_peer_transaction(RawOrigin::Signed(recipient), proof) - -} diff --git a/modules/currency-exchange/src/lib.rs b/modules/currency-exchange/src/lib.rs deleted file mode 100644 index 31b789dd97e7..000000000000 --- a/modules/currency-exchange/src/lib.rs +++ /dev/null @@ -1,514 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>. - -//! Runtime module that allows token exchange between two bridged chains. - -#![cfg_attr(not(feature = "std"), no_std)] - -use bp_currency_exchange::{ - CurrencyConverter, DepositInto, Error as ExchangeError, MaybeLockFundsTransaction, - RecipientsMap, -}; -use bp_header_chain::InclusionProofVerifier; -use frame_support::ensure; - -#[cfg(feature = "runtime-benchmarks")] -pub mod benchmarking; - -/// Called when transaction is submitted to the exchange module. -pub trait OnTransactionSubmitted<AccountId> { - /// Called when valid transaction is submitted and accepted by the module. - fn on_valid_transaction_submitted(submitter: AccountId); -} - -pub use pallet::*; - -#[frame_support::pallet] -pub mod pallet { - use super::*; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - #[pallet::config] - pub trait Config<I: 'static = ()>: frame_system::Config { - /// Handler for transaction submission result. - type OnTransactionSubmitted: OnTransactionSubmitted<Self::AccountId>; - /// Represents the blockchain that we'll be exchanging currency with. - type PeerBlockchain: InclusionProofVerifier; - /// Peer blockchain transaction parser. - type PeerMaybeLockFundsTransaction: MaybeLockFundsTransaction< - Transaction = <Self::PeerBlockchain as InclusionProofVerifier>::Transaction, - >; - /// Map between the two blockchains' recipients. - type RecipientsMap: RecipientsMap< - PeerRecipient = <Self::PeerMaybeLockFundsTransaction as MaybeLockFundsTransaction>::Recipient, - Recipient = Self::AccountId, - >; - /// This blockchain currency amount type.
- type Amount; - /// Converter from peer blockchain currency type into current blockchain currency type. - type CurrencyConverter: CurrencyConverter< - SourceAmount = ::Amount, - TargetAmount = Self::Amount, - >; - /// Something that could grant money. - type DepositInto: DepositInto; - } - - #[pallet::pallet] - #[pallet::generate_store(pub(super) trait Store)] - pub struct Pallet(PhantomData<(T, I)>); - - #[pallet::hooks] - impl, I: 'static> Hooks> for Pallet {} - - #[pallet::call] - impl, I: 'static> Pallet { - /// Imports lock fund transaction of the peer blockchain. - #[pallet::weight(0)] // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - pub fn import_peer_transaction( - origin: OriginFor, - proof: <>::PeerBlockchain as InclusionProofVerifier>::TransactionInclusionProof, - ) -> DispatchResult { - let submitter = frame_system::ensure_signed(origin)?; - - // verify and parse transaction proof - let deposit = prepare_deposit_details::(&proof)?; - - // make sure to update the mapping if we deposit successfully to avoid double spending, - // i.e. whenever `deposit_into` is successful we MUST update `Transfers`. - { - // if any changes were made to the storage, we can't just return error here, because - // otherwise the same proof may be imported again - let deposit_result = - T::DepositInto::deposit_into(deposit.recipient, deposit.amount); - match deposit_result { - Ok(_) => (), - Err(ExchangeError::DepositPartiallyFailed) => (), - Err(error) => return Err(Error::::from(error).into()), - } - Transfers::::insert(&deposit.transfer_id, ()) - } - - // reward submitter for providing valid message - T::OnTransactionSubmitted::on_valid_transaction_submitted(submitter); - - log::trace!( - target: "runtime", - "Completed currency exchange: {:?}", - deposit.transfer_id, - ); - - Ok(()) - } - } - - #[pallet::error] - pub enum Error { - /// Invalid peer blockchain transaction provided. - InvalidTransaction, - /// Peer transaction has invalid amount. - InvalidAmount, - /// Peer transaction has invalid recipient. - InvalidRecipient, - /// Cannot map from peer recipient to this blockchain recipient. - FailedToMapRecipients, - /// Failed to convert from peer blockchain currency to this blockchain currency. - FailedToConvertCurrency, - /// Deposit has failed. - DepositFailed, - /// Deposit has partially failed (changes to recipient account were made). - DepositPartiallyFailed, - /// Transaction is not finalized. - UnfinalizedTransaction, - /// Transaction funds are already claimed. - AlreadyClaimed, - } - - /// All transfers that have already been claimed. - #[pallet::storage] - pub(super) type Transfers, I: 'static = ()> = StorageMap< - _, - Blake2_128Concat, - ::Id, - (), - ValueQuery, - >; -} - -impl, I: 'static> Pallet { - /// Returns true if currency exchange module is able to import given transaction proof in - /// its current state. 
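[Editor's note: the "whenever `deposit_into` is successful we MUST update `Transfers`" rule above is the pallet's double-spend guard. Here is a minimal stand-in with invented names (`Exchange`, `ImportError`), not the pallet's actual storage API, that captures the claim-once semantics; `filter_transaction_proof` continues below.]

use std::collections::HashSet;

// Minimal stand-in for the pallet's `Transfers` map: once a transfer id is
// claimed, re-importing the same proof must fail.
#[derive(Debug, PartialEq)]
enum ImportError {
    AlreadyClaimed,
    DepositFailed,
}

struct Exchange {
    claimed: HashSet<u64>,
}

impl Exchange {
    fn import(
        &mut self,
        transfer_id: u64,
        deposit: impl FnOnce() -> Result<(), ImportError>,
    ) -> Result<(), ImportError> {
        if self.claimed.contains(&transfer_id) {
            return Err(ImportError::AlreadyClaimed);
        }
        // Deposit first, then mark the transfer as claimed. Note the real
        // pallet also records the transfer on a *partial* deposit failure;
        // this sketch only models full success.
        deposit()?;
        self.claimed.insert(transfer_id);
        Ok(())
    }
}

fn main() {
    let mut exchange = Exchange { claimed: HashSet::new() };
    assert_eq!(exchange.import(42, || Ok(())), Ok(()));
    assert_eq!(exchange.import(42, || Ok(())), Err(ImportError::AlreadyClaimed));
    // A fully failed deposit leaves the id unclaimed, so it can be retried.
    assert_eq!(exchange.import(7, || Err(ImportError::DepositFailed)), Err(ImportError::DepositFailed));
    assert_eq!(exchange.import(7, || Ok(())), Ok(()));
}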
- pub fn filter_transaction_proof( - proof: &::TransactionInclusionProof, - ) -> bool { - if let Err(err) = prepare_deposit_details::(proof) { - log::trace!( - target: "runtime", - "Can't accept exchange transaction: {:?}", - err, - ); - - return false - } - - true - } -} - -impl, I: 'static> From for Error { - fn from(error: ExchangeError) -> Self { - match error { - ExchangeError::InvalidTransaction => Error::InvalidTransaction, - ExchangeError::InvalidAmount => Error::InvalidAmount, - ExchangeError::InvalidRecipient => Error::InvalidRecipient, - ExchangeError::FailedToMapRecipients => Error::FailedToMapRecipients, - ExchangeError::FailedToConvertCurrency => Error::FailedToConvertCurrency, - ExchangeError::DepositFailed => Error::DepositFailed, - ExchangeError::DepositPartiallyFailed => Error::DepositPartiallyFailed, - } - } -} - -impl OnTransactionSubmitted for () { - fn on_valid_transaction_submitted(_: AccountId) {} -} - -/// Exchange deposit details. -struct DepositDetails, I: 'static> { - /// Transfer id. - pub transfer_id: ::Id, - /// Transfer recipient. - pub recipient: ::Recipient, - /// Transfer amount. - pub amount: ::TargetAmount, -} - -/// Verify and parse transaction proof, preparing everything required for importing -/// this transaction proof. -fn prepare_deposit_details, I: 'static>( - proof: &<>::PeerBlockchain as InclusionProofVerifier>::TransactionInclusionProof, -) -> Result, Error> { - // ensure that transaction is included in finalized block that we know of - let transaction = >::PeerBlockchain::verify_transaction_inclusion_proof(proof) - .ok_or(Error::::UnfinalizedTransaction)?; - - // parse transaction - let transaction = >::PeerMaybeLockFundsTransaction::parse(&transaction) - .map_err(Error::::from)?; - let transfer_id = transaction.id; - ensure!(!Transfers::::contains_key(&transfer_id), Error::::AlreadyClaimed); - - // grant recipient - let recipient = T::RecipientsMap::map(transaction.recipient).map_err(Error::::from)?; - let amount = T::CurrencyConverter::convert(transaction.amount).map_err(Error::::from)?; - - Ok(DepositDetails { transfer_id, recipient, amount }) -} - -#[cfg(test)] -mod tests { - // From construct_runtime macro - #![allow(clippy::from_over_into)] - - use super::*; - use bp_currency_exchange::LockFundsTransaction; - use frame_support::{ - assert_noop, assert_ok, construct_runtime, parameter_types, weights::Weight, - }; - use sp_core::H256; - use sp_runtime::{ - testing::Header, - traits::{BlakeTwo256, IdentityLookup}, - Perbill, - }; - - type AccountId = u64; - - const INVALID_TRANSACTION_ID: u64 = 100; - const ALREADY_CLAIMED_TRANSACTION_ID: u64 = 101; - const UNKNOWN_RECIPIENT_ID: u64 = 0; - const INVALID_AMOUNT: u64 = 0; - const MAX_DEPOSIT_AMOUNT: u64 = 1000; - const SUBMITTER: u64 = 2000; - - type RawTransaction = LockFundsTransaction; - - pub struct DummyTransactionSubmissionHandler; - - impl OnTransactionSubmitted for DummyTransactionSubmissionHandler { - fn on_valid_transaction_submitted(submitter: AccountId) { - Transfers::::insert(submitter, ()); - } - } - - pub struct DummyBlockchain; - - impl InclusionProofVerifier for DummyBlockchain { - type Transaction = RawTransaction; - type TransactionInclusionProof = (bool, RawTransaction); - - fn verify_transaction_inclusion_proof( - proof: &Self::TransactionInclusionProof, - ) -> Option { - if proof.0 { - Some(proof.1.clone()) - } else { - None - } - } - } - - pub struct DummyTransaction; - - impl MaybeLockFundsTransaction for DummyTransaction { - type Transaction = RawTransaction; - type 
Id = u64; - type Recipient = AccountId; - type Amount = u64; - - fn parse(tx: &Self::Transaction) -> bp_currency_exchange::Result { - match tx.id { - INVALID_TRANSACTION_ID => Err(ExchangeError::InvalidTransaction), - _ => Ok(tx.clone()), - } - } - } - - pub struct DummyRecipientsMap; - - impl RecipientsMap for DummyRecipientsMap { - type PeerRecipient = AccountId; - type Recipient = AccountId; - - fn map( - peer_recipient: Self::PeerRecipient, - ) -> bp_currency_exchange::Result { - match peer_recipient { - UNKNOWN_RECIPIENT_ID => Err(ExchangeError::FailedToMapRecipients), - _ => Ok(peer_recipient * 10), - } - } - } - - pub struct DummyCurrencyConverter; - - impl CurrencyConverter for DummyCurrencyConverter { - type SourceAmount = u64; - type TargetAmount = u64; - - fn convert(amount: Self::SourceAmount) -> bp_currency_exchange::Result { - match amount { - INVALID_AMOUNT => Err(ExchangeError::FailedToConvertCurrency), - _ => Ok(amount * 10), - } - } - } - - pub struct DummyDepositInto; - - impl DepositInto for DummyDepositInto { - type Recipient = AccountId; - type Amount = u64; - - fn deposit_into( - _recipient: Self::Recipient, - amount: Self::Amount, - ) -> bp_currency_exchange::Result<()> { - match amount { - amount if amount < MAX_DEPOSIT_AMOUNT * 10 => Ok(()), - amount if amount == MAX_DEPOSIT_AMOUNT * 10 => - Err(ExchangeError::DepositPartiallyFailed), - _ => Err(ExchangeError::DepositFailed), - } - } - } - - type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; - type Block = frame_system::mocking::MockBlock; - use crate as pallet_bridge_currency_exchange; - - construct_runtime! { - pub enum TestRuntime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Exchange: pallet_bridge_currency_exchange::{Pallet}, - } - } - - parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - - impl frame_system::Config for TestRuntime { - type Origin = Origin; - type Index = u64; - type Call = Call; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type SS58Prefix = (); - type OnSetCode = (); - } - - impl Config for TestRuntime { - type OnTransactionSubmitted = DummyTransactionSubmissionHandler; - type PeerBlockchain = DummyBlockchain; - type PeerMaybeLockFundsTransaction = DummyTransaction; - type RecipientsMap = DummyRecipientsMap; - type Amount = u64; - type CurrencyConverter = DummyCurrencyConverter; - type DepositInto = DummyDepositInto; - } - - fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - sp_io::TestExternalities::new(t) - } - - fn transaction(id: u64) -> RawTransaction { - RawTransaction { id, recipient: 1, amount: 2 } - } - - #[test] - fn unfinalized_transaction_rejected() { - new_test_ext().execute_with(|| { - assert_noop!( - Exchange::import_peer_transaction( - Origin::signed(SUBMITTER), - (false, transaction(0)) - ), - Error::::UnfinalizedTransaction, - ); - }); - } - - #[test] - fn invalid_transaction_rejected() { - new_test_ext().execute_with(|| { - assert_noop!( - Exchange::import_peer_transaction( - Origin::signed(SUBMITTER), - (true, transaction(INVALID_TRANSACTION_ID)), - ), - Error::::InvalidTransaction, - ); - }); - } - - #[test] - fn claimed_transaction_rejected() { - new_test_ext().execute_with(|| { - ::Transfers::insert(ALREADY_CLAIMED_TRANSACTION_ID, ()); - assert_noop!( - Exchange::import_peer_transaction( - Origin::signed(SUBMITTER), - (true, transaction(ALREADY_CLAIMED_TRANSACTION_ID)), - ), - Error::::AlreadyClaimed, - ); - }); - } - - #[test] - fn transaction_with_unknown_recipient_rejected() { - new_test_ext().execute_with(|| { - let mut transaction = transaction(0); - transaction.recipient = UNKNOWN_RECIPIENT_ID; - assert_noop!( - Exchange::import_peer_transaction(Origin::signed(SUBMITTER), (true, transaction)), - Error::::FailedToMapRecipients, - ); - }); - } - - #[test] - fn transaction_with_invalid_amount_rejected() { - new_test_ext().execute_with(|| { - let mut transaction = transaction(0); - transaction.amount = INVALID_AMOUNT; - assert_noop!( - Exchange::import_peer_transaction(Origin::signed(SUBMITTER), (true, transaction)), - Error::::FailedToConvertCurrency, - ); - }); - } - - #[test] - fn transaction_with_invalid_deposit_rejected() { - new_test_ext().execute_with(|| { - let mut transaction = transaction(0); - transaction.amount = MAX_DEPOSIT_AMOUNT + 1; - assert_noop!( - Exchange::import_peer_transaction(Origin::signed(SUBMITTER), (true, transaction)), - Error::::DepositFailed, - ); - }); - } - - #[test] - fn valid_transaction_accepted_even_if_deposit_partially_fails() { - new_test_ext().execute_with(|| { - let mut transaction = transaction(0); - transaction.amount = MAX_DEPOSIT_AMOUNT; - 
assert_ok!(Exchange::import_peer_transaction( - Origin::signed(SUBMITTER), - (true, transaction), - ),); - - // ensure that the transfer has been marked as completed - assert!(::Transfers::contains_key(0u64)); - // ensure that submitter has been rewarded - assert!(::Transfers::contains_key(SUBMITTER)); - }); - } - - #[test] - fn valid_transaction_accepted() { - new_test_ext().execute_with(|| { - assert_ok!(Exchange::import_peer_transaction( - Origin::signed(SUBMITTER), - (true, transaction(0)), - ),); - - // ensure that the transfer has been marked as completed - assert!(::Transfers::contains_key(0u64)); - // ensure that submitter has been rewarded - assert!(::Transfers::contains_key(SUBMITTER)); - }); - } -} diff --git a/modules/ethereum-contract-builtin/Cargo.toml b/modules/ethereum-contract-builtin/Cargo.toml deleted file mode 100644 index ffb98bc6bd85..000000000000 --- a/modules/ethereum-contract-builtin/Cargo.toml +++ /dev/null @@ -1,28 +0,0 @@ -[package] -name = "ethereum-contract-builtin" -description = "Small crate that helps Solidity contract to verify finality proof." -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.2.0" } -ethereum-types = "0.12.0" -finality-grandpa = "0.14.0" -hex = "0.4" -log = "0.4.14" - -# Runtime/chain specific dependencies - -rialto-runtime = { path = "../../bin/rialto/runtime" } - -# Substrate Dependencies - -sc-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } - -[dev-dependencies] -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/modules/ethereum-contract-builtin/src/lib.rs b/modules/ethereum-contract-builtin/src/lib.rs deleted file mode 100644 index 4a830f8e0a38..000000000000 --- a/modules/ethereum-contract-builtin/src/lib.rs +++ /dev/null @@ -1,372 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use codec::{Decode, Encode}; -use ethereum_types::U256; -use finality_grandpa::voter_set::VoterSet; -use rialto_runtime::{Block, BlockNumber, Hash, Header as RuntimeHeader}; -use sp_blockchain::Error as ClientError; -use sp_finality_grandpa::{AuthorityList, ConsensusLog, GRANDPA_ENGINE_ID}; - -/// Builtin errors. -#[derive(Debug)] -pub enum Error { - /// Failed to decode block number. - BlockNumberDecode, - /// Failed to decode Substrate header. - HeaderDecode(codec::Error), - /// Failed to decode the best voters set. 
- BestSetDecode(codec::Error), - /// The best voters set is invalid. - InvalidBestSet, - /// Failed to decode finality proof. - FinalityProofDecode(codec::Error), - /// Failed to verify justification. - JustificationVerify(Box), -} - -/// Substrate header. -#[derive(Debug, PartialEq)] -pub struct Header { - /// Header hash. - pub hash: Hash, - /// Parent header hash. - pub parent_hash: Hash, - /// Header number. - pub number: BlockNumber, - /// GRANDPA validators change signal. - pub signal: Option, -} - -/// GRANDPA validators set change signal. -#[derive(Debug, PartialEq)] -pub struct ValidatorsSetSignal { - /// Signal delay. - pub delay: BlockNumber, - /// New validators set. - pub validators: Vec, -} - -/// Convert from U256 to BlockNumber. Fails if `U256` value isn't fitting within `BlockNumber` -/// limits (the runtime referenced by this module uses u32 as `BlockNumber`). -pub fn to_substrate_block_number(number: U256) -> Result { - let substrate_block_number = match number == number.low_u32().into() { - true => Ok(number.low_u32()), - false => Err(Error::BlockNumberDecode), - }; - - log::trace!( - target: "bridge-builtin", - "Parsed Substrate block number from {}: {:?}", - number, - substrate_block_number, - ); - - substrate_block_number -} - -/// Convert from BlockNumber to U256. -pub fn from_substrate_block_number(number: BlockNumber) -> Result { - Ok(U256::from(number as u64)) -} - -/// Parse Substrate header. -pub fn parse_substrate_header(raw_header: &[u8]) -> Result { - let substrate_header = RuntimeHeader::decode(&mut &*raw_header) - .map(|header| Header { - hash: header.hash(), - parent_hash: header.parent_hash, - number: header.number, - signal: sp_runtime::traits::Header::digest(&header) - .log(|log| { - log.as_consensus().and_then(|(engine_id, log)| { - if engine_id == GRANDPA_ENGINE_ID { - Some(log) - } else { - None - } - }) - }) - .and_then(|log| ConsensusLog::decode(&mut &*log).ok()) - .and_then(|log| match log { - ConsensusLog::ScheduledChange(scheduled_change) => Some(ValidatorsSetSignal { - delay: scheduled_change.delay, - validators: scheduled_change.next_authorities.encode(), - }), - _ => None, - }), - }) - .map_err(Error::HeaderDecode); - - log::debug!( - target: "bridge-builtin", - "Parsed Substrate header {}: {:?}", - if substrate_header.is_ok() { - format!("<{}-bytes-blob>", raw_header.len()) - } else { - hex::encode(raw_header) - }, - substrate_header, - ); - - substrate_header -} - -/// Verify GRANDPA finality proof. 
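[Editor's note: before the verification function below, one detail from `to_substrate_block_number` above is worth a standalone illustration: the U256 -> u32 conversion succeeds only if the value round-trips through the low 32 bits. In this sketch `u128` stands in for `ethereum_types::U256` to keep it dependency-free; the error string is illustrative.]

fn to_substrate_block_number(number: u128) -> Result<u32, &'static str> {
    // Same rule as the builtin: accept only values that fit in u32.
    if number == (number as u32) as u128 {
        Ok(number as u32)
    } else {
        Err("BlockNumberDecode")
    }
}

fn main() {
    assert_eq!(to_substrate_block_number(0), Ok(0));
    assert_eq!(to_substrate_block_number(u32::MAX as u128), Ok(u32::MAX));
    assert!(to_substrate_block_number(u32::MAX as u128 + 1).is_err());
}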
-pub fn verify_substrate_finality_proof( - finality_target_number: BlockNumber, - finality_target_hash: Hash, - best_set_id: u64, - raw_best_set: &[u8], - raw_finality_proof: &[u8], -) -> Result<(), Error> { - let best_set = AuthorityList::decode(&mut &*raw_best_set) - .map_err(Error::BestSetDecode) - .and_then(|authorities| { - VoterSet::new(authorities.into_iter()).ok_or(Error::InvalidBestSet) - }); - - log::debug!( - target: "bridge-builtin", - "Parsed Substrate authorities set {}: {:?}", - if best_set.is_ok() { - format!("<{}-bytes-blob>", raw_best_set.len()) - } else { - hex::encode(raw_best_set) - }, - best_set, - ); - - let best_set = best_set?; - - let verify_result = - sc_finality_grandpa::GrandpaJustification::::decode_and_verify_finalizes( - raw_finality_proof, - (finality_target_hash, finality_target_number), - best_set_id, - &best_set, - ) - .map_err(Box::new) - .map_err(Error::JustificationVerify) - .map(|_| ()); - - log::debug!( - target: "bridge-builtin", - "Verified Substrate finality proof {}: {:?}", - if verify_result.is_ok() { - format!("<{}-bytes-blob>", raw_finality_proof.len()) - } else { - hex::encode(raw_finality_proof) - }, - verify_result, - ); - - verify_result -} - -#[cfg(test)] -mod tests { - use super::*; - use rialto_runtime::DigestItem; - use sp_core::crypto::Public; - use sp_finality_grandpa::{AuthorityId, ScheduledChange}; - use sp_runtime::generic::Digest; - - #[test] - fn to_substrate_block_number_succeeds() { - assert_eq!(to_substrate_block_number(U256::zero()).unwrap(), 0); - assert_eq!( - to_substrate_block_number(U256::from(std::u32::MAX as u64)).unwrap(), - 0xFFFFFFFF - ); - } - - #[test] - fn to_substrate_block_number_fails() { - assert!(matches!( - to_substrate_block_number(U256::from(std::u32::MAX as u64 + 1)), - Err(Error::BlockNumberDecode) - )); - } - - #[test] - fn from_substrate_block_number_succeeds() { - assert_eq!(from_substrate_block_number(0).unwrap(), U256::zero()); - assert_eq!(from_substrate_block_number(std::u32::MAX).unwrap(), U256::from(std::u32::MAX)); - } - - #[test] - fn substrate_header_without_signal_parsed() { - let raw_header = RuntimeHeader { - parent_hash: [0u8; 32].into(), - number: 0, - state_root: "b2fc47904df5e355c6ab476d89fbc0733aeddbe302f0b94ba4eea9283f7e89e7" - .parse() - .unwrap(), - extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" - .parse() - .unwrap(), - digest: Default::default(), - } - .encode(); - assert_eq!( - raw_header, - hex::decode("000000000000000000000000000000000000000000000000000000000000000000b2fc47904df5e355c6ab476d89fbc0733aeddbe302f0b94ba4eea9283f7e89e703170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c11131400").unwrap(), - ); - - assert_eq!( - parse_substrate_header(&raw_header).unwrap(), - Header { - hash: "afbbeb92bf6ff14f60bdef0aa89f043dd403659ae82665238810ace0d761f6d0" - .parse() - .unwrap(), - parent_hash: Default::default(), - number: 0, - signal: None, - }, - ); - } - - #[test] - fn substrate_header_with_signal_parsed() { - let authorities = vec![ - (AuthorityId::from_slice(&[1; 32]), 101), - (AuthorityId::from_slice(&[3; 32]), 103), - ]; - let mut digest = Digest::default(); - digest.push(DigestItem::Consensus( - GRANDPA_ENGINE_ID, - ConsensusLog::ScheduledChange(ScheduledChange { - next_authorities: authorities.clone(), - delay: 8, - }) - .encode(), - )); - - let raw_header = RuntimeHeader { - parent_hash: "c0ac300d4005141ea690f3df593e049739c227316eb7f05052f3ee077388b68b" - .parse() - .unwrap(), - number: 8, - state_root: 
"822d6b412033aa9ac8e1722918eec5f25633529225754b3d4149982f5cacd4aa" - .parse() - .unwrap(), - extrinsics_root: "e7b07c0ce2799416ce7877b9cefc7f596bea5e8813bb2a0abf760414073ca928" - .parse() - .unwrap(), - digest, - } - .encode(); - assert_eq!( - raw_header, - hex::decode("c0ac300d4005141ea690f3df593e049739c227316eb7f05052f3ee077388b68b20822d6b412033aa9ac8e1722918eec5f25633529225754b3d4149982f5cacd4aae7b07c0ce2799416ce7877b9cefc7f596bea5e8813bb2a0abf760414073ca928040446524e4b59010108010101010101010101010101010101010101010101010101010101010101010165000000000000000303030303030303030303030303030303030303030303030303030303030303670000000000000008000000").unwrap(), - ); - - assert_eq!( - parse_substrate_header(&raw_header).unwrap(), - Header { - hash: "3dfebb280bd87a4640f89d7f2adecd62b88148747bff5b63af6e1634ee37a56e" - .parse() - .unwrap(), - parent_hash: "c0ac300d4005141ea690f3df593e049739c227316eb7f05052f3ee077388b68b" - .parse() - .unwrap(), - number: 8, - signal: Some(ValidatorsSetSignal { delay: 8, validators: authorities.encode() }), - }, - ); - } - - /// Number of the example block with justification. - const EXAMPLE_JUSTIFIED_BLOCK_NUMBER: u32 = 8; - /// Hash of the example block with justification. - const EXAMPLE_JUSTIFIED_BLOCK_HASH: &str = - "a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f343775"; - /// Id of authorities set that have generated example justification. Could be computed by - /// tracking every set change in canonized headers. - const EXAMPLE_AUTHORITIES_SET_ID: u64 = 0; - /// Encoded authorities set that has generated example justification. Could be fetched from - /// `ScheduledChange` digest of the block that has scheduled this set OR by calling - /// `GrandpaApi::grandpa_authorities()` at appropriate block. - const EXAMPLE_AUTHORITIES_SET: &str = "1488dc3417d5058ec4b4503e0c12ea1a0a89be200fe98922423d4334014fa6b0ee0100000000000000d17c2d7823ebf260fd138f2d7e27d114c0145d968b5ff5006125f2414fadae690100000000000000439660b36c6c03afafca027b910b4fecf99801834c62a5e6006f27d978de234f01000000000000005e639b43e0052c47447dac87d6fd2b6ec50bdd4d0f614e4299c665249bbd09d901000000000000001dfe3e22cc0d45c70779c1095f7489a8ef3cf52d62fbd8c2fa38c9f1723502b50100000000000000"; - /// Example justification. Could be fetched by calling 'chain_getBlock' RPC. 
- const EXAMPLE_JUSTIFICATION: &str = "2600000000000000a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f3437750800000010a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000d66b4ceb57ef8bcbc955071b597c8c5d2adcfdbb009c73f8438d342670fdeca9ac60686cbd58105b10f51d0a64a8e73b2e5829b2eab3248a008c472852130b00439660b36c6c03afafca027b910b4fecf99801834c62a5e6006f27d978de234fa2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000f5730c14d3cd22b7661e2f5fcb3139dd5fef37f946314a441d01b40ce1200ef70d810525f23fd278b588cd67473c200bda83c338c407b479386aa83798e5970b5e639b43e0052c47447dac87d6fd2b6ec50bdd4d0f614e4299c665249bbd09d9a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000c78d6ec463f476461a695b4791d30e7626d16fdf72d7c252c2cad387495a97e8c2827ed4d5af853d6e05d31cb6fb7438c9481a7e9c6990d60a9bfaf6a6e1930988dc3417d5058ec4b4503e0c12ea1a0a89be200fe98922423d4334014fa6b0eea2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f3437750800000052b4fc52d430286b3e2d650aa6e01b6ff4fae8b968893a62be789209eb97ee6e23780d3f5af7042d85bb48f1b202890b22724dfebce138826f66a5e00324320fd17c2d7823ebf260fd138f2d7e27d114c0145d968b5ff5006125f2414fadae6900"; - - #[test] - fn substrate_header_parse_fails() { - assert!(matches!(parse_substrate_header(&[]), Err(_))); - } - - #[test] - fn verify_substrate_finality_proof_succeeds() { - verify_substrate_finality_proof( - EXAMPLE_JUSTIFIED_BLOCK_NUMBER, - EXAMPLE_JUSTIFIED_BLOCK_HASH.parse().unwrap(), - EXAMPLE_AUTHORITIES_SET_ID, - &hex::decode(EXAMPLE_AUTHORITIES_SET).unwrap(), - &hex::decode(EXAMPLE_JUSTIFICATION).unwrap(), - ) - .unwrap(); - } - - #[test] - fn verify_substrate_finality_proof_fails_when_wrong_block_is_finalized() { - verify_substrate_finality_proof( - 4, - Default::default(), - EXAMPLE_AUTHORITIES_SET_ID, - &hex::decode(EXAMPLE_AUTHORITIES_SET).unwrap(), - &hex::decode(EXAMPLE_JUSTIFICATION).unwrap(), - ) - .unwrap_err(); - } - - #[test] - fn verify_substrate_finality_proof_fails_when_wrong_set_is_provided() { - verify_substrate_finality_proof( - EXAMPLE_JUSTIFIED_BLOCK_NUMBER, - EXAMPLE_JUSTIFIED_BLOCK_HASH.parse().unwrap(), - EXAMPLE_AUTHORITIES_SET_ID, - &hex::decode("deadbeef").unwrap(), - &hex::decode(EXAMPLE_JUSTIFICATION).unwrap(), - ) - .unwrap_err(); - } - - #[test] - fn verify_substrate_finality_proof_fails_when_wrong_set_id_is_provided() { - verify_substrate_finality_proof( - EXAMPLE_JUSTIFIED_BLOCK_NUMBER, - EXAMPLE_JUSTIFIED_BLOCK_HASH.parse().unwrap(), - 42, - &hex::decode(EXAMPLE_AUTHORITIES_SET).unwrap(), - &hex::decode(EXAMPLE_JUSTIFICATION).unwrap(), - ) - .unwrap_err(); - } - - #[test] - fn verify_substrate_finality_proof_fails_when_wrong_proof_is_provided() { - verify_substrate_finality_proof( - EXAMPLE_JUSTIFIED_BLOCK_NUMBER, - EXAMPLE_JUSTIFIED_BLOCK_HASH.parse().unwrap(), - 0, - &hex::decode(EXAMPLE_AUTHORITIES_SET).unwrap(), - &hex::decode("deadbeef").unwrap(), - ) - .unwrap_err(); - } -} diff --git a/modules/ethereum/Cargo.toml b/modules/ethereum/Cargo.toml deleted file mode 100644 index baa933611807..000000000000 --- a/modules/ethereum/Cargo.toml +++ /dev/null @@ -1,51 +0,0 @@ -[package] -name = "pallet-bridge-eth-poa" -description = "A Substrate Runtime module that is able to verify PoA headers and their finality." 
-version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false } -libsecp256k1 = { version = "0.7", default-features = false, features = ["hmac"], optional = true } -log = { version = "0.4.14", default-features = false } -scale-info = { version = "1.0", default-features = false, features = ["derive"] } -serde = { version = "1.0", optional = true } - -# Bridge dependencies - -bp-eth-poa = { path = "../../primitives/ethereum-poa", default-features = false } - -# Substrate Dependencies - -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -[dev-dependencies] -libsecp256k1 = { version = "0.7", features = ["hmac"] } -hex-literal = "0.3" - -[features] -default = ["std"] -std = [ - "bp-eth-poa/std", - "codec/std", - "frame-benchmarking/std", - "frame-support/std", - "frame-system/std", - "log/std", - "scale-info/std", - "serde", - "sp-io/std", - "sp-runtime/std", - "sp-std/std", -] -runtime-benchmarks = [ - "frame-benchmarking", - "libsecp256k1", -] diff --git a/modules/ethereum/src/benchmarking.rs b/modules/ethereum/src/benchmarking.rs deleted file mode 100644 index 511cbcac1ade..000000000000 --- a/modules/ethereum/src/benchmarking.rs +++ /dev/null @@ -1,270 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use super::*; - -use crate::test_utils::{ - build_custom_header, build_genesis_header, insert_header, validator_utils::*, - validators_change_receipt, HeaderBuilder, -}; - -use bp_eth_poa::{compute_merkle_root, U256}; -use frame_benchmarking::benchmarks_instance_pallet; -use frame_system::RawOrigin; - -benchmarks_instance_pallet! { - // Benchmark `import_unsigned_header` extrinsic with the best possible conditions: - // * Parent header is finalized. - // * New header doesn't require receipts. - // * Nothing is finalized by new header. - // * Nothing is pruned by new header. 
- import_unsigned_header_best_case { - let n in 1..1000; - - let num_validators = 2; - let initial_header = initialize_bench::(num_validators); - - // prepare header to be inserted - let header = build_custom_header( - &validator(1), - &initial_header, - |mut header| { - header.gas_limit = header.gas_limit + U256::from(n); - header - }, - ); - }: import_unsigned_header(RawOrigin::None, Box::new(header), None) - verify { - let storage = BridgeStorage::::new(); - assert_eq!(storage.best_block().0.number, 1); - assert_eq!(storage.finalized_block().number, 0); - } - - // Our goal with this bench is to try and see the effect that finalizing difference ranges of - // blocks has on our import time. As such we need to make sure that we keep the number of - // validators fixed while changing the number blocks finalized (the complexity parameter) by - // importing the last header. - // - // One important thing to keep in mind is that the runtime provides a finality cache in order to - // reduce the overhead of header finalization. However, this is only triggered every 16 blocks. - import_unsigned_finality { - // Our complexity parameter, n, will represent the number of blocks imported before - // finalization. - let n in 1..7; - - let mut storage = BridgeStorage::::new(); - let num_validators: u32 = 2; - let initial_header = initialize_bench::(num_validators as usize); - - // Since we only have two validators we need to make sure the number of blocks is even to - // make sure the right validator signs the final block - let num_blocks = 2 * n; - let mut headers = Vec::new(); - let mut parent = initial_header.clone(); - - // Import a bunch of headers without any verification, will ensure that they're not - // finalized prematurely - for i in 1..=num_blocks { - let header = HeaderBuilder::with_parent(&parent).sign_by(&validator(0)); - let id = header.compute_id(); - insert_header(&mut storage, header.clone()); - headers.push(header.clone()); - parent = header; - } - - let last_header = headers.last().unwrap().clone(); - let last_authority = validator(1); - - // Need to make sure that the header we're going to import hasn't been inserted - // into storage already - let header = HeaderBuilder::with_parent(&last_header).sign_by(&last_authority); - }: import_unsigned_header(RawOrigin::None, Box::new(header), None) - verify { - let storage = BridgeStorage::::new(); - assert_eq!(storage.best_block().0.number, (num_blocks + 1) as u64); - assert_eq!(storage.finalized_block().number, num_blocks as u64); - } - - // Basically the exact same as `import_unsigned_finality` but with a different range for the - // complexity parameter. In this bench we use a larger range of blocks to see how performance - // changes when the finality cache kicks in (>16 blocks). - import_unsigned_finality_with_cache { - // Our complexity parameter, n, will represent the number of blocks imported before - // finalization. 
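[Editor's note: per the comment above, the finality cache is only refreshed every 16 blocks, which is why this benchmark and the previous one split the complexity range at n = 7. A back-of-envelope sketch of that boundary, with `CACHE_INTERVAL` mirroring the stated constant (illustrative only); the `import_unsigned_finality_with_cache` body continues below.]

const CACHE_INTERVAL: u64 = 16;

// How many cache refreshes a run of `blocks_imported` sequential imports hits.
fn cache_refreshes(blocks_imported: u64) -> u64 {
    blocks_imported / CACHE_INTERVAL
}

fn main() {
    // With n in 7..100 and num_blocks = 2 * n, the smallest run (14 blocks)
    // stays below the boundary, while n >= 8 guarantees at least one refresh.
    assert_eq!(cache_refreshes(2 * 7), 0);
    assert_eq!(cache_refreshes(2 * 8), 1);
    assert_eq!(cache_refreshes(2 * 99), 12);
}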
- let n in 7..100; - - let mut storage = BridgeStorage::::new(); - let num_validators: u32 = 2; - let initial_header = initialize_bench::(num_validators as usize); - - // Since we only have two validators we need to make sure the number of blocks is even to - // make sure the right validator signs the final block - let num_blocks = 2 * n; - let mut headers = Vec::new(); - let mut parent = initial_header.clone(); - - // Import a bunch of headers without any verification, will ensure that they're not - // finalized prematurely - for i in 1..=num_blocks { - let header = HeaderBuilder::with_parent(&parent).sign_by(&validator(0)); - let id = header.compute_id(); - insert_header(&mut storage, header.clone()); - headers.push(header.clone()); - parent = header; - } - - let last_header = headers.last().unwrap().clone(); - let last_authority = validator(1); - - // Need to make sure that the header we're going to import hasn't been inserted - // into storage already - let header = HeaderBuilder::with_parent(&last_header).sign_by(&last_authority); - }: import_unsigned_header(RawOrigin::None, Box::new(header), None) - verify { - let storage = BridgeStorage::::new(); - assert_eq!(storage.best_block().0.number, (num_blocks + 1) as u64); - assert_eq!(storage.finalized_block().number, num_blocks as u64); - } - - // A block import may trigger a pruning event, which adds extra work to the import progress. - // In this bench we trigger a pruning event in order to see how much extra time is spent by the - // runtime dealing with it. In the Ethereum Pallet, we're limited pruning to eight blocks in a - // single import, as dictated by MAX_BLOCKS_TO_PRUNE_IN_SINGLE_IMPORT. - import_unsigned_pruning { - let n in 1..MAX_BLOCKS_TO_PRUNE_IN_SINGLE_IMPORT as u32; - - let mut storage = BridgeStorage::::new(); - - let num_validators = 3; - let initial_header = initialize_bench::(num_validators as usize); - let validators = validators(num_validators); - - // Want to prune eligible blocks between [0, n) - BlocksToPrune::::put(PruningRange { - oldest_unpruned_block: 0, - oldest_block_to_keep: n as u64, - }); - - let mut parent = initial_header; - for i in 1..=n { - let header = HeaderBuilder::with_parent(&parent).sign_by_set(&validators); - let id = header.compute_id(); - insert_header(&mut storage, header.clone()); - parent = header; - } - - let header = HeaderBuilder::with_parent(&parent).sign_by_set(&validators); - }: import_unsigned_header(RawOrigin::None, Box::new(header), None) - verify { - let storage = BridgeStorage::::new(); - let max_pruned: u64 = (n - 1) as _; - assert_eq!(storage.best_block().0.number, (n + 1) as u64); - assert!(HeadersByNumber::::get(&0).is_none()); - assert!(HeadersByNumber::::get(&max_pruned).is_none()); - } - - // The goal of this bench is to import a block which contains a transaction receipt. The receipt - // will contain a validator set change. Verifying the receipt root is an expensive operation to - // do, which is why we're interested in benchmarking it. - import_unsigned_with_receipts { - let n in 1..100; - - let mut storage = BridgeStorage::::new(); - - let num_validators = 1; - let initial_header = initialize_bench::(num_validators as usize); - - let mut receipts = vec![]; - for i in 1..=n { - let receipt = validators_change_receipt(Default::default()); - receipts.push(receipt) - } - let encoded_receipts = receipts.iter().map(|r| r.rlp()); - - // We need this extra header since this is what signals a validator set transition. 
This - // will ensure that the next header is within the "Contract" window - let header1 = HeaderBuilder::with_parent(&initial_header).sign_by(&validator(0)); - insert_header(&mut storage, header1.clone()); - - let header = build_custom_header( - &validator(0), - &header1, - |mut header| { - // Logs Bloom signals a change in validator set - header.log_bloom = (&[0xff; 256]).into(); - header.receipts_root = compute_merkle_root(encoded_receipts); - header - }, - ); - }: import_unsigned_header(RawOrigin::None, Box::new(header), Some(receipts)) - verify { - let storage = BridgeStorage::::new(); - assert_eq!(storage.best_block().0.number, 2); - } -} - -fn initialize_bench, I: 'static>(num_validators: usize) -> AuraHeader { - // Initialize storage with some initial header - let initial_header = build_genesis_header(&validator(0)); - let initial_difficulty = initial_header.difficulty; - let initial_validators = validators_addresses(num_validators as usize); - - initialize_storage::(&initial_header, initial_difficulty, &initial_validators); - - initial_header -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{run_test, TestRuntime}; - use frame_support::assert_ok; - - #[test] - fn insert_unsigned_header_best_case() { - run_test(1, |_| { - assert_ok!(test_benchmark_import_unsigned_header_best_case::()); - }); - } - - #[test] - fn insert_unsigned_header_finality() { - run_test(1, |_| { - assert_ok!(test_benchmark_import_unsigned_finality::()); - }); - } - - #[test] - fn insert_unsigned_header_finality_with_cache() { - run_test(1, |_| { - assert_ok!(test_benchmark_import_unsigned_finality_with_cache::()); - }); - } - - #[test] - fn insert_unsigned_header_pruning() { - run_test(1, |_| { - assert_ok!(test_benchmark_import_unsigned_pruning::()); - }); - } - - #[test] - fn insert_unsigned_header_receipts() { - run_test(1, |_| { - assert_ok!(test_benchmark_import_unsigned_with_receipts::()); - }); - } -} diff --git a/modules/ethereum/src/error.rs b/modules/ethereum/src/error.rs deleted file mode 100644 index 6fd376b01715..000000000000 --- a/modules/ethereum/src/error.rs +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use sp_runtime::RuntimeDebug; - -/// Header import error. -#[derive(Clone, Copy, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(PartialEq))] -pub enum Error { - /// The header is beyond last finalized and can not be imported. - AncientHeader = 0, - /// The header is already imported. - KnownHeader = 1, - /// Seal has an incorrect format. - InvalidSealArity = 2, - /// Block number isn't sensible. - RidiculousNumber = 3, - /// Block has too much gas used. - TooMuchGasUsed = 4, - /// Gas limit header field is invalid. - InvalidGasLimit = 5, - /// Extra data is of an invalid length. - ExtraDataOutOfBounds = 6, - /// Timestamp header overflowed. 
-	TimestampOverflow = 7,
-	/// The parent header is missing from the blockchain.
-	MissingParentBlock = 8,
-	/// The header step is missing from the header.
-	MissingStep = 9,
-	/// The header signature is missing from the header.
-	MissingSignature = 10,
-	/// Empty steps are missing from the header.
-	MissingEmptySteps = 11,
-	/// The same author issued different votes at the same step.
-	DoubleVote = 12,
-	/// Validation proof insufficient.
-	InsufficientProof = 13,
-	/// Difficulty header field is invalid.
-	InvalidDifficulty = 14,
-	/// The received block is from an incorrect proposer.
-	NotValidator = 15,
-	/// Missing transaction receipts for the operation.
-	MissingTransactionsReceipts = 16,
-	/// Redundant transaction receipts are provided.
-	RedundantTransactionsReceipts = 17,
-	/// Provided transactions receipts do not match the header.
-	TransactionsReceiptsMismatch = 18,
-	/// Can't accept unsigned header from the far future.
-	UnsignedTooFarInTheFuture = 19,
-	/// Trying to finalize sibling of finalized block.
-	TryingToFinalizeSibling = 20,
-	/// Header timestamp is ahead of on-chain timestamp.
-	HeaderTimestampIsAhead = 21,
-}
-
-impl Error {
-	pub fn msg(&self) -> &'static str {
-		match *self {
-			Error::AncientHeader => "Header is beyond last finalized and cannot be imported",
-			Error::KnownHeader => "Header is already imported",
-			Error::InvalidSealArity => "Header has an incorrect seal",
-			Error::RidiculousNumber => "Header has too large number",
-			Error::TooMuchGasUsed => "Header has too much gas used",
-			Error::InvalidGasLimit => "Header has invalid gas limit",
-			Error::ExtraDataOutOfBounds => "Header has too large extra data",
-			Error::TimestampOverflow => "Header has too large timestamp",
-			Error::MissingParentBlock => "Header has unknown parent hash",
-			Error::MissingStep => "Header is missing step seal",
-			Error::MissingSignature => "Header is missing signature seal",
-			Error::MissingEmptySteps => "Header is missing empty steps seal",
-			Error::DoubleVote => "Header has invalid step in seal",
-			Error::InsufficientProof => "Header has insufficient proof",
-			Error::InvalidDifficulty => "Header has invalid difficulty",
-			Error::NotValidator => "Header is sealed by unexpected validator",
-			Error::MissingTransactionsReceipts =>
-				"The import operation requires transactions receipts",
-			Error::RedundantTransactionsReceipts => "Redundant transactions receipts are provided",
-			Error::TransactionsReceiptsMismatch => "Invalid transactions receipts provided",
-			Error::UnsignedTooFarInTheFuture => "The unsigned header is too far in future",
-			Error::TryingToFinalizeSibling => "Trying to finalize sibling of finalized block",
-			Error::HeaderTimestampIsAhead => "Header timestamp is ahead of on-chain timestamp",
-		}
-	}
-
-	/// Return unique error code.
-	pub fn code(&self) -> u8 {
-		*self as u8
-	}
-}
diff --git a/modules/ethereum/src/finality.rs b/modules/ethereum/src/finality.rs
deleted file mode 100644
index fe8841fcc044..000000000000
--- a/modules/ethereum/src/finality.rs
+++ /dev/null
@@ -1,557 +0,0 @@
-// Copyright 2019-2021 Parity Technologies (UK) Ltd.
-// This file is part of Parity Bridges Common.
-
-// Parity Bridges Common is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Parity Bridges Common is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
-
-use crate::{error::Error, Storage};
-use bp_eth_poa::{public_to_address, Address, AuraHeader, HeaderId, SealedEmptyStep, H256};
-use codec::{Decode, Encode};
-use scale_info::TypeInfo;
-use sp_io::crypto::secp256k1_ecdsa_recover;
-use sp_runtime::RuntimeDebug;
-use sp_std::{
-	collections::{
-		btree_map::{BTreeMap, Entry},
-		btree_set::BTreeSet,
-		vec_deque::VecDeque,
-	},
-	prelude::*,
-};
-
-/// Cached finality votes for given block.
-#[derive(RuntimeDebug)]
-#[cfg_attr(test, derive(PartialEq))]
-pub struct CachedFinalityVotes<Submitter> {
-	/// True if we have stopped at the best finalized block's sibling. This means
-	/// that we are trying to finalize a block from a fork that has forked before
-	/// the best finalized block.
-	pub stopped_at_finalized_sibling: bool,
-	/// Header ancestors that were read while we have been searching for
-	/// cached votes entry. The newest header has index 0.
-	pub unaccounted_ancestry: VecDeque<(HeaderId, Option<Submitter>, AuraHeader)>,
-	/// Cached finality votes, if they have been found. The associated
-	/// header is not included into `unaccounted_ancestry`.
-	pub votes: Option<FinalityVotes<Submitter>>,
-}
-
-/// Finality effects.
-#[derive(RuntimeDebug)]
-#[cfg_attr(test, derive(PartialEq))]
-pub struct FinalityEffects<Submitter> {
-	/// Finalized headers.
-	pub finalized_headers: Vec<(HeaderId, Option<Submitter>)>,
-	/// Finality votes used in computation.
-	pub votes: FinalityVotes<Submitter>,
-}
-
-/// Finality votes for given block.
-#[derive(RuntimeDebug, Decode, Encode, TypeInfo)]
-#[cfg_attr(test, derive(Clone, PartialEq))]
-pub struct FinalityVotes<Submitter> {
-	/// Number of votes per each validator.
-	pub votes: BTreeMap<Address, u64>,
-	/// Ancestry blocks with the oldest ancestors at the beginning and newest at the
-	/// end of the queue.
-	pub ancestry: VecDeque<FinalityAncestor<Submitter>>,
-}
-
-/// Information about block ancestor that is used in computations.
-#[derive(RuntimeDebug, Decode, Encode, TypeInfo)]
-#[cfg_attr(test, derive(Clone, Default, PartialEq))]
-pub struct FinalityAncestor<Submitter> {
-	/// Block id.
-	pub id: HeaderId,
-	/// Block submitter.
-	pub submitter: Option<Submitter>,
-	/// Validators that have signed this block and empty steps on top
-	/// of this block.
-	pub signers: BTreeSet<Address>
, -} - -/// Tries to finalize blocks when given block is imported. -/// -/// Returns numbers and hashes of finalized blocks in ascending order. -pub fn finalize_blocks( - storage: &S, - best_finalized: HeaderId, - header_validators: (HeaderId, &[Address]), - id: HeaderId, - submitter: Option<&S::Submitter>, - header: &AuraHeader, - two_thirds_majority_transition: u64, -) -> Result, Error> { - // compute count of voters for every unfinalized block in ancestry - let validators = header_validators.1.iter().collect(); - let votes = prepare_votes( - header - .parent_id() - .map(|parent_id| { - storage.cached_finality_votes(&parent_id, &best_finalized, |hash| { - *hash == header_validators.0.hash || *hash == best_finalized.hash - }) - }) - .unwrap_or_default(), - best_finalized, - &validators, - id, - header, - submitter.cloned(), - )?; - - // now let's iterate in reverse order && find just finalized blocks - let mut finalized_headers = Vec::new(); - let mut current_votes = votes.votes.clone(); - for ancestor in &votes.ancestry { - if !is_finalized( - &validators, - ¤t_votes, - ancestor.id.number >= two_thirds_majority_transition, - ) { - break - } - - remove_signers_votes(&ancestor.signers, &mut current_votes); - finalized_headers.push((ancestor.id, ancestor.submitter.clone())); - } - - Ok(FinalityEffects { finalized_headers, votes }) -} - -/// Returns true if there are enough votes to treat this header as finalized. -fn is_finalized( - validators: &BTreeSet<&Address>, - votes: &BTreeMap, - requires_two_thirds_majority: bool, -) -> bool { - (!requires_two_thirds_majority && votes.len() * 2 > validators.len()) || - (requires_two_thirds_majority && votes.len() * 3 > validators.len() * 2) -} - -/// Prepare 'votes' of header and its ancestors' signers. -pub(crate) fn prepare_votes( - mut cached_votes: CachedFinalityVotes, - best_finalized: HeaderId, - validators: &BTreeSet<&Address>, - id: HeaderId, - header: &AuraHeader, - submitter: Option, -) -> Result, Error> { - // if we have reached finalized block sibling, then we're trying - // to switch finalized blocks - if cached_votes.stopped_at_finalized_sibling { - return Err(Error::TryingToFinalizeSibling) - } - - // this fn can only work with single validators set - if !validators.contains(&header.author) { - return Err(Error::NotValidator) - } - - // now we have votes that were valid when some block B has been inserted - // things may have changed a bit, but we do not need to read anything else - // from the db, because we have ancestry - // so the only thing we need to do is: - // 1) remove votes from blocks that have been finalized after B has been inserted; - // 2) add votes from B descendants - let mut votes = cached_votes.votes.unwrap_or_default(); - - // remove votes from finalized blocks - while let Some(old_ancestor) = votes.ancestry.pop_front() { - if old_ancestor.id.number > best_finalized.number { - votes.ancestry.push_front(old_ancestor); - break - } - - remove_signers_votes(&old_ancestor.signers, &mut votes.votes); - } - - // add votes from new blocks - let mut parent_empty_step_signers = empty_steps_signers(header); - let mut unaccounted_ancestry = VecDeque::new(); - while let Some((ancestor_id, ancestor_submitter, ancestor)) = - cached_votes.unaccounted_ancestry.pop_front() - { - let mut signers = empty_steps_signers(&ancestor); - sp_std::mem::swap(&mut signers, &mut parent_empty_step_signers); - signers.insert(ancestor.author); - - add_signers_votes(validators, &signers, &mut votes.votes)?; - - 
unaccounted_ancestry.push_front(FinalityAncestor { - id: ancestor_id, - submitter: ancestor_submitter, - signers, - }); - } - votes.ancestry.extend(unaccounted_ancestry); - - // add votes from block itself - let mut header_signers = BTreeSet::new(); - header_signers.insert(header.author); - *votes.votes.entry(header.author).or_insert(0) += 1; - votes - .ancestry - .push_back(FinalityAncestor { id, submitter, signers: header_signers }); - - Ok(votes) -} - -/// Increase count of 'votes' for every passed signer. -/// Fails if at least one of signers is not in the `validators` set. -fn add_signers_votes( - validators: &BTreeSet<&Address>, - signers_to_add: &BTreeSet
<Address>,
-	votes: &mut BTreeMap<Address, u64>,
-) -> Result<(), Error> {
-	for signer in signers_to_add {
-		if !validators.contains(signer) {
-			return Err(Error::NotValidator)
-		}
-
-		*votes.entry(*signer).or_insert(0) += 1;
-	}
-
-	Ok(())
-}
-
-/// Decrease 'votes' count for every passed signer.
-fn remove_signers_votes(signers_to_remove: &BTreeSet<Address>, votes: &mut BTreeMap<Address, u64>) {
-	for signer in signers_to_remove {
-		match votes.entry(*signer) {
-			Entry::Occupied(mut entry) =>
-				if *entry.get() <= 1 {
-					entry.remove();
-				} else {
-					*entry.get_mut() -= 1;
-				},
-			Entry::Vacant(_) => unreachable!("we only remove signers that have been added; qed"),
-		}
-	}
-}
-
-/// Returns unique set of empty steps signers.
-fn empty_steps_signers(header: &AuraHeader) -> BTreeSet<Address> {
-	header
-		.empty_steps()
-		.into_iter()
-		.flatten()
-		.filter_map(|step| empty_step_signer(&step, &header.parent_hash))
-		.collect::<BTreeSet<_>>()
-}
-
-/// Returns author of empty step signature.
-fn empty_step_signer(empty_step: &SealedEmptyStep, parent_hash: &H256) -> Option<Address>
{ - let message = empty_step.message(parent_hash); - secp256k1_ecdsa_recover(empty_step.signature.as_fixed_bytes(), message.as_fixed_bytes()) - .ok() - .map(|public| public_to_address(&public)) -} - -impl Default for CachedFinalityVotes { - fn default() -> Self { - CachedFinalityVotes { - stopped_at_finalized_sibling: false, - unaccounted_ancestry: VecDeque::new(), - votes: None, - } - } -} - -impl Default for FinalityVotes { - fn default() -> Self { - FinalityVotes { votes: BTreeMap::new(), ancestry: VecDeque::new() } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - mock::{ - insert_header, run_test, validator, validators_addresses, HeaderBuilder, TestRuntime, - }, - BridgeStorage, FinalityCache, HeaderToImport, - }; - - const TOTAL_VALIDATORS: usize = 5; - - #[test] - fn verifies_header_author() { - run_test(TOTAL_VALIDATORS, |_| { - assert_eq!( - finalize_blocks( - &BridgeStorage::::new(), - Default::default(), - (Default::default(), &[]), - Default::default(), - None, - &AuraHeader::default(), - 0, - ), - Err(Error::NotValidator), - ); - }); - } - - #[test] - fn finalize_blocks_works() { - run_test(TOTAL_VALIDATORS, |ctx| { - // let's say we have 5 validators (we need 'votes' from 3 validators to achieve - // finality) - let mut storage = BridgeStorage::::new(); - - // when header#1 is inserted, nothing is finalized (1 vote) - let header1 = HeaderBuilder::with_parent(&ctx.genesis).sign_by(&validator(0)); - let id1 = header1.compute_id(); - let mut header_to_import = HeaderToImport { - context: storage.import_context(None, &header1.parent_hash).unwrap(), - is_best: true, - id: id1, - header: header1, - total_difficulty: 0.into(), - enacted_change: None, - scheduled_change: None, - finality_votes: Default::default(), - }; - assert_eq!( - finalize_blocks( - &storage, - ctx.genesis.compute_id(), - (Default::default(), &ctx.addresses), - id1, - None, - &header_to_import.header, - u64::max_value(), - ) - .map(|eff| eff.finalized_headers), - Ok(Vec::new()), - ); - storage.insert_header(header_to_import.clone()); - - // when header#2 is inserted, nothing is finalized (2 votes) - header_to_import.header = - HeaderBuilder::with_parent_hash(id1.hash).sign_by(&validator(1)); - header_to_import.id = header_to_import.header.compute_id(); - let id2 = header_to_import.header.compute_id(); - assert_eq!( - finalize_blocks( - &storage, - ctx.genesis.compute_id(), - (Default::default(), &ctx.addresses), - id2, - None, - &header_to_import.header, - u64::max_value(), - ) - .map(|eff| eff.finalized_headers), - Ok(Vec::new()), - ); - storage.insert_header(header_to_import.clone()); - - // when header#3 is inserted, header#1 is finalized (3 votes) - header_to_import.header = - HeaderBuilder::with_parent_hash(id2.hash).sign_by(&validator(2)); - header_to_import.id = header_to_import.header.compute_id(); - let id3 = header_to_import.header.compute_id(); - assert_eq!( - finalize_blocks( - &storage, - ctx.genesis.compute_id(), - (Default::default(), &ctx.addresses), - id3, - None, - &header_to_import.header, - u64::max_value(), - ) - .map(|eff| eff.finalized_headers), - Ok(vec![(id1, None)]), - ); - storage.insert_header(header_to_import); - }); - } - - #[test] - fn cached_votes_are_updated_with_ancestry() { - // we're inserting header#5 - // cached votes are from header#3 - // header#4 has finalized header#1 and header#2 - // => when inserting header#5, we need to: - // 1) remove votes from header#1 and header#2 - // 2) add votes from header#4 and header#5 - let validators = 
validators_addresses(5); - let headers = (1..6) - .map(|number| { - HeaderBuilder::with_number(number).sign_by(&validator(number as usize - 1)) - }) - .collect::>(); - let ancestry = headers - .iter() - .map(|header| FinalityAncestor { - id: header.compute_id(), - signers: vec![header.author].into_iter().collect(), - ..Default::default() - }) - .collect::>(); - let header5 = headers[4].clone(); - assert_eq!( - prepare_votes::<()>( - CachedFinalityVotes { - stopped_at_finalized_sibling: false, - unaccounted_ancestry: - vec![(headers[3].compute_id(), None, headers[3].clone()),] - .into_iter() - .collect(), - votes: Some(FinalityVotes { - votes: vec![(validators[0], 1), (validators[1], 1), (validators[2], 1),] - .into_iter() - .collect(), - ancestry: ancestry[..3].iter().cloned().collect(), - }), - }, - headers[1].compute_id(), - &validators.iter().collect(), - header5.compute_id(), - &header5, - None, - ) - .unwrap(), - FinalityVotes { - votes: vec![(validators[2], 1), (validators[3], 1), (validators[4], 1),] - .into_iter() - .collect(), - ancestry: ancestry[2..].iter().cloned().collect(), - }, - ); - } - - #[test] - fn prepare_votes_respects_finality_cache() { - run_test(TOTAL_VALIDATORS, |ctx| { - // we need signatures of 3 validators to finalize block - let mut storage = BridgeStorage::::new(); - - // headers 1..3 are signed by validator#0 - // headers 4..6 are signed by validator#1 - // headers 7..9 are signed by validator#2 - let mut hashes = Vec::new(); - let mut headers = Vec::new(); - let mut ancestry = Vec::new(); - let mut parent_hash = ctx.genesis.compute_hash(); - for i in 1..10 { - let header = - HeaderBuilder::with_parent_hash(parent_hash).sign_by(&validator((i - 1) / 3)); - let id = header.compute_id(); - insert_header(&mut storage, header.clone()); - hashes.push(id.hash); - ancestry.push(FinalityAncestor { - id: header.compute_id(), - submitter: None, - signers: vec![header.author].into_iter().collect(), - }); - headers.push(header); - parent_hash = id.hash; - } - - // when we're inserting header#7 and last finalized header is 0: - // check that votes at #7 are computed correctly without cache - let expected_votes_at_7 = FinalityVotes { - votes: vec![(ctx.addresses[0], 3), (ctx.addresses[1], 3), (ctx.addresses[2], 1)] - .into_iter() - .collect(), - ancestry: ancestry[..7].iter().cloned().collect(), - }; - let id7 = headers[6].compute_id(); - assert_eq!( - prepare_votes( - storage.cached_finality_votes( - &headers.get(5).unwrap().compute_id(), - &ctx.genesis.compute_id(), - |_| false, - ), - Default::default(), - &ctx.addresses.iter().collect(), - id7, - headers.get(6).unwrap(), - None, - ) - .unwrap(), - expected_votes_at_7, - ); - - // cached votes at #5 - let expected_votes_at_5 = FinalityVotes { - votes: vec![(ctx.addresses[0], 3), (ctx.addresses[1], 2)].into_iter().collect(), - ancestry: ancestry[..5].iter().cloned().collect(), - }; - FinalityCache::::insert(hashes[4], expected_votes_at_5); - - // when we're inserting header#7 and last finalized header is 0: - // check that votes at #7 are computed correctly with cache - assert_eq!( - prepare_votes( - storage.cached_finality_votes( - &headers.get(5).unwrap().compute_id(), - &ctx.genesis.compute_id(), - |_| false, - ), - Default::default(), - &ctx.addresses.iter().collect(), - id7, - headers.get(6).unwrap(), - None, - ) - .unwrap(), - expected_votes_at_7, - ); - - // when we're inserting header#7 and last finalized header is 3: - // check that votes at #7 are computed correctly with cache - let expected_votes_at_7 = 
FinalityVotes { - votes: vec![(ctx.addresses[1], 3), (ctx.addresses[2], 1)].into_iter().collect(), - ancestry: ancestry[3..7].iter().cloned().collect(), - }; - assert_eq!( - prepare_votes( - storage.cached_finality_votes( - &headers.get(5).unwrap().compute_id(), - &headers.get(2).unwrap().compute_id(), - |hash| *hash == hashes[2], - ), - headers[2].compute_id(), - &ctx.addresses.iter().collect(), - id7, - headers.get(6).unwrap(), - None, - ) - .unwrap(), - expected_votes_at_7, - ); - }); - } - - #[test] - fn prepare_votes_fails_when_finalized_sibling_is_in_ancestry() { - assert_eq!( - prepare_votes::<()>( - CachedFinalityVotes { stopped_at_finalized_sibling: true, ..Default::default() }, - Default::default(), - &validators_addresses(3).iter().collect(), - Default::default(), - &Default::default(), - None, - ), - Err(Error::TryingToFinalizeSibling), - ); - } -} diff --git a/modules/ethereum/src/import.rs b/modules/ethereum/src/import.rs deleted file mode 100644 index 377743245777..000000000000 --- a/modules/ethereum/src/import.rs +++ /dev/null @@ -1,600 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::{ - error::Error, - finality::finalize_blocks, - validators::{Validators, ValidatorsConfiguration}, - verification::{is_importable_header, verify_aura_header}, - AuraConfiguration, ChainTime, ChangeToEnact, PruningStrategy, Storage, -}; -use bp_eth_poa::{AuraHeader, HeaderId, Receipt}; -use sp_std::{collections::btree_map::BTreeMap, prelude::*}; - -/// Imports a bunch of headers and updates blocks finality. -/// -/// Transactions receipts must be provided if `header_import_requires_receipts()` -/// has returned true. -/// If successful, returns tuple where first element is the number of useful headers -/// we have imported and the second element is the number of useless headers (duplicate) -/// we have NOT imported. -/// Returns error if fatal error has occurred during import. Some valid headers may be -/// imported in this case. 
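The finality tests above all revolve around one threshold rule: a header is finalized once the distinct authors among its descendants (and itself) exceed half of the current validators, or two thirds after the `two_thirds_majority_transition` block. A minimal sketch of that check, assuming a flat validator list and a per-validator vote-count map (illustrative names, not the pallet's API, which tracks per-fork counts in `FinalityVotes`):

use std::collections::BTreeMap;

type Address = [u8; 20];

// Sketch only. With 5 validators, a simple majority needs 3 distinct
// signers; under the two-thirds rule it needs 4.
fn is_finalized(
    validators: &[Address],
    votes: &BTreeMap<Address, u64>,
    requires_two_thirds: bool,
) -> bool {
    let signers = validators.iter().filter(|v| votes.contains_key(*v)).count();
    if requires_two_thirds {
        signers > validators.len() * 2 / 3
    } else {
        signers > validators.len() / 2
    }
}

This matches the `finalize_blocks_works` test above: headers #1 and #2 accumulate one and two distinct signers, and only header #3, contributing the third signer, finalizes header #1.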
-/// TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/415) -#[allow(clippy::too_many_arguments)] -pub fn import_headers( - storage: &mut S, - pruning_strategy: &mut PS, - aura_config: &AuraConfiguration, - validators_config: &ValidatorsConfiguration, - submitter: Option, - headers: Vec<(AuraHeader, Option>)>, - chain_time: &CT, - finalized_headers: &mut BTreeMap, -) -> Result<(u64, u64), Error> { - let mut useful = 0; - let mut useless = 0; - for (header, receipts) in headers { - let import_result = import_header( - storage, - pruning_strategy, - aura_config, - validators_config, - submitter.clone(), - header, - chain_time, - receipts, - ); - - match import_result { - Ok((_, finalized)) => { - for (_, submitter) in finalized { - if let Some(submitter) = submitter { - *finalized_headers.entry(submitter).or_default() += 1; - } - } - useful += 1; - }, - Err(Error::AncientHeader) | Err(Error::KnownHeader) => useless += 1, - Err(error) => return Err(error), - } - } - - Ok((useful, useless)) -} - -/// A vector of finalized headers and their submitters. -pub type FinalizedHeaders = Vec<(HeaderId, Option<::Submitter>)>; - -/// Imports given header and updates blocks finality (if required). -/// -/// Transactions receipts must be provided if `header_import_requires_receipts()` -/// has returned true. -/// -/// Returns imported block id and list of all finalized headers. -/// TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/415) -#[allow(clippy::too_many_arguments)] -pub fn import_header( - storage: &mut S, - pruning_strategy: &mut PS, - aura_config: &AuraConfiguration, - validators_config: &ValidatorsConfiguration, - submitter: Option, - header: AuraHeader, - chain_time: &CT, - receipts: Option>, -) -> Result<(HeaderId, FinalizedHeaders), Error> { - // first check that we are able to import this header at all - let (header_id, finalized_id) = is_importable_header(storage, &header)?; - - // verify header - let import_context = verify_aura_header(storage, aura_config, submitter, &header, chain_time)?; - - // check if block schedules new validators - let validators = Validators::new(validators_config); - let (scheduled_change, enacted_change) = - validators.extract_validators_change(&header, receipts)?; - - // check if block finalizes some other blocks and corresponding scheduled validators - let validators_set = import_context.validators_set(); - let finalized_blocks = finalize_blocks( - storage, - finalized_id, - (validators_set.enact_block, &validators_set.validators), - header_id, - import_context.submitter(), - &header, - aura_config.two_thirds_majority_transition, - )?; - let enacted_change = enacted_change - .map(|validators| ChangeToEnact { signal_block: None, validators }) - .or_else(|| { - validators.finalize_validators_change(storage, &finalized_blocks.finalized_headers) - }); - - // NOTE: we can't return Err() from anywhere below this line - // (because otherwise we'll have inconsistent storage if transaction will fail) - - // and finally insert the block - let (best_id, best_total_difficulty) = storage.best_block(); - let total_difficulty = import_context.total_difficulty() + header.difficulty; - let is_best = total_difficulty > best_total_difficulty; - storage.insert_header(import_context.into_import_header( - is_best, - header_id, - header, - total_difficulty, - enacted_change, - scheduled_change, - finalized_blocks.votes, - )); - - // compute upper border of updated pruning range - let new_best_block_id = if is_best { header_id } 
else { best_id }; - let new_best_finalized_block_id = finalized_blocks.finalized_headers.last().map(|(id, _)| *id); - let pruning_upper_bound = pruning_strategy.pruning_upper_bound( - new_best_block_id.number, - new_best_finalized_block_id.map(|id| id.number).unwrap_or(finalized_id.number), - ); - - // now mark finalized headers && prune old headers - storage.finalize_and_prune_headers(new_best_finalized_block_id, pruning_upper_bound); - - Ok((header_id, finalized_blocks.finalized_headers)) -} - -/// Returns true if transactions receipts are required to import given header. -pub fn header_import_requires_receipts( - storage: &S, - validators_config: &ValidatorsConfiguration, - header: &AuraHeader, -) -> bool { - is_importable_header(storage, header) - .map(|_| Validators::new(validators_config)) - .map(|validators| validators.maybe_signals_validators_change(header)) - .unwrap_or(false) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - mock::{ - run_test, secret_to_address, test_aura_config, test_validators_config, validator, - validators_addresses, validators_change_receipt, HeaderBuilder, - KeepSomeHeadersBehindBest, TestRuntime, GAS_LIMIT, - }, - validators::ValidatorsSource, - BlocksToPrune, BridgeStorage, Headers, PruningRange, - }; - use libsecp256k1::SecretKey; - - const TOTAL_VALIDATORS: usize = 3; - - #[test] - fn rejects_finalized_block_competitors() { - run_test(TOTAL_VALIDATORS, |_| { - let mut storage = BridgeStorage::::new(); - storage.finalize_and_prune_headers( - Some(HeaderId { number: 100, ..Default::default() }), - 0, - ); - assert_eq!( - import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &test_validators_config(), - None, - Default::default(), - &(), - None, - ), - Err(Error::AncientHeader), - ); - }); - } - - #[test] - fn rejects_known_header() { - run_test(TOTAL_VALIDATORS, |ctx| { - let mut storage = BridgeStorage::::new(); - let header = HeaderBuilder::with_parent(&ctx.genesis).sign_by(&validator(1)); - assert_eq!( - import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &test_validators_config(), - None, - header.clone(), - &(), - None, - ) - .map(|_| ()), - Ok(()), - ); - assert_eq!( - import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &test_validators_config(), - None, - header, - &(), - None, - ) - .map(|_| ()), - Err(Error::KnownHeader), - ); - }); - } - - #[test] - fn import_header_works() { - run_test(TOTAL_VALIDATORS, |ctx| { - let validators_config = ValidatorsConfiguration::Multi(vec![ - (0, ValidatorsSource::List(ctx.addresses.clone())), - (1, ValidatorsSource::List(validators_addresses(2))), - ]); - let mut storage = BridgeStorage::::new(); - let header = HeaderBuilder::with_parent(&ctx.genesis).sign_by(&validator(1)); - let hash = header.compute_hash(); - assert_eq!( - import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &validators_config, - None, - header, - &(), - None - ) - .map(|_| ()), - Ok(()), - ); - - // check that new validators will be used for next header - let imported_header = Headers::::get(&hash).unwrap(); - assert_eq!( - imported_header.next_validators_set_id, - 1, // new set is enacted from config - ); - }); - } - - #[test] - fn headers_are_pruned_during_import() { - run_test(TOTAL_VALIDATORS, |ctx| { - let validators_config = ValidatorsConfiguration::Single(ValidatorsSource::Contract( - [3; 20].into(), - ctx.addresses.clone(), - 
)); - let validators = vec![validator(0), validator(1), validator(2)]; - let mut storage = BridgeStorage::::new(); - - // header [0..11] are finalizing blocks [0; 9] - // => since we want to keep 10 finalized blocks, we aren't pruning anything - let mut latest_block_id = Default::default(); - for i in 1..11 { - let header = HeaderBuilder::with_parent_number(i - 1).sign_by_set(&validators); - let parent_id = header.parent_id().unwrap(); - - let (rolling_last_block_id, finalized_blocks) = import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &validators_config, - Some(100), - header, - &(), - None, - ) - .unwrap(); - match i { - 2..=10 => { - assert_eq!(finalized_blocks, vec![(parent_id, Some(100))], "At {}", i,) - }, - _ => assert_eq!(finalized_blocks, vec![], "At {}", i), - } - latest_block_id = rolling_last_block_id; - } - assert!(storage.header(&ctx.genesis.compute_hash()).is_some()); - - // header 11 finalizes headers [10] AND schedules change - // => we prune header#0 - let header11 = HeaderBuilder::with_parent_number(10) - .log_bloom((&[0xff; 256]).into()) - .receipts_root( - "ead6c772ba0083bbff497ba0f4efe47c199a2655401096c21ab7450b6c466d97" - .parse() - .unwrap(), - ) - .sign_by_set(&validators); - let parent_id = header11.parent_id().unwrap(); - let (rolling_last_block_id, finalized_blocks) = import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &validators_config, - Some(101), - header11.clone(), - &(), - Some(vec![validators_change_receipt(latest_block_id.hash)]), - ) - .unwrap(); - assert_eq!(finalized_blocks, vec![(parent_id, Some(100))],); - assert!(storage.header(&ctx.genesis.compute_hash()).is_none()); - latest_block_id = rolling_last_block_id; - - // and now let's say validators 1 && 2 went offline - // => in the range 12-25 no blocks are finalized, but we still continue to prune old - // headers until header#11 is met. 
we can't prune #11, because it schedules change - let mut step = 56u64; - let mut expected_blocks = vec![(header11.compute_id(), Some(101))]; - for i in 12..25 { - let header = HeaderBuilder::with_parent_hash(latest_block_id.hash) - .difficulty(i.into()) - .step(step) - .sign_by_set(&validators); - expected_blocks.push((header.compute_id(), Some(102))); - let (rolling_last_block_id, finalized_blocks) = import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &validators_config, - Some(102), - header, - &(), - None, - ) - .unwrap(); - assert_eq!(finalized_blocks, vec![],); - latest_block_id = rolling_last_block_id; - step += 3; - } - assert_eq!( - BlocksToPrune::::get(), - PruningRange { oldest_unpruned_block: 11, oldest_block_to_keep: 14 }, - ); - - // now let's insert block signed by validator 1 - // => blocks 11..24 are finalized and blocks 11..14 are pruned - step -= 2; - let header = HeaderBuilder::with_parent_hash(latest_block_id.hash) - .difficulty(25.into()) - .step(step) - .sign_by_set(&validators); - let (_, finalized_blocks) = import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &validators_config, - Some(103), - header, - &(), - None, - ) - .unwrap(); - assert_eq!(finalized_blocks, expected_blocks); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { oldest_unpruned_block: 15, oldest_block_to_keep: 15 }, - ); - }); - } - - fn import_custom_block( - storage: &mut S, - validators: &[SecretKey], - header: AuraHeader, - ) -> Result { - let id = header.compute_id(); - import_header( - storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &ValidatorsConfiguration::Single(ValidatorsSource::Contract( - [0; 20].into(), - validators.iter().map(secret_to_address).collect(), - )), - None, - header, - &(), - None, - ) - .map(|_| id) - } - - #[test] - fn import_of_non_best_block_may_finalize_blocks() { - run_test(TOTAL_VALIDATORS, |ctx| { - let mut storage = BridgeStorage::::new(); - - // insert headers (H1, validator1), (H2, validator1), (H3, validator1) - // making H3 the best header, without finalizing anything (we need 2 signatures) - let mut expected_best_block = Default::default(); - for i in 1..4 { - let step = 1 + i * TOTAL_VALIDATORS as u64; - expected_best_block = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(i - 1) - .step(step) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - } - let (best_block, best_difficulty) = storage.best_block(); - assert_eq!(best_block, expected_best_block); - assert_eq!(storage.finalized_block(), ctx.genesis.compute_id()); - - // insert headers (H1', validator1), (H2', validator2), finalizing H2, even though H3 - // has better difficulty than H2' (because there are more steps involved) - let mut expected_finalized_block = Default::default(); - let mut parent_hash = ctx.genesis.compute_hash(); - for i in 1..3 { - let step = i; - let id = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_hash(parent_hash) - .step(step) - .gas_limit((GAS_LIMIT + 1).into()) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - parent_hash = id.hash; - if i == 1 { - expected_finalized_block = id; - } - } - let (new_best_block, new_best_difficulty) = storage.best_block(); - assert_eq!(new_best_block, expected_best_block); - assert_eq!(new_best_difficulty, best_difficulty); - assert_eq!(storage.finalized_block(), expected_finalized_block); - }); - } - - #[test] - fn 
append_to_unfinalized_fork_fails() { - const VALIDATORS: u64 = 5; - run_test(VALIDATORS as usize, |ctx| { - let mut storage = BridgeStorage::::new(); - - // header1, authored by validator[2] is best common block between two competing forks - let header1 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(0).step(2).sign_by_set(&ctx.validators), - ) - .unwrap(); - assert_eq!(storage.best_block().0, header1); - assert_eq!(storage.finalized_block().number, 0); - - // validator[3] has authored header2 (nothing is finalized yet) - let header2 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(1).step(3).sign_by_set(&ctx.validators), - ) - .unwrap(); - assert_eq!(storage.best_block().0, header2); - assert_eq!(storage.finalized_block().number, 0); - - // validator[4] has authored header3 (header1 is finalized) - let header3 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(2).step(4).sign_by_set(&ctx.validators), - ) - .unwrap(); - assert_eq!(storage.best_block().0, header3); - assert_eq!(storage.finalized_block(), header1); - - // validator[4] has authored 4 blocks: header2'...header5' (header1 is still finalized) - let header2_1 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(1) - .gas_limit((GAS_LIMIT + 1).into()) - .step(4) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - let header3_1 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_hash(header2_1.hash) - .step(4 + VALIDATORS) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - let header4_1 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_hash(header3_1.hash) - .step(4 + VALIDATORS * 2) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - let header5_1 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_hash(header4_1.hash) - .step(4 + VALIDATORS * 3) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - assert_eq!(storage.best_block().0, header5_1); - assert_eq!(storage.finalized_block(), header1); - - // when we import header4 { parent = header3 }, authored by validator[0], header2 is - // finalized - let header4 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(3).step(5).sign_by_set(&ctx.validators), - ) - .unwrap(); - assert_eq!(storage.best_block().0, header5_1); - assert_eq!(storage.finalized_block(), header2); - - // when we import header5 { parent = header4 }, authored by validator[1], header3 is - // finalized - let header5 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_hash(header4.hash) - .step(6) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - assert_eq!(storage.best_block().0, header5); - assert_eq!(storage.finalized_block(), header3); - - // import of header2'' { parent = header1 } fails, because it has number < - // best_finalized - assert_eq!( - import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(1) - .gas_limit((GAS_LIMIT + 1).into()) - .step(3) - .sign_by_set(&ctx.validators) - ), - Err(Error::AncientHeader), - ); - - // import of header6' should also fail because we're trying to append to fork thas - // has forked before finalized block - assert_eq!( - import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(5) - .gas_limit((GAS_LIMIT + 1).into()) - .step(5 + VALIDATORS * 4) - 
.sign_by_set(&ctx.validators), - ), - Err(Error::TryingToFinalizeSibling), - ); - }); - } -} diff --git a/modules/ethereum/src/lib.rs b/modules/ethereum/src/lib.rs deleted file mode 100644 index 4224818ad96c..000000000000 --- a/modules/ethereum/src/lib.rs +++ /dev/null @@ -1,1572 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![cfg_attr(not(feature = "std"), no_std)] -// Runtime-generated enums -#![allow(clippy::large_enum_variant)] - -use crate::finality::{CachedFinalityVotes, FinalityVotes}; -use bp_eth_poa::{ - Address, AuraHeader, HeaderId, RawTransaction, RawTransactionReceipt, Receipt, H256, U256, -}; -use codec::{Decode, Encode}; -use frame_support::traits::Get; -use scale_info::TypeInfo; -use sp_runtime::RuntimeDebug; -use sp_std::{boxed::Box, cmp::Ord, collections::btree_map::BTreeMap, prelude::*}; - -pub use validators::{ValidatorsConfiguration, ValidatorsSource}; - -mod error; -mod finality; -mod import; -mod validators; -mod verification; - -#[cfg(feature = "runtime-benchmarks")] -mod benchmarking; - -#[cfg(test)] -mod mock; - -#[cfg(any(feature = "runtime-benchmarks", test))] -pub mod test_utils; - -/// Maximal number of blocks we're pruning in single import call. -const MAX_BLOCKS_TO_PRUNE_IN_SINGLE_IMPORT: u64 = 8; - -/// Authority round engine configuration parameters. -#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug)] -pub struct AuraConfiguration { - /// Empty step messages transition block. - pub empty_steps_transition: u64, - /// Transition block to strict empty steps validation. - pub strict_empty_steps_transition: u64, - /// Monotonic step validation transition block. - pub validate_step_transition: u64, - /// Chain score validation transition block. - pub validate_score_transition: u64, - /// First block for which a 2/3 quorum (instead of 1/2) is required. - pub two_thirds_majority_transition: u64, - /// Minimum gas limit. - pub min_gas_limit: U256, - /// Maximum gas limit. - pub max_gas_limit: U256, - /// Maximum size of extra data. - pub maximum_extra_data_size: u64, -} - -/// Transaction pool configuration. -/// -/// This is used to limit number of unsigned headers transactions in -/// the pool. We never use it to verify signed transactions. -pub struct PoolConfiguration { - /// Maximal difference between number of header from unsigned transaction - /// and current best block. This must be selected with caution - the more - /// is the difference, the more (potentially invalid) transactions could be - /// accepted to the pool and mined later (filling blocks with spam). - pub max_future_number_difference: u64, -} - -/// Block header as it is stored in the runtime storage. -#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] -pub struct StoredHeader { - /// Submitter of this header. 
May be `None` if header has been submitted - /// using unsigned transaction. - pub submitter: Option<Submitter>, - /// The block header itself. - pub header: AuraHeader, - /// Total difficulty of the chain. - pub total_difficulty: U256, - /// The ID of the set of validators that is expected to produce direct descendants of - /// this block. If the header enacts a new set, this is the new set. Otherwise - /// this is the set that has produced the block itself. - /// The hash is the hash of the block where the validators set has been enacted. - pub next_validators_set_id: u64, - /// Hash of the last block which has **SCHEDULED** a validators set change. - /// Note that a signal doesn't mean that the set has been (or ever will be) enacted. - /// Note that the header may already be pruned. - pub last_signal_block: Option<HeaderId>, -} - -/// Validators set as it is stored in the runtime storage. -#[derive(Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] -#[cfg_attr(test, derive(Clone))] -pub struct ValidatorsSet { - /// Validators of this set. - pub validators: Vec<Address>, - /// Hash of the block where this set has been signaled. None if this is the first set. - pub signal_block: Option<HeaderId>, - /// Hash of the block where this set has been enacted. - pub enact_block: HeaderId, -} - -/// Validators set change as it is stored in the runtime storage. -#[derive(Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] -#[cfg_attr(test, derive(Clone))] -pub struct AuraScheduledChange { - /// Validators of this set. - pub validators: Vec<Address>, - /// Hash of the block which has emitted the previous validators change signal. - pub prev_signal_block: Option<HeaderId>, -} - -/// Header that we're importing. -#[derive(RuntimeDebug)] -#[cfg_attr(test, derive(Clone, PartialEq))] -pub struct HeaderToImport<Submitter> { - /// Header import context. - pub context: ImportContext<Submitter>, - /// Should we consider this header as best? - pub is_best: bool, - /// The id of the header. - pub id: HeaderId, - /// The header itself. - pub header: AuraHeader, - /// Total chain difficulty at the header. - pub total_difficulty: U256, - /// New validators set and the hash of the block where it has been scheduled (if applicable). - /// Some if the set is enacted by this header. - pub enacted_change: Option<ChangeToEnact>, - /// Validators set scheduled change, if happened at the header. - pub scheduled_change: Option<Vec<Address>>, - /// Finality votes at this header. - pub finality_votes: FinalityVotes<Submitter>, -} - -/// Validators set change that is ready to be enacted. -#[derive(RuntimeDebug)] -#[cfg_attr(test, derive(Clone, PartialEq))] -pub struct ChangeToEnact { - /// The id of the header where the change has been scheduled. - /// None if it is the first set within the current `ValidatorsSource`. - pub signal_block: Option<HeaderId>, - /// Validators set that is enacted. - pub validators: Vec<Address>, -} - -/// Blocks range that we want to prune. -#[derive(Encode, Decode, Default, RuntimeDebug, Clone, PartialEq, TypeInfo)] -struct PruningRange { - /// Number of the oldest unpruned block(s). This might be the block that we do not - /// want to prune now (then it is equal to `oldest_block_to_keep`), or a block that we - /// were unable to prune for whatever reason (i.e. if it isn't finalized yet and has a - /// scheduled validators set change). - pub oldest_unpruned_block: u64, - /// Number of the oldest block(s) that we want to keep. We want to prune blocks in the range - /// [ `oldest_unpruned_block`; `oldest_block_to_keep` ). - pub oldest_block_to_keep: u64, -} - -/// Header import context. -/// -/// The import context contains information needed by the header verification -/// pipeline which is not directly part of the header being imported. This includes -/// information relating to its parent, and the current validator set (which -/// provides _context_ for the current header). -#[derive(RuntimeDebug)] -#[cfg_attr(test, derive(Clone, PartialEq))] -pub struct ImportContext<Submitter> { - submitter: Option<Submitter>, - parent_hash: H256, - parent_header: AuraHeader, - parent_total_difficulty: U256, - parent_scheduled_change: Option<AuraScheduledChange>, - validators_set_id: u64, - validators_set: ValidatorsSet, - last_signal_block: Option<HeaderId>, -} - -impl<Submitter> ImportContext<Submitter> { - /// Returns reference to header submitter (if known). - pub fn submitter(&self) -> Option<&Submitter> { - self.submitter.as_ref() - } - - /// Returns reference to parent header. - pub fn parent_header(&self) -> &AuraHeader { - &self.parent_header - } - - /// Returns total chain difficulty at parent block. - pub fn total_difficulty(&self) -> &U256 { - &self.parent_total_difficulty - } - - /// Returns the validator set change if the parent header has signaled a change. - pub fn parent_scheduled_change(&self) -> Option<&AuraScheduledChange> { - self.parent_scheduled_change.as_ref() - } - - /// Returns id of the set of validators. - pub fn validators_set_id(&self) -> u64 { - self.validators_set_id - } - - /// Returns reference to validators set for the block we're going to import. - pub fn validators_set(&self) -> &ValidatorsSet { - &self.validators_set - } - - /// Returns reference to the latest block which has signaled change of validators set. - /// This may point to parent if parent has signaled change. - pub fn last_signal_block(&self) -> Option<HeaderId> { - match self.parent_scheduled_change { - Some(_) => Some(HeaderId { number: self.parent_header.number, hash: self.parent_hash }), - None => self.last_signal_block, - } - } - - /// Converts import context into header we're going to import. - #[allow(clippy::too_many_arguments)] - pub fn into_import_header( - self, - is_best: bool, - id: HeaderId, - header: AuraHeader, - total_difficulty: U256, - enacted_change: Option<ChangeToEnact>, - scheduled_change: Option<Vec<Address>>, - finality_votes: FinalityVotes<Submitter>, - ) -> HeaderToImport<Submitter> { - HeaderToImport { - context: self, - is_best, - id, - header, - total_difficulty, - enacted_change, - scheduled_change, - finality_votes, - } - } -} - -/// The storage that is used by the client. -/// -/// Storage modifications must be discarded if block import has failed. -pub trait Storage { - /// Header submitter identifier. - type Submitter: Clone + Ord; - - /// Get best known block and total chain difficulty. - fn best_block(&self) -> (HeaderId, U256); - /// Get last finalized block. - fn finalized_block(&self) -> HeaderId; - /// Get imported header by its hash. - /// - /// Returns header and its submitter (if known).
- fn header(&self, hash: &H256) -> Option<(AuraHeader, Option)>; - /// Returns latest cached finality votes (if any) for block ancestors, starting - /// from `parent_hash` block and stopping at genesis block, best finalized block - /// or block where `stop_at` returns true. - fn cached_finality_votes( - &self, - parent: &HeaderId, - best_finalized: &HeaderId, - stop_at: impl Fn(&H256) -> bool, - ) -> CachedFinalityVotes; - /// Get header import context by parent header hash. - fn import_context( - &self, - submitter: Option, - parent_hash: &H256, - ) -> Option>; - /// Get new validators that are scheduled by given header and hash of the previous - /// block that has scheduled change. - fn scheduled_change(&self, hash: &H256) -> Option; - /// Insert imported header. - fn insert_header(&mut self, header: HeaderToImport); - /// Finalize given block and schedules pruning of all headers - /// with number < prune_end. - /// - /// The headers in the pruning range could be either finalized, or not. - /// It is the storage duty to ensure that unfinalized headers that have - /// scheduled changes won't be pruned until they or their competitors - /// are finalized. - fn finalize_and_prune_headers(&mut self, finalized: Option, prune_end: u64); -} - -/// Headers pruning strategy. -pub trait PruningStrategy: Default { - /// Return upper bound (exclusive) of headers pruning range. - /// - /// Every value that is returned from this function, must be greater or equal to the - /// previous value. Otherwise it will be ignored (we can't revert pruning). - /// - /// Pallet may prune both finalized and unfinalized blocks. But it can't give any - /// guarantees on when it will happen. Example: if some unfinalized block at height N - /// has scheduled validators set change, then the module won't prune any blocks with - /// number greater than or equal to N even if strategy allows that. - /// - /// If your strategy allows pruning unfinalized blocks, this could lead to switch - /// between finalized forks (only if authorities are misbehaving). But since 50 percent plus one - /// (or 2/3) authorities are able to do whatever they want with the chain, this isn't considered - /// fatal. If your strategy only prunes finalized blocks, we'll never be able to finalize - /// header that isn't descendant of current best finalized block. - fn pruning_upper_bound(&mut self, best_number: u64, best_finalized_number: u64) -> u64; -} - -/// ChainTime represents the runtime on-chain time -pub trait ChainTime: Default { - /// Is a header timestamp ahead of the current on-chain time. - /// - /// Check whether `timestamp` is ahead (i.e greater than) the current on-chain - /// time. If so, return `true`, `false` otherwise. - fn is_timestamp_ahead(&self, timestamp: u64) -> bool; -} - -/// ChainTime implementation for the empty type. -/// -/// This implementation will allow a runtime without the timestamp pallet to use -/// the empty type as its ChainTime associated type. -impl ChainTime for () { - fn is_timestamp_ahead(&self, _: u64) -> bool { - false - } -} - -/// Callbacks for header submission rewards/penalties. -pub trait OnHeadersSubmitted { - /// Called when valid headers have been submitted. - /// - /// The submitter **must not** be rewarded for submitting valid headers, because greedy - /// authority could produce and submit multiple valid headers (without relaying them to other - /// peers) and get rewarded. 
Instead, the provider could track submitters and stop rewarding if - /// too many headers have been submitted without finalization. - fn on_valid_headers_submitted(submitter: AccountId, useful: u64, useless: u64); - /// Called when invalid headers have been submitted. - fn on_invalid_headers_submitted(submitter: AccountId); - /// Called when earlier submitted headers have been finalized. - /// - /// `finalized` is the number of headers that the submitter has submitted and which - /// have been finalized. - fn on_valid_headers_finalized(submitter: AccountId, finalized: u64); -} - -impl<AccountId> OnHeadersSubmitted<AccountId> for () { - fn on_valid_headers_submitted(_submitter: AccountId, _useful: u64, _useless: u64) {} - fn on_invalid_headers_submitted(_submitter: AccountId) {} - fn on_valid_headers_finalized(_submitter: AccountId, _finalized: u64) {} -} - -pub use pallet::*; - -#[frame_support::pallet] -pub mod pallet { - use super::*; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - #[pallet::config] - pub trait Config<I: 'static = ()>: frame_system::Config { - /// Aura configuration. - type AuraConfiguration: Get<AuraConfiguration>; - /// Validators configuration. - type ValidatorsConfiguration: Get<ValidatorsConfiguration>; - - /// Interval (in blocks) for finality votes caching. - /// If None, the cache is disabled. - /// - /// Ideally, this should either be None (when we are sure that there won't - /// be any significant finalization delays), or something that is a bit larger - /// than the average finalization delay. - type FinalityVotesCachingInterval: Get<Option<u64>>; - /// Headers pruning strategy. - type PruningStrategy: PruningStrategy; - /// Header timestamp verification against current on-chain time. - type ChainTime: ChainTime; - - /// Handler for headers submission result. - type OnHeadersSubmitted: OnHeadersSubmitted<Self::AccountId>; - } - - #[pallet::pallet] - #[pallet::generate_store(pub(super) trait Store)] - pub struct Pallet<T, I = ()>(PhantomData<(T, I)>); - - #[pallet::hooks] - impl<T: Config<I>, I: 'static> Hooks<BlockNumberFor<T>> for Pallet<T, I> {} - - #[pallet::call] - impl<T: Config<I>, I: 'static> Pallet<T, I> { - /// Import single Aura header. Requires transaction to be **UNSIGNED**. - #[pallet::weight(0)] // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - pub fn import_unsigned_header( - origin: OriginFor<T>, - header: Box<AuraHeader>, - receipts: Option<Vec<Receipt>>, - ) -> DispatchResult { - frame_system::ensure_none(origin)?; - - import::import_header( - &mut BridgeStorage::<T, I>::new(), - &mut T::PruningStrategy::default(), - &T::AuraConfiguration::get(), - &T::ValidatorsConfiguration::get(), - None, - *header, - &T::ChainTime::default(), - receipts, - ) - .map_err(|e| e.msg())?; - - Ok(()) - } - - /// Import Aura chain headers in a single **SIGNED** transaction. - /// Ignores non-fatal errors (like when a known header is provided), rewards - /// for successful headers import and penalizes for fatal errors. - /// - /// This should be used with caution - passing too many headers could lead to - /// enormous block production/import time.
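Because of that warning, a relay submitting signed batches would bound the batch size itself; the pallet does not. A caller-side sketch, where `MAX_HEADERS_PER_TX` is an assumed relay-side limit rather than a pallet constant:

// Split pending headers into bounded batches before building
// `import_signed_headers` transactions. Sketch only.
const MAX_HEADERS_PER_TX: usize = 4;

fn batch_headers<H>(pending: Vec<H>) -> impl Iterator<Item = Vec<H>> {
    let mut pending = pending.into_iter().peekable();
    std::iter::from_fn(move || {
        pending.peek()?; // stop once everything has been drained
        Some(pending.by_ref().take(MAX_HEADERS_PER_TX).collect::<Vec<H>>())
    })
}

With ten pending headers this yields batches of 4, 4 and 2, keeping per-transaction import work (and hence block production/import time) bounded.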
- #[pallet::weight(0)] // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - pub fn import_signed_headers( - origin: OriginFor, - headers_with_receipts: Vec<(AuraHeader, Option>)>, - ) -> DispatchResult { - let submitter = frame_system::ensure_signed(origin)?; - let mut finalized_headers = BTreeMap::new(); - let import_result = import::import_headers( - &mut BridgeStorage::::new(), - &mut T::PruningStrategy::default(), - &T::AuraConfiguration::get(), - &T::ValidatorsConfiguration::get(), - Some(submitter.clone()), - headers_with_receipts, - &T::ChainTime::default(), - &mut finalized_headers, - ); - - // if we have finalized some headers, we will reward their submitters even - // if current submitter has provided some invalid headers - for (f_submitter, f_count) in finalized_headers { - T::OnHeadersSubmitted::on_valid_headers_finalized(f_submitter, f_count); - } - - // now track/penalize current submitter for providing new headers - match import_result { - Ok((useful, useless)) => - T::OnHeadersSubmitted::on_valid_headers_submitted(submitter, useful, useless), - Err(error) => { - // even though we may have accept some headers, we do not want to reward someone - // who provides invalid headers - T::OnHeadersSubmitted::on_invalid_headers_submitted(submitter); - return Err(error.msg().into()) - }, - } - - Ok(()) - } - } - - #[pallet::validate_unsigned] - impl, I: 'static> ValidateUnsigned for Pallet { - type Call = Call; - - fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { - match *call { - Self::Call::import_unsigned_header { ref header, ref receipts } => { - let accept_result = verification::accept_aura_header_into_pool( - &BridgeStorage::::new(), - &T::AuraConfiguration::get(), - &T::ValidatorsConfiguration::get(), - &pool_configuration(), - header, - &T::ChainTime::default(), - receipts.as_ref(), - ); - - match accept_result { - Ok((requires, provides)) => Ok(ValidTransaction { - priority: TransactionPriority::max_value(), - requires, - provides, - longevity: TransactionLongevity::max_value(), - propagate: true, - }), - // UnsignedTooFarInTheFuture is the special error code used to limit - // number of transactions in the pool - we do not want to ban transaction - // in this case (see verification.rs for details) - Err(error::Error::UnsignedTooFarInTheFuture) => UnknownTransaction::Custom( - error::Error::UnsignedTooFarInTheFuture.code(), - ) - .into(), - Err(error) => InvalidTransaction::Custom(error.code()).into(), - } - }, - _ => InvalidTransaction::Call.into(), - } - } - } - - /// Best known block. - #[pallet::storage] - pub(super) type BestBlock, I: 'static = ()> = - StorageValue<_, (HeaderId, U256), ValueQuery>; - - /// Best finalized block. - #[pallet::storage] - pub(super) type FinalizedBlock, I: 'static = ()> = - StorageValue<_, HeaderId, ValueQuery>; - - /// Range of blocks that we want to prune. - #[pallet::storage] - pub(super) type BlocksToPrune, I: 'static = ()> = - StorageValue<_, PruningRange, ValueQuery>; - - /// Map of imported headers by hash. - #[pallet::storage] - pub(super) type Headers, I: 'static = ()> = - StorageMap<_, Identity, H256, StoredHeader>; - - /// Map of imported header hashes by number. - #[pallet::storage] - pub(super) type HeadersByNumber, I: 'static = ()> = - StorageMap<_, Blake2_128Concat, u64, Vec>; - - /// Map of cached finality data by header hash. 
- #[pallet::storage] - pub(super) type FinalityCache<T: Config<I>, I: 'static = ()> = - StorageMap<_, Identity, H256, FinalityVotes<T::AccountId>>; - - /// The ID of the next validator set. - #[pallet::storage] - pub(super) type NextValidatorsSetId<T: Config<I>, I: 'static = ()> = - StorageValue<_, u64, ValueQuery>; - - /// Map of validators sets by their id. - #[pallet::storage] - pub(super) type ValidatorsSets<T: Config<I>, I: 'static = ()> = - StorageMap<_, Twox64Concat, u64, ValidatorsSet>; - - /// Validators sets reference count. Each header that is authored by this set increases - /// the reference count. When we prune this header, we decrease the reference count. - /// When it reaches zero, we are free to prune the validators set as well. - #[pallet::storage] - pub(super) type ValidatorsSetsRc<T: Config<I>, I: 'static = ()> = - StorageMap<_, Twox64Concat, u64, u64>; - - /// Map of validators set changes scheduled by given header. - #[pallet::storage] - pub(super) type ScheduledChanges<T: Config<I>, I: 'static = ()> = - StorageMap<_, Identity, H256, AuraScheduledChange>; - - #[pallet::genesis_config] - #[cfg_attr(feature = "std", derive(Default))] - pub struct GenesisConfig { - /// PoA header to start with. - pub initial_header: AuraHeader, - /// Initial PoA chain difficulty. - pub initial_difficulty: U256, - /// Initial PoA validators set. - pub initial_validators: Vec<Address>, - } - - #[pallet::genesis_build] - impl<T: Config<I>, I: 'static> GenesisBuild<T, I> for GenesisConfig { - fn build(&self) { - // the initial block should be selected so that: - // 1) it doesn't signal validators changes; - // 2) there are no scheduled validators changes from previous blocks; - // 3) (implied) all direct children of the initial block are authored by the same validators - // set. - - assert!(!self.initial_validators.is_empty(), "Initial validators set can't be empty",); - - initialize_storage::<T, I>( - &self.initial_header, - self.initial_difficulty, - &self.initial_validators, - ); - } - } -} - -impl<T: Config<I>, I: 'static> Pallet<T, I> { - /// Returns number and hash of the best block known to the bridge module. - /// The caller should only submit an `import_header` transaction that makes - /// (or leads to making) another header the best one. - pub fn best_block() -> HeaderId { - BridgeStorage::<T, I>::new().best_block().0 - } - - /// Returns number and hash of the best finalized block known to the bridge module. - pub fn finalized_block() -> HeaderId { - BridgeStorage::<T, I>::new().finalized_block() - } - - /// Returns true if the import of the given block requires transactions receipts. - pub fn is_import_requires_receipts(header: AuraHeader) -> bool { - import::header_import_requires_receipts( - &BridgeStorage::<T, I>::new(), - &T::ValidatorsConfiguration::get(), - &header, - ) - } - - /// Returns true if the header is known to the runtime. - pub fn is_known_block(hash: H256) -> bool { - BridgeStorage::<T, I>::new().header(&hash).is_some() - } - - /// Verify that the transaction is included into the given finalized block. - pub fn verify_transaction_finalized( - block: H256, - tx_index: u64, - proof: &[(RawTransaction, RawTransactionReceipt)], - ) -> bool { - crate::verify_transaction_finalized(&BridgeStorage::<T, I>::new(), block, tx_index, proof) - } -} - -/// Runtime bridge storage. -#[derive(Default)] -pub struct BridgeStorage<T, I = ()>(sp_std::marker::PhantomData<(T, I)>); - -impl<T: Config<I>, I: 'static> BridgeStorage<T, I> { - /// Create a new BridgeStorage. - pub fn new() -> Self { - BridgeStorage(sp_std::marker::PhantomData::<(T, I)>::default()) - } - - /// Prune old blocks. 
- fn prune_blocks(&self, mut max_blocks_to_prune: u64, finalized_number: u64, prune_end: u64) { - let pruning_range = BlocksToPrune::::get(); - let mut new_pruning_range = pruning_range.clone(); - - // update oldest block we want to keep - if prune_end > new_pruning_range.oldest_block_to_keep { - new_pruning_range.oldest_block_to_keep = prune_end; - } - - // start pruning blocks - let begin = new_pruning_range.oldest_unpruned_block; - let end = new_pruning_range.oldest_block_to_keep; - log::trace!(target: "runtime", "Pruning blocks in range [{}..{})", begin, end); - for number in begin..end { - // if we can't prune anything => break - if max_blocks_to_prune == 0 { - break - } - - // read hashes of blocks with given number and try to prune these blocks - let blocks_at_number = HeadersByNumber::::take(number); - if let Some(mut blocks_at_number) = blocks_at_number { - self.prune_blocks_by_hashes( - &mut max_blocks_to_prune, - finalized_number, - number, - &mut blocks_at_number, - ); - - // if we haven't pruned all blocks, remember unpruned - if !blocks_at_number.is_empty() { - HeadersByNumber::::insert(number, blocks_at_number); - break - } - } - - // we have pruned all headers at number - new_pruning_range.oldest_unpruned_block = number + 1; - log::trace!( - target: "runtime", - "Oldest unpruned PoA header is now: {}", - new_pruning_range.oldest_unpruned_block, - ); - } - - // update pruning range in storage - if pruning_range != new_pruning_range { - BlocksToPrune::::put(new_pruning_range); - } - } - - /// Prune old blocks with given hashes. - fn prune_blocks_by_hashes( - &self, - max_blocks_to_prune: &mut u64, - finalized_number: u64, - number: u64, - blocks_at_number: &mut Vec, - ) { - // ensure that unfinalized headers we want to prune do not have scheduled changes - if number > finalized_number && - blocks_at_number.iter().any(ScheduledChanges::::contains_key) - { - return - } - - // physically remove headers and (probably) obsolete validators sets - while let Some(hash) = blocks_at_number.pop() { - let header = Headers::::take(&hash); - log::trace!( - target: "runtime", - "Pruning PoA header: ({}, {})", - number, - hash, - ); - - ScheduledChanges::::remove(hash); - FinalityCache::::remove(hash); - if let Some(header) = header { - ValidatorsSetsRc::::mutate(header.next_validators_set_id, |rc| match *rc { - Some(rc) if rc > 1 => Some(rc - 1), - _ => None, - }); - } - - // check if we have already pruned too much headers in this call - *max_blocks_to_prune -= 1; - if *max_blocks_to_prune == 0 { - return - } - } - } -} - -impl, I: 'static> Storage for BridgeStorage { - type Submitter = T::AccountId; - - fn best_block(&self) -> (HeaderId, U256) { - BestBlock::::get() - } - - fn finalized_block(&self) -> HeaderId { - FinalizedBlock::::get() - } - - fn header(&self, hash: &H256) -> Option<(AuraHeader, Option)> { - Headers::::get(hash).map(|header| (header.header, header.submitter)) - } - - fn cached_finality_votes( - &self, - parent: &HeaderId, - best_finalized: &HeaderId, - stop_at: impl Fn(&H256) -> bool, - ) -> CachedFinalityVotes { - let mut votes = CachedFinalityVotes::default(); - let mut current_id = *parent; - loop { - // if we have reached finalized block's sibling => stop with special signal - if current_id.number == best_finalized.number && current_id.hash != best_finalized.hash - { - votes.stopped_at_finalized_sibling = true; - return votes - } - - // if we have reached target header => stop - if stop_at(¤t_id.hash) { - return votes - } - - // if we have found cached votes => 
stop - let cached_votes = FinalityCache::::get(¤t_id.hash); - if let Some(cached_votes) = cached_votes { - votes.votes = Some(cached_votes); - return votes - } - - // read next parent header id - let header = match Headers::::get(¤t_id.hash) { - Some(header) if header.header.number != 0 => header, - _ => return votes, - }; - let parent_id = header.header.parent_id().expect( - "only returns None at genesis header;\ - the header is proved to have number > 0;\ - qed", - ); - - votes - .unaccounted_ancestry - .push_back((current_id, header.submitter, header.header)); - - current_id = parent_id; - } - } - - fn import_context( - &self, - submitter: Option, - parent_hash: &H256, - ) -> Option> { - Headers::::get(parent_hash).map(|parent_header| { - let validators_set = ValidatorsSets::::get(parent_header.next_validators_set_id) - .expect( - "validators set is only pruned when last ref is pruned; there is a ref; qed", - ); - let parent_scheduled_change = ScheduledChanges::::get(parent_hash); - ImportContext { - submitter, - parent_hash: *parent_hash, - parent_header: parent_header.header, - parent_total_difficulty: parent_header.total_difficulty, - parent_scheduled_change, - validators_set_id: parent_header.next_validators_set_id, - validators_set, - last_signal_block: parent_header.last_signal_block, - } - }) - } - - fn scheduled_change(&self, hash: &H256) -> Option { - ScheduledChanges::::get(hash) - } - - fn insert_header(&mut self, header: HeaderToImport) { - if header.is_best { - BestBlock::::put((header.id, header.total_difficulty)); - } - if let Some(scheduled_change) = header.scheduled_change { - ScheduledChanges::::insert( - &header.id.hash, - AuraScheduledChange { - validators: scheduled_change, - prev_signal_block: header.context.last_signal_block, - }, - ); - } - let next_validators_set_id = match header.enacted_change { - Some(enacted_change) => { - let next_validators_set_id = NextValidatorsSetId::::mutate(|set_id| { - let next_set_id = *set_id; - *set_id += 1; - next_set_id - }); - ValidatorsSets::::insert( - next_validators_set_id, - ValidatorsSet { - validators: enacted_change.validators, - enact_block: header.id, - signal_block: enacted_change.signal_block, - }, - ); - ValidatorsSetsRc::::insert(next_validators_set_id, 1); - next_validators_set_id - }, - None => { - ValidatorsSetsRc::::mutate(header.context.validators_set_id, |rc| { - *rc = Some(rc.map(|rc| rc + 1).unwrap_or(1)); - *rc - }); - header.context.validators_set_id - }, - }; - - let finality_votes_caching_interval = T::FinalityVotesCachingInterval::get(); - if let Some(finality_votes_caching_interval) = finality_votes_caching_interval { - let cache_entry_required = - header.id.number != 0 && header.id.number % finality_votes_caching_interval == 0; - if cache_entry_required { - FinalityCache::::insert(header.id.hash, header.finality_votes); - } - } - - log::trace!( - target: "runtime", - "Inserting PoA header: ({}, {})", - header.header.number, - header.id.hash, - ); - - let last_signal_block = header.context.last_signal_block(); - HeadersByNumber::::append(header.id.number, header.id.hash); - Headers::::insert( - &header.id.hash, - StoredHeader { - submitter: header.context.submitter, - header: header.header, - total_difficulty: header.total_difficulty, - next_validators_set_id, - last_signal_block, - }, - ); - } - - fn finalize_and_prune_headers(&mut self, finalized: Option, prune_end: u64) { - // remember just finalized block - let finalized_number = finalized - .as_ref() - .map(|f| f.number) - .unwrap_or_else(|| 
FinalizedBlock::::get().number); - if let Some(finalized) = finalized { - log::trace!( - target: "runtime", - "Finalizing PoA header: ({}, {})", - finalized.number, - finalized.hash, - ); - - FinalizedBlock::::put(finalized); - } - - // and now prune headers if we need to - self.prune_blocks(MAX_BLOCKS_TO_PRUNE_IN_SINGLE_IMPORT, finalized_number, prune_end); - } -} - -/// Initialize storage. -#[cfg(any(feature = "std", feature = "runtime-benchmarks"))] -pub(crate) fn initialize_storage, I: 'static>( - initial_header: &AuraHeader, - initial_difficulty: U256, - initial_validators: &[Address], -) { - let initial_hash = initial_header.compute_hash(); - log::trace!( - target: "runtime", - "Initializing bridge with PoA header: ({}, {})", - initial_header.number, - initial_hash, - ); - - let initial_id = HeaderId { number: initial_header.number, hash: initial_hash }; - BestBlock::::put((initial_id, initial_difficulty)); - FinalizedBlock::::put(initial_id); - BlocksToPrune::::put(PruningRange { - oldest_unpruned_block: initial_header.number, - oldest_block_to_keep: initial_header.number, - }); - HeadersByNumber::::insert(initial_header.number, vec![initial_hash]); - Headers::::insert( - initial_hash, - StoredHeader { - submitter: None, - header: initial_header.clone(), - total_difficulty: initial_difficulty, - next_validators_set_id: 0, - last_signal_block: None, - }, - ); - NextValidatorsSetId::::put(1); - ValidatorsSets::::insert( - 0, - ValidatorsSet { - validators: initial_validators.to_vec(), - signal_block: None, - enact_block: initial_id, - }, - ); - ValidatorsSetsRc::::insert(0, 1); -} - -/// Verify that transaction is included into given finalized block. -pub fn verify_transaction_finalized( - storage: &S, - block: H256, - tx_index: u64, - proof: &[(RawTransaction, RawTransactionReceipt)], -) -> bool { - if tx_index >= proof.len() as _ { - log::trace!( - target: "runtime", - "Tx finality check failed: transaction index ({}) is larger than number of transactions ({})", - tx_index, - proof.len(), - ); - - return false - } - - let header = match storage.header(&block) { - Some((header, _)) => header, - None => { - log::trace!( - target: "runtime", - "Tx finality check failed: can't find header in the storage: {}", - block, - ); - - return false - }, - }; - let finalized = storage.finalized_block(); - - // if header is not yet finalized => return - if header.number > finalized.number { - log::trace!( - target: "runtime", - "Tx finality check failed: header {}/{} is not finalized. Best finalized: {}", - header.number, - block, - finalized.number, - ); - - return false - } - - // check if header is actually finalized - let is_finalized = match header.number < finalized.number { - true => ancestry(storage, finalized.hash) - .skip_while(|(_, ancestor)| ancestor.number > header.number) - .any(|(ancestor_hash, _)| ancestor_hash == block), - false => block == finalized.hash, - }; - if !is_finalized { - log::trace!( - target: "runtime", - "Tx finality check failed: header {} is not finalized: no canonical path to best finalized block {}", - block, - finalized.hash, - ); - - return false - } - - // verify that transaction is included in the block - if let Err(computed_root) = header.check_transactions_root(proof.iter().map(|(tx, _)| tx)) { - log::trace!( - target: "runtime", - "Tx finality check failed: transactions root mismatch. 
Expected: {}, computed: {}", - header.transactions_root, - computed_root, - ); - - return false - } - - // verify that transaction receipt is included in the block - if let Err(computed_root) = header.check_raw_receipts_root(proof.iter().map(|(_, r)| r)) { - log::trace!( - target: "runtime", - "Tx finality check failed: receipts root mismatch. Expected: {}, computed: {}", - header.receipts_root, - computed_root, - ); - - return false - } - - // check that transaction has completed successfully - let is_successful_raw_receipt = Receipt::is_successful_raw_receipt(&proof[tx_index as usize].1); - match is_successful_raw_receipt { - Ok(true) => true, - Ok(false) => { - log::trace!( - target: "runtime", - "Tx finality check failed: receipt shows that transaction has failed", - ); - - false - }, - Err(err) => { - log::trace!( - target: "runtime", - "Tx finality check failed: receipt check has failed: {}", - err, - ); - - false - }, - } -} - -/// Transaction pool configuration. -fn pool_configuration() -> PoolConfiguration { - PoolConfiguration { max_future_number_difference: 10 } -} - -/// Return iterator of given header ancestors. -fn ancestry( - storage: &'_ S, - mut parent_hash: H256, -) -> impl Iterator + '_ { - sp_std::iter::from_fn(move || { - let (header, _) = storage.header(&parent_hash)?; - if header.number == 0 { - return None - } - - let hash = parent_hash; - parent_hash = header.parent_hash; - Some((hash, header)) - }) -} - -#[cfg(test)] -pub(crate) mod tests { - use super::*; - use crate::{ - finality::FinalityAncestor, - mock::{ - genesis, insert_header, run_test, run_test_with_genesis, validators_addresses, - HeaderBuilder, TestRuntime, GAS_LIMIT, - }, - test_utils::validator_utils::*, - }; - use bp_eth_poa::compute_merkle_root; - - const TOTAL_VALIDATORS: usize = 3; - - fn example_tx() -> Vec { - vec![42] - } - - fn example_tx_receipt(success: bool) -> Vec { - Receipt { - // the only thing that we care of: - outcome: bp_eth_poa::TransactionOutcome::StatusCode(if success { 1 } else { 0 }), - gas_used: Default::default(), - log_bloom: Default::default(), - logs: Vec::new(), - } - .rlp() - } - - fn example_header_with_failed_receipt() -> AuraHeader { - HeaderBuilder::with_parent(&example_header()) - .transactions_root(compute_merkle_root(vec![example_tx()].into_iter())) - .receipts_root(compute_merkle_root(vec![example_tx_receipt(false)].into_iter())) - .sign_by(&validator(0)) - } - - fn example_header() -> AuraHeader { - HeaderBuilder::with_parent(&example_header_parent()) - .transactions_root(compute_merkle_root(vec![example_tx()].into_iter())) - .receipts_root(compute_merkle_root(vec![example_tx_receipt(true)].into_iter())) - .sign_by(&validator(0)) - } - - fn example_header_parent() -> AuraHeader { - HeaderBuilder::with_parent(&genesis()) - .transactions_root(compute_merkle_root(vec![example_tx()].into_iter())) - .receipts_root(compute_merkle_root(vec![example_tx_receipt(true)].into_iter())) - .sign_by(&validator(0)) - } - - fn with_headers_to_prune(f: impl Fn(BridgeStorage) -> T) -> T { - run_test(TOTAL_VALIDATORS, |ctx| { - for i in 1..10 { - let mut headers_by_number = Vec::with_capacity(5); - for j in 0..5 { - let header = HeaderBuilder::with_parent_number(i - 1) - .gas_limit((GAS_LIMIT + j).into()) - .sign_by_set(&ctx.validators); - let hash = header.compute_hash(); - headers_by_number.push(hash); - Headers::::insert( - hash, - StoredHeader { - submitter: None, - header, - total_difficulty: 0.into(), - next_validators_set_id: 0, - last_signal_block: None, - }, - ); - - if i 
== 7 && j == 1 { - ScheduledChanges::::insert( - hash, - AuraScheduledChange { - validators: validators_addresses(5), - prev_signal_block: None, - }, - ); - } - } - HeadersByNumber::::insert(i, headers_by_number); - } - - f(BridgeStorage::new()) - }) - } - - #[test] - fn blocks_are_not_pruned_if_range_is_empty() { - with_headers_to_prune(|storage| { - BlocksToPrune::::put(PruningRange { - oldest_unpruned_block: 5, - oldest_block_to_keep: 5, - }); - - // try to prune blocks [5; 10) - storage.prune_blocks(0xFFFF, 10, 5); - assert_eq!(HeadersByNumber::::get(&5).unwrap().len(), 5); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { oldest_unpruned_block: 5, oldest_block_to_keep: 5 }, - ); - }); - } - - #[test] - fn blocks_to_prune_never_shrinks_from_the_end() { - with_headers_to_prune(|storage| { - BlocksToPrune::::put(PruningRange { - oldest_unpruned_block: 0, - oldest_block_to_keep: 5, - }); - - // try to prune blocks [5; 10) - storage.prune_blocks(0xFFFF, 10, 3); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { oldest_unpruned_block: 5, oldest_block_to_keep: 5 }, - ); - }); - } - - #[test] - fn blocks_are_not_pruned_if_limit_is_zero() { - with_headers_to_prune(|storage| { - // try to prune blocks [0; 10) - storage.prune_blocks(0, 10, 10); - assert!(HeadersByNumber::::get(&0).is_some()); - assert!(HeadersByNumber::::get(&1).is_some()); - assert!(HeadersByNumber::::get(&2).is_some()); - assert!(HeadersByNumber::::get(&3).is_some()); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { oldest_unpruned_block: 0, oldest_block_to_keep: 10 }, - ); - }); - } - - #[test] - fn blocks_are_pruned_if_limit_is_non_zero() { - with_headers_to_prune(|storage| { - // try to prune blocks [0; 10) - storage.prune_blocks(7, 10, 10); - // 1 headers with number = 0 is pruned (1 total) - assert!(HeadersByNumber::::get(&0).is_none()); - // 5 headers with number = 1 are pruned (6 total) - assert!(HeadersByNumber::::get(&1).is_none()); - // 1 header with number = 2 are pruned (7 total) - assert_eq!(HeadersByNumber::::get(&2).unwrap().len(), 4); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { oldest_unpruned_block: 2, oldest_block_to_keep: 10 }, - ); - - // try to prune blocks [2; 10) - storage.prune_blocks(11, 10, 10); - // 4 headers with number = 2 are pruned (4 total) - assert!(HeadersByNumber::::get(&2).is_none()); - // 5 headers with number = 3 are pruned (9 total) - assert!(HeadersByNumber::::get(&3).is_none()); - // 2 headers with number = 4 are pruned (11 total) - assert_eq!(HeadersByNumber::::get(&4).unwrap().len(), 3); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { oldest_unpruned_block: 4, oldest_block_to_keep: 10 }, - ); - }); - } - - #[test] - fn pruning_stops_on_unfainalized_block_with_scheduled_change() { - with_headers_to_prune(|storage| { - // try to prune blocks [0; 10) - // last finalized block is 5 - // and one of blocks#7 has scheduled change - // => we won't prune any block#7 at all - storage.prune_blocks(0xFFFF, 5, 10); - assert!(HeadersByNumber::::get(&0).is_none()); - assert!(HeadersByNumber::::get(&1).is_none()); - assert!(HeadersByNumber::::get(&2).is_none()); - assert!(HeadersByNumber::::get(&3).is_none()); - assert!(HeadersByNumber::::get(&4).is_none()); - assert!(HeadersByNumber::::get(&5).is_none()); - assert!(HeadersByNumber::::get(&6).is_none()); - assert_eq!(HeadersByNumber::::get(&7).unwrap().len(), 5); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { oldest_unpruned_block: 7, oldest_block_to_keep: 10 }, - ); - }); - } - - #[test] - fn 
finality_votes_are_cached() { - run_test(TOTAL_VALIDATORS, |ctx| { - let mut storage = BridgeStorage::::new(); - let interval = ::FinalityVotesCachingInterval::get().unwrap(); - - // for all headers with number < interval, cache entry is not created - for i in 1..interval { - let header = HeaderBuilder::with_parent_number(i - 1).sign_by_set(&ctx.validators); - let id = header.compute_id(); - insert_header(&mut storage, header); - assert_eq!(FinalityCache::::get(&id.hash), None); - } - - // for header with number = interval, cache entry is created - let header_with_entry = - HeaderBuilder::with_parent_number(interval - 1).sign_by_set(&ctx.validators); - let header_with_entry_hash = header_with_entry.compute_hash(); - insert_header(&mut storage, header_with_entry); - assert!(FinalityCache::::get(&header_with_entry_hash).is_some()); - - // when we later prune this header, cache entry is removed - BlocksToPrune::::put(PruningRange { - oldest_unpruned_block: interval - 1, - oldest_block_to_keep: interval - 1, - }); - storage.finalize_and_prune_headers(None, interval + 1); - assert_eq!(FinalityCache::::get(&header_with_entry_hash), None); - }); - } - - #[test] - fn cached_finality_votes_finds_entry() { - run_test(TOTAL_VALIDATORS, |ctx| { - // insert 5 headers - let mut storage = BridgeStorage::::new(); - let mut headers = Vec::new(); - for i in 1..5 { - let header = HeaderBuilder::with_parent_number(i - 1).sign_by_set(&ctx.validators); - headers.push(header.clone()); - insert_header(&mut storage, header); - } - - // when inserting header#6, entry isn't found - let id5 = headers.last().unwrap().compute_id(); - assert_eq!( - storage.cached_finality_votes(&id5, &genesis().compute_id(), |_| false), - CachedFinalityVotes { - stopped_at_finalized_sibling: false, - unaccounted_ancestry: headers - .iter() - .map(|header| (header.compute_id(), None, header.clone(),)) - .rev() - .collect(), - votes: None, - }, - ); - - // let's now create entry at #3 - let hash3 = headers[2].compute_hash(); - let votes_at_3 = FinalityVotes { - votes: vec![([42; 20].into(), 21)].into_iter().collect(), - ancestry: vec![FinalityAncestor { - id: HeaderId { number: 100, hash: Default::default() }, - ..Default::default() - }] - .into_iter() - .collect(), - }; - FinalityCache::::insert(hash3, votes_at_3.clone()); - - // searching at #6 again => entry is found - assert_eq!( - storage.cached_finality_votes(&id5, &genesis().compute_id(), |_| false), - CachedFinalityVotes { - stopped_at_finalized_sibling: false, - unaccounted_ancestry: headers - .iter() - .skip(3) - .map(|header| (header.compute_id(), None, header.clone(),)) - .rev() - .collect(), - votes: Some(votes_at_3), - }, - ); - }); - } - - #[test] - fn cached_finality_votes_stops_at_finalized_sibling() { - run_test(TOTAL_VALIDATORS, |ctx| { - let mut storage = BridgeStorage::::new(); - - // insert header1 - let header1 = HeaderBuilder::with_parent_number(0).sign_by_set(&ctx.validators); - let header1_id = header1.compute_id(); - insert_header(&mut storage, header1); - - // insert header1' - sibling of header1 - let header1s = HeaderBuilder::with_parent_number(0) - .gas_limit((GAS_LIMIT + 1).into()) - .sign_by_set(&ctx.validators); - let header1s_id = header1s.compute_id(); - insert_header(&mut storage, header1s); - - // header1 is finalized - FinalizedBlock::::put(header1_id); - - // trying to get finality votes when importing header2 -> header1 succeeds - assert!( - !storage - .cached_finality_votes(&header1_id, &genesis().compute_id(), |_| false) - 
.stopped_at_finalized_sibling - ); - - // trying to get finality votes when importing header2s -> header1s fails - assert!( - storage - .cached_finality_votes(&header1s_id, &header1_id, |_| false) - .stopped_at_finalized_sibling - ); - }); - } - - #[test] - fn verify_transaction_finalized_works_for_best_finalized_header() { - run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - assert!(verify_transaction_finalized( - &storage, - example_header().compute_hash(), - 0, - &[(example_tx(), example_tx_receipt(true))], - )); - }); - } - - #[test] - fn verify_transaction_finalized_works_for_best_finalized_header_ancestor() { - run_test(TOTAL_VALIDATORS, |_| { - let mut storage = BridgeStorage::::new(); - insert_header(&mut storage, example_header_parent()); - insert_header(&mut storage, example_header()); - storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0); - assert!(verify_transaction_finalized( - &storage, - example_header_parent().compute_hash(), - 0, - &[(example_tx(), example_tx_receipt(true))], - )); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_proof_with_missing_tx() { - run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - assert!(!verify_transaction_finalized( - &storage, - example_header().compute_hash(), - 1, - &[], - ),); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_unknown_header() { - run_test(TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - assert!(!verify_transaction_finalized( - &storage, - example_header().compute_hash(), - 1, - &[], - )); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_unfinalized_header() { - run_test(TOTAL_VALIDATORS, |_| { - let mut storage = BridgeStorage::::new(); - insert_header(&mut storage, example_header_parent()); - insert_header(&mut storage, example_header()); - assert!(!verify_transaction_finalized( - &storage, - example_header().compute_hash(), - 0, - &[(example_tx(), example_tx_receipt(true))], - )); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_finalized_header_sibling() { - run_test(TOTAL_VALIDATORS, |_| { - let mut finalized_header_sibling = example_header(); - finalized_header_sibling.timestamp = 1; - let finalized_header_sibling_hash = finalized_header_sibling.compute_hash(); - - let mut storage = BridgeStorage::::new(); - insert_header(&mut storage, example_header_parent()); - insert_header(&mut storage, example_header()); - insert_header(&mut storage, finalized_header_sibling); - storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0); - assert!(!verify_transaction_finalized( - &storage, - finalized_header_sibling_hash, - 0, - &[(example_tx(), example_tx_receipt(true))], - )); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_finalized_header_uncle() { - run_test(TOTAL_VALIDATORS, |_| { - let mut finalized_header_uncle = example_header_parent(); - finalized_header_uncle.timestamp = 1; - let finalized_header_uncle_hash = finalized_header_uncle.compute_hash(); - - let mut storage = BridgeStorage::::new(); - insert_header(&mut storage, example_header_parent()); - insert_header(&mut storage, finalized_header_uncle); - insert_header(&mut storage, example_header()); - storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0); - assert!(!verify_transaction_finalized( - &storage, - finalized_header_uncle_hash, - 0, - &[(example_tx(), example_tx_receipt(true))], - )); - }); - } - - 
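The proof these tests pass around is simply the block's full list of `(raw_transaction, raw_receipt)` pairs, so the verifier can recompute both trie roots and compare them against the header. A minimal sketch of that root comparison, assuming the `compute_merkle_root` helper from `bp_eth_poa` that the tests above already use (`HypotheticalHeader` is illustrative, not the pallet's header type):

use bp_eth_poa::{compute_merkle_root, H256};

/// Illustrative stand-in for the two root fields the verifier checks.
struct HypotheticalHeader {
	transactions_root: H256,
	receipts_root: H256,
}

/// Returns true iff both roots recomputed from the proof match the header.
fn roots_match(header: &HypotheticalHeader, proof: &[(Vec<u8>, Vec<u8>)]) -> bool {
	let tx_root = compute_merkle_root(proof.iter().map(|(tx, _)| tx.clone()));
	let receipts_root = compute_merkle_root(proof.iter().map(|(_, r)| r.clone()));
	tx_root == header.transactions_root && receipts_root == header.receipts_root
}

The real `verify_transaction_finalized` additionally requires the referenced header to be the finalized block or one of its ancestors, and requires the receipt at `tx_index` to report a successful outcome.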
#[test] - fn verify_transaction_finalized_rejects_invalid_transactions_in_proof() { - run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - assert!(!verify_transaction_finalized( - &storage, - example_header().compute_hash(), - 0, - &[ - (example_tx(), example_tx_receipt(true)), - (example_tx(), example_tx_receipt(true)) - ], - )); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_invalid_receipts_in_proof() { - run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - assert!(!verify_transaction_finalized( - &storage, - example_header().compute_hash(), - 0, - &[(example_tx(), vec![42])], - )); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_failed_transaction() { - run_test_with_genesis(example_header_with_failed_receipt(), TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - assert!(!verify_transaction_finalized( - &storage, - example_header_with_failed_receipt().compute_hash(), - 0, - &[(example_tx(), example_tx_receipt(false))], - )); - }); - } -} diff --git a/modules/ethereum/src/mock.rs b/modules/ethereum/src/mock.rs deleted file mode 100644 index 877f7a9dc11f..000000000000 --- a/modules/ethereum/src/mock.rs +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -// From construct_runtime macro -#![allow(clippy::from_over_into)] - -pub use crate::test_utils::{ - insert_header, validator_utils::*, validators_change_receipt, HeaderBuilder, GAS_LIMIT, -}; -pub use bp_eth_poa::signatures::secret_to_address; - -use crate::{ - validators::{ValidatorsConfiguration, ValidatorsSource}, - AuraConfiguration, ChainTime, Config, GenesisConfig as CrateGenesisConfig, PruningStrategy, -}; -use bp_eth_poa::{Address, AuraHeader, H256, U256}; -use frame_support::{parameter_types, traits::GenesisBuild, weights::Weight}; -use libsecp256k1::SecretKey; -use sp_runtime::{ - testing::Header as SubstrateHeader, - traits::{BlakeTwo256, IdentityLookup}, - Perbill, -}; - -pub type AccountId = u64; - -type Block = frame_system::mocking::MockBlock; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; - -use crate as pallet_ethereum; - -frame_support::construct_runtime! { - pub enum TestRuntime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Ethereum: pallet_ethereum::{Pallet, Call}, - } -} - -parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} - -impl frame_system::Config for TestRuntime { - type Origin = Origin; - type Index = u64; - type Call = Call; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Header = SubstrateHeader; - type Event = Event; - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type SS58Prefix = (); - type OnSetCode = (); -} - -parameter_types! { - pub const TestFinalityVotesCachingInterval: Option = Some(16); - pub TestAuraConfiguration: AuraConfiguration = test_aura_config(); - pub TestValidatorsConfiguration: ValidatorsConfiguration = test_validators_config(); -} - -impl Config for TestRuntime { - type AuraConfiguration = TestAuraConfiguration; - type ValidatorsConfiguration = TestValidatorsConfiguration; - type FinalityVotesCachingInterval = TestFinalityVotesCachingInterval; - type PruningStrategy = KeepSomeHeadersBehindBest; - type ChainTime = ConstChainTime; - type OnHeadersSubmitted = (); -} - -/// Test context. -pub struct TestContext { - /// Initial (genesis) header. - pub genesis: AuraHeader, - /// Number of initial validators. - pub total_validators: usize, - /// Secret keys of validators, ordered by validator index. - pub validators: Vec, - /// Addresses of validators, ordered by validator index. - pub addresses: Vec
<Address>,
-}
-
-/// Aura configuration that is used in tests by default.
-pub fn test_aura_config() -> AuraConfiguration {
-	AuraConfiguration {
-		empty_steps_transition: u64::max_value(),
-		strict_empty_steps_transition: 0,
-		validate_step_transition: 0x16e360,
-		validate_score_transition: 0x41a3c4,
-		two_thirds_majority_transition: u64::max_value(),
-		min_gas_limit: 0x1388.into(),
-		max_gas_limit: U256::max_value(),
-		maximum_extra_data_size: 0x20,
-	}
-}
-
-/// Validators configuration that is used in tests by default.
-pub fn test_validators_config() -> ValidatorsConfiguration {
-	ValidatorsConfiguration::Single(ValidatorsSource::List(validators_addresses(3)))
-}
-
-/// Genesis header that is used in tests by default.
-pub fn genesis() -> AuraHeader {
-	HeaderBuilder::genesis().sign_by(&validator(0))
-}
-
-/// Run test with the default genesis header.
-pub fn run_test<T>(total_validators: usize, test: impl FnOnce(TestContext) -> T) -> T {
-	run_test_with_genesis(genesis(), total_validators, test)
-}
-
-/// Run test with the given genesis header.
-pub fn run_test_with_genesis<T>(
-	genesis: AuraHeader,
-	total_validators: usize,
-	test: impl FnOnce(TestContext) -> T,
-) -> T {
-	let validators = validators(total_validators);
-	let addresses = validators_addresses(total_validators);
-	sp_io::TestExternalities::from(
-		GenesisBuild::<TestRuntime>::build_storage(&CrateGenesisConfig {
-			initial_header: genesis.clone(),
-			initial_difficulty: 0.into(),
-			initial_validators: addresses.clone(),
-		})
-		.unwrap(),
-	)
-	.execute_with(|| test(TestContext { genesis, total_validators, validators, addresses }))
-}
-
-/// Pruning strategy that keeps 10 headers behind best block.
-pub struct KeepSomeHeadersBehindBest(pub u64);
-
-impl Default for KeepSomeHeadersBehindBest {
-	fn default() -> KeepSomeHeadersBehindBest {
-		KeepSomeHeadersBehindBest(10)
-	}
-}
-
-impl PruningStrategy for KeepSomeHeadersBehindBest {
-	fn pruning_upper_bound(&mut self, best_number: u64, _: u64) -> u64 {
-		best_number.saturating_sub(self.0)
-	}
-}
-
-/// Constant chain time.
-#[derive(Default)]
-pub struct ConstChainTime;
-
-impl ChainTime for ConstChainTime {
-	fn is_timestamp_ahead(&self, timestamp: u64) -> bool {
-		let now = i32::max_value() as u64 / 2;
-		timestamp > now
-	}
-}
diff --git a/modules/ethereum/src/test_utils.rs b/modules/ethereum/src/test_utils.rs
deleted file mode 100644
index 414445f3aacc..000000000000
--- a/modules/ethereum/src/test_utils.rs
+++ /dev/null
@@ -1,322 +0,0 @@
-// Copyright 2019-2021 Parity Technologies (UK) Ltd.
-// This file is part of Parity Bridges Common.
-
-// Parity Bridges Common is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Parity Bridges Common is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
-
-//! Utilities for testing and benchmarking the Ethereum Bridge Pallet.
-//!
-//! Although the name implies that it is used by tests, it shouldn't be used _directly_ by tests.
-//! Instead these utilities should be used by the Mock runtime, which in turn is used by tests.
-//!
-//!
On the other hand, they may be used directly by the benchmark module. - -// Since this is test code it's fine that not everything is used -#![allow(dead_code)] - -use crate::{ - finality::FinalityVotes, validators::CHANGE_EVENT_HASH, verification::calculate_score, Config, - HeaderToImport, Storage, -}; - -use bp_eth_poa::{ - rlp_encode, - signatures::{secret_to_address, sign, SignHeader}, - Address, AuraHeader, Bloom, Receipt, SealedEmptyStep, H256, U256, -}; -use libsecp256k1::SecretKey; -use sp_std::prelude::*; - -/// Gas limit valid in test environment. -pub const GAS_LIMIT: u64 = 0x2000; - -/// Test header builder. -pub struct HeaderBuilder { - header: AuraHeader, - parent_header: AuraHeader, -} - -impl HeaderBuilder { - /// Creates default genesis header. - pub fn genesis() -> Self { - let current_step = 0u64; - Self { - header: AuraHeader { - gas_limit: GAS_LIMIT.into(), - seal: vec![bp_eth_poa::rlp_encode(¤t_step).to_vec(), vec![]], - ..Default::default() - }, - parent_header: Default::default(), - } - } - - /// Creates default header on top of test parent with given hash. - #[cfg(test)] - pub fn with_parent_hash(parent_hash: H256) -> Self { - Self::with_parent_hash_on_runtime::(parent_hash) - } - - /// Creates default header on top of test parent with given number. First parent is selected. - #[cfg(test)] - pub fn with_parent_number(parent_number: u64) -> Self { - Self::with_parent_number_on_runtime::(parent_number) - } - - /// Creates default header on top of parent with given hash. - pub fn with_parent_hash_on_runtime, I: 'static>(parent_hash: H256) -> Self { - use crate::Headers; - - let parent_header = Headers::::get(&parent_hash).unwrap().header; - Self::with_parent(&parent_header) - } - - /// Creates default header on top of parent with given number. First parent is selected. - pub fn with_parent_number_on_runtime, I: 'static>(parent_number: u64) -> Self { - use crate::HeadersByNumber; - - let parent_hash = HeadersByNumber::::get(parent_number).unwrap()[0]; - Self::with_parent_hash_on_runtime::(parent_hash) - } - - /// Creates default header on top of non-existent parent. - #[cfg(test)] - pub fn with_number(number: u64) -> Self { - Self::with_parent(&AuraHeader { - number: number - 1, - seal: vec![bp_eth_poa::rlp_encode(&(number - 1)).to_vec(), vec![]], - ..Default::default() - }) - } - - /// Creates default header on top of given parent. - pub fn with_parent(parent_header: &AuraHeader) -> Self { - let parent_step = parent_header.step().unwrap(); - let current_step = parent_step + 1; - Self { - header: AuraHeader { - parent_hash: parent_header.compute_hash(), - number: parent_header.number + 1, - gas_limit: GAS_LIMIT.into(), - seal: vec![bp_eth_poa::rlp_encode(¤t_step).to_vec(), vec![]], - difficulty: calculate_score(parent_step, current_step, 0), - ..Default::default() - }, - parent_header: parent_header.clone(), - } - } - - /// Update step of this header. - pub fn step(mut self, step: u64) -> Self { - let parent_step = self.parent_header.step(); - self.header.seal[0] = rlp_encode(&step).to_vec(); - self.header.difficulty = parent_step - .map(|parent_step| calculate_score(parent_step, step, 0)) - .unwrap_or_default(); - self - } - - /// Adds empty steps to this header. 
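Before the empty-steps setter below, here is a hedged sketch of how the constructors above compose: each `with_parent` call derives the child's number, step, and difficulty from the parent, and `sign_by` seals the header with a test validator key. The sketch relies only on this file's builder methods and on `validator` from `validator_utils` below; the assertions restate the builder logic above, not pallet guarantees:

#[test]
fn hypothetical_builder_walkthrough() {
	// A genesis header plus two descendants, each signed by test validator 0.
	let genesis = HeaderBuilder::genesis().sign_by(&validator(0));
	let block1 = HeaderBuilder::with_parent(&genesis).sign_by(&validator(0));
	let block2 = HeaderBuilder::with_parent(&block1).sign_by(&validator(0));
	assert_eq!(block2.number, 2);
	assert_eq!(block2.parent_hash, block1.compute_hash());
}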
- pub fn empty_steps(mut self, empty_steps: &[(&SecretKey, u64)]) -> Self { - let sealed_empty_steps = empty_steps - .iter() - .map(|(author, step)| { - let mut empty_step = SealedEmptyStep { step: *step, signature: Default::default() }; - let message = empty_step.message(&self.header.parent_hash); - let signature: [u8; 65] = sign(author, message).into(); - empty_step.signature = signature.into(); - empty_step - }) - .collect::>(); - - // by default in test configuration headers are generated without empty steps seal - if self.header.seal.len() < 3 { - self.header.seal.push(Vec::new()); - } - - self.header.seal[2] = SealedEmptyStep::rlp_of(&sealed_empty_steps); - self - } - - /// Update difficulty field of this header. - pub fn difficulty(mut self, difficulty: U256) -> Self { - self.header.difficulty = difficulty; - self - } - - /// Update extra data field of this header. - pub fn extra_data(mut self, extra_data: Vec) -> Self { - self.header.extra_data = extra_data; - self - } - - /// Update gas limit field of this header. - pub fn gas_limit(mut self, gas_limit: U256) -> Self { - self.header.gas_limit = gas_limit; - self - } - - /// Update gas used field of this header. - pub fn gas_used(mut self, gas_used: U256) -> Self { - self.header.gas_used = gas_used; - self - } - - /// Update log bloom field of this header. - pub fn log_bloom(mut self, log_bloom: Bloom) -> Self { - self.header.log_bloom = log_bloom; - self - } - - /// Update receipts root field of this header. - pub fn receipts_root(mut self, receipts_root: H256) -> Self { - self.header.receipts_root = receipts_root; - self - } - - /// Update timestamp field of this header. - pub fn timestamp(mut self, timestamp: u64) -> Self { - self.header.timestamp = timestamp; - self - } - - /// Update transactions root field of this header. - pub fn transactions_root(mut self, transactions_root: H256) -> Self { - self.header.transactions_root = transactions_root; - self - } - - /// Signs header by given author. - pub fn sign_by(self, author: &SecretKey) -> AuraHeader { - self.header.sign_by(author) - } - - /// Signs header by given authors set. - pub fn sign_by_set(self, authors: &[SecretKey]) -> AuraHeader { - self.header.sign_by_set(authors) - } -} - -/// Helper function for getting a genesis header which has been signed by an authority. -pub fn build_genesis_header(author: &SecretKey) -> AuraHeader { - let genesis = HeaderBuilder::genesis(); - genesis.header.sign_by(author) -} - -/// Helper function for building a custom child header which has been signed by an authority. -pub fn build_custom_header( - author: &SecretKey, - previous: &AuraHeader, - customize_header: F, -) -> AuraHeader -where - F: FnOnce(AuraHeader) -> AuraHeader, -{ - let new_header = HeaderBuilder::with_parent(previous); - let custom_header = customize_header(new_header.header); - custom_header.sign_by(author) -} - -/// Insert unverified header into storage. -/// -/// This function assumes that the header is signed by validator from the current set. 
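The storage-insertion helpers below assume well-formed input, so tests that need a deliberately broken header first shape it with `build_custom_header` from above. A hedged usage sketch (the test name and gas values are arbitrary):

#[test]
fn hypothetical_custom_header_usage() {
	let parent = build_genesis_header(&validator(0));
	// Produce a child whose gas_used exceeds its gas_limit: the kind of
	// header used to exercise the TooMuchGasUsed verification error.
	let bad_child = build_custom_header(&validator(0), &parent, |mut header| {
		header.gas_used = (GAS_LIMIT + 1).into();
		header
	});
	assert!(bad_child.gas_used > bad_child.gas_limit);
}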
-pub fn insert_header(storage: &mut S, header: AuraHeader) { - let id = header.compute_id(); - let best_finalized = storage.finalized_block(); - let import_context = storage.import_context(None, &header.parent_hash).unwrap(); - let parent_finality_votes = - storage.cached_finality_votes(&header.parent_id().unwrap(), &best_finalized, |_| false); - let finality_votes = crate::finality::prepare_votes( - parent_finality_votes, - best_finalized, - &import_context.validators_set().validators.iter().collect(), - id, - &header, - None, - ) - .unwrap(); - - storage.insert_header(HeaderToImport { - context: storage.import_context(None, &header.parent_hash).unwrap(), - is_best: true, - id, - header, - total_difficulty: 0.into(), - enacted_change: None, - scheduled_change: None, - finality_votes, - }); -} - -/// Insert unverified header into storage. -/// -/// No assumptions about header author are made. The cost is that finality votes cache -/// is filled incorrectly, so this function shall not be used if you're going to insert -/// (or import) header descendants. -pub fn insert_dummy_header(storage: &mut S, header: AuraHeader) { - storage.insert_header(HeaderToImport { - context: storage.import_context(None, &header.parent_hash).unwrap(), - is_best: true, - id: header.compute_id(), - header, - total_difficulty: 0.into(), - enacted_change: None, - scheduled_change: None, - finality_votes: FinalityVotes::default(), - }); -} - -pub fn validators_change_receipt(parent_hash: H256) -> Receipt { - use bp_eth_poa::{LogEntry, TransactionOutcome}; - - Receipt { - gas_used: 0.into(), - log_bloom: (&[0xff; 256]).into(), - outcome: TransactionOutcome::Unknown, - logs: vec![LogEntry { - address: [3; 20].into(), - topics: vec![CHANGE_EVENT_HASH.into(), parent_hash], - data: vec![ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - ], - }], - } -} - -pub mod validator_utils { - use super::*; - - /// Return key pair of given test validator. - pub fn validator(index: usize) -> SecretKey { - let mut raw_secret = [0u8; 32]; - raw_secret[..8].copy_from_slice(&(index + 1).to_le_bytes()); - SecretKey::parse(&raw_secret).unwrap() - } - - /// Return key pairs of all test validators. - pub fn validators(count: usize) -> Vec { - (0..count).map(validator).collect() - } - - /// Return address of test validator. - pub fn validator_address(index: usize) -> Address { - secret_to_address(&validator(index)) - } - - /// Return addresses of all test validators. - pub fn validators_addresses(count: usize) -> Vec
{ - (0..count).map(validator_address).collect() - } -} diff --git a/modules/ethereum/src/validators.rs b/modules/ethereum/src/validators.rs deleted file mode 100644 index fd010d52c39d..000000000000 --- a/modules/ethereum/src/validators.rs +++ /dev/null @@ -1,458 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::{error::Error, ChangeToEnact, Storage}; -use bp_eth_poa::{Address, AuraHeader, HeaderId, LogEntry, Receipt, U256}; -use sp_std::prelude::*; - -/// The hash of InitiateChange event of the validators set contract. -pub(crate) const CHANGE_EVENT_HASH: &[u8; 32] = &[ - 0x55, 0x25, 0x2f, 0xa6, 0xee, 0xe4, 0x74, 0x1b, 0x4e, 0x24, 0xa7, 0x4a, 0x70, 0xe9, 0xc1, 0x1f, - 0xd2, 0xc2, 0x28, 0x1d, 0xf8, 0xd6, 0xea, 0x13, 0x12, 0x6f, 0xf8, 0x45, 0xf7, 0x82, 0x5c, 0x89, -]; - -/// Where source of validators addresses come from. This covers the chain lifetime. -pub enum ValidatorsConfiguration { - /// There's a single source for the whole chain lifetime. - Single(ValidatorsSource), - /// Validators source changes at given blocks. The blocks are ordered - /// by the block number. - Multi(Vec<(u64, ValidatorsSource)>), -} - -/// Where validators addresses come from. -/// -/// This source is valid within some blocks range. The blocks range could -/// cover multiple epochs - i.e. the validators that are authoring blocks -/// within this range could change, but the source itself can not. -#[cfg_attr(any(test, feature = "runtime-benchmarks"), derive(Debug, PartialEq))] -pub enum ValidatorsSource { - /// The validators addresses are hardcoded and never change. - List(Vec
<Address>),
-	/// The validators addresses are determined by the validators set contract
-	/// deployed at given address. The contract must implement the `ValidatorSet`
-	/// interface. Additionally, the initial validators set must be provided.
-	Contract(Address, Vec<Address>)
), -} - -/// A short hand for optional validators change. -pub type ValidatorsChange = Option>; - -/// Validators manager. -pub struct Validators<'a> { - config: &'a ValidatorsConfiguration, -} - -impl<'a> Validators<'a> { - /// Creates new validators manager using given configuration. - pub fn new(config: &'a ValidatorsConfiguration) -> Self { - Self { config } - } - - /// Returns true if header (probabilistically) signals validators change and - /// the caller needs to provide transactions receipts to import the header. - pub fn maybe_signals_validators_change(&self, header: &AuraHeader) -> bool { - let (_, _, source) = self.source_at(header.number); - - // if we are taking validators set from the fixed list, there's always - // single epoch - // => we never require transactions receipts - let contract_address = match source { - ValidatorsSource::List(_) => return false, - ValidatorsSource::Contract(contract_address, _) => contract_address, - }; - - // else we need to check logs bloom and if it has required bits set, it means - // that the contract has (probably) emitted epoch change event - let expected_bloom = LogEntry { - address: *contract_address, - topics: vec![CHANGE_EVENT_HASH.into(), header.parent_hash], - data: Vec::new(), // irrelevant for bloom. - } - .bloom(); - - header.log_bloom.contains(&expected_bloom) - } - - /// Extracts validators change signal from the header. - /// - /// Returns tuple where first element is the change scheduled by this header - /// (i.e. this change is only applied starting from the block that has finalized - /// current block). The second element is the immediately applied change. - pub fn extract_validators_change( - &self, - header: &AuraHeader, - receipts: Option>, - ) -> Result<(ValidatorsChange, ValidatorsChange), Error> { - // let's first check if new source is starting from this header - let (source_index, _, source) = self.source_at(header.number); - let (next_starts_at, next_source) = self.source_at_next_header(source_index, header.number); - if next_starts_at == header.number { - match *next_source { - ValidatorsSource::List(ref new_list) => return Ok((None, Some(new_list.clone()))), - ValidatorsSource::Contract(_, ref new_list) => - return Ok((Some(new_list.clone()), None)), - } - } - - // else deal with previous source - // - // if we are taking validators set from the fixed list, there's always - // single epoch - // => we never require transactions receipts - let contract_address = match source { - ValidatorsSource::List(_) => return Ok((None, None)), - ValidatorsSource::Contract(contract_address, _) => contract_address, - }; - - // else we need to check logs bloom and if it has required bits set, it means - // that the contract has (probably) emitted epoch change event - let expected_bloom = LogEntry { - address: *contract_address, - topics: vec![CHANGE_EVENT_HASH.into(), header.parent_hash], - data: Vec::new(), // irrelevant for bloom. 
- } - .bloom(); - - if !header.log_bloom.contains(&expected_bloom) { - return Ok((None, None)) - } - - let receipts = receipts.ok_or(Error::MissingTransactionsReceipts)?; - #[allow(clippy::question_mark)] - if header.check_receipts_root(&receipts).is_err() { - return Err(Error::TransactionsReceiptsMismatch) - } - - // iterate in reverse because only the _last_ change in a given - // block actually has any effect - Ok(( - receipts - .iter() - .rev() - .filter(|r| r.log_bloom.contains(&expected_bloom)) - .flat_map(|r| r.logs.iter()) - .filter(|l| { - l.address == *contract_address && - l.topics.len() == 2 && l.topics[0].as_fixed_bytes() == CHANGE_EVENT_HASH && - l.topics[1] == header.parent_hash - }) - .filter_map(|l| { - let data_len = l.data.len(); - if data_len < 64 { - return None - } - - let new_validators_len_u256 = U256::from_big_endian(&l.data[32..64]); - let new_validators_len = new_validators_len_u256.low_u64(); - if new_validators_len_u256 != new_validators_len.into() { - return None - } - - if (data_len - 64) as u64 != new_validators_len.saturating_mul(32) { - return None - } - - Some( - l.data[64..] - .chunks(32) - .map(|chunk| { - let mut new_validator = Address::default(); - new_validator.as_mut().copy_from_slice(&chunk[12..32]); - new_validator - }) - .collect(), - ) - }) - .next(), - None, - )) - } - - /// Finalize changes when blocks are finalized. - pub fn finalize_validators_change( - &self, - storage: &S, - finalized_blocks: &[(HeaderId, Option)], - ) -> Option { - // if we haven't finalized any blocks, no changes may be finalized - let newest_finalized_id = match finalized_blocks.last().map(|(id, _)| id) { - Some(last_finalized_id) => last_finalized_id, - None => return None, - }; - let oldest_finalized_id = finalized_blocks - .first() - .map(|(id, _)| id) - .expect("finalized_blocks is not empty; qed"); - - // try to directly go to the header that has scheduled last change - // - // if we're unable to create import context for some block, it means - // that the header has already been pruned => it and its ancestors had - // no scheduled changes - // - // if we're unable to find scheduled changes for some block, it means - // that these changes have been finalized already - storage - .import_context(None, &newest_finalized_id.hash) - .and_then(|context| context.last_signal_block()) - .and_then(|signal_block| { - if signal_block.number >= oldest_finalized_id.number { - Some(signal_block) - } else { - None - } - }) - .and_then(|signal_block| { - storage.scheduled_change(&signal_block.hash).map(|change| ChangeToEnact { - signal_block: Some(signal_block), - validators: change.validators, - }) - }) - } - - /// Returns source of validators that should author the header. - fn source_at(&self, header_number: u64) -> (usize, u64, &ValidatorsSource) { - match self.config { - ValidatorsConfiguration::Single(ref source) => (0, 0, source), - ValidatorsConfiguration::Multi(ref sources) => sources - .iter() - .rev() - .enumerate() - .find(|(_, &(begin, _))| begin < header_number) - .map(|(i, (begin, source))| (sources.len() - 1 - i, *begin, source)) - .expect( - "there's always entry for the initial block;\ - we do not touch any headers with number < initial block number; qed", - ), - } - } - - /// Returns source of validators that should author the next header. 
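The log-data parsing in `extract_validators_change` above follows the usual ABI layout for a dynamic `address[]` event argument: a 32-byte offset word, a 32-byte length word, then one 32-byte word per address with the 20 address bytes right-aligned. A hedged standalone sketch of the same decoding (the function name is illustrative):

use bp_eth_poa::{Address, U256};

/// Decode an ABI-encoded `address[]` event payload; None on malformed input.
fn decode_validator_list(data: &[u8]) -> Option<Vec<Address>> {
	if data.len() < 64 {
		return None
	}
	// the second 32-byte word is the array length, in elements
	let len = U256::from_big_endian(&data[32..64]).low_u64() as usize;
	// every element occupies exactly one 32-byte word after the two header words
	if data.len() - 64 != len.checked_mul(32)? {
		return None
	}
	Some(
		data[64..]
			.chunks(32)
			.map(|word| {
				let mut address = Address::default();
				address.as_mut().copy_from_slice(&word[12..32]);
				address
			})
			.collect(),
	)
}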
- fn source_at_next_header( - &self, - header_source_index: usize, - header_number: u64, - ) -> (u64, &ValidatorsSource) { - match self.config { - ValidatorsConfiguration::Single(ref source) => (0, source), - ValidatorsConfiguration::Multi(ref sources) => { - let next_source_index = header_source_index + 1; - if next_source_index < sources.len() { - let next_source = &sources[next_source_index]; - if next_source.0 < header_number + 1 { - return (next_source.0, &next_source.1) - } - } - - let source = &sources[header_source_index]; - (source.0, &source.1) - }, - } - } -} - -impl ValidatorsSource { - /// Returns initial validators set. - pub fn initial_epoch_validators(&self) -> Vec
{ - match self { - ValidatorsSource::List(ref list) => list.clone(), - ValidatorsSource::Contract(_, ref list) => list.clone(), - } - } -} - -#[cfg(test)] -pub(crate) mod tests { - use super::*; - use crate::{ - mock::{run_test, validators_addresses, validators_change_receipt, TestRuntime}, - AuraScheduledChange, BridgeStorage, Headers, ScheduledChanges, StoredHeader, - }; - use bp_eth_poa::compute_merkle_root; - - const TOTAL_VALIDATORS: usize = 3; - - #[test] - fn source_at_works() { - let config = ValidatorsConfiguration::Multi(vec![ - (0, ValidatorsSource::List(vec![[1; 20].into()])), - (100, ValidatorsSource::List(vec![[2; 20].into()])), - (200, ValidatorsSource::Contract([3; 20].into(), vec![[3; 20].into()])), - ]); - let validators = Validators::new(&config); - - assert_eq!(validators.source_at(99), (0, 0, &ValidatorsSource::List(vec![[1; 20].into()])),); - assert_eq!( - validators.source_at_next_header(0, 99), - (0, &ValidatorsSource::List(vec![[1; 20].into()])), - ); - - assert_eq!( - validators.source_at(100), - (0, 0, &ValidatorsSource::List(vec![[1; 20].into()])), - ); - assert_eq!( - validators.source_at_next_header(0, 100), - (100, &ValidatorsSource::List(vec![[2; 20].into()])), - ); - - assert_eq!( - validators.source_at(200), - (1, 100, &ValidatorsSource::List(vec![[2; 20].into()])), - ); - assert_eq!( - validators.source_at_next_header(1, 200), - (200, &ValidatorsSource::Contract([3; 20].into(), vec![[3; 20].into()])), - ); - } - - #[test] - fn maybe_signals_validators_change_works() { - // when contract is active, but bloom has no required bits set - let config = ValidatorsConfiguration::Single(ValidatorsSource::Contract( - Default::default(), - Vec::new(), - )); - let validators = Validators::new(&config); - let mut header = AuraHeader { number: u64::max_value(), ..Default::default() }; - assert!(!validators.maybe_signals_validators_change(&header)); - - // when contract is active and bloom has required bits set - header.log_bloom = (&[0xff; 256]).into(); - assert!(validators.maybe_signals_validators_change(&header)); - - // when list is active and bloom has required bits set - let config = ValidatorsConfiguration::Single(ValidatorsSource::List(vec![[42; 20].into()])); - let validators = Validators::new(&config); - assert!(!validators.maybe_signals_validators_change(&header)); - } - - #[test] - fn extract_validators_change_works() { - let config = ValidatorsConfiguration::Multi(vec![ - (0, ValidatorsSource::List(vec![[1; 20].into()])), - (100, ValidatorsSource::List(vec![[2; 20].into()])), - (200, ValidatorsSource::Contract([3; 20].into(), vec![[3; 20].into()])), - ]); - let validators = Validators::new(&config); - let mut header = AuraHeader { number: 100, ..Default::default() }; - - // when we're at the block that switches to list source - assert_eq!( - validators.extract_validators_change(&header, None), - Ok((None, Some(vec![[2; 20].into()]))), - ); - - // when we're inside list range - header.number = 150; - assert_eq!(validators.extract_validators_change(&header, None), Ok((None, None)),); - - // when we're at the block that switches to contract source - header.number = 200; - assert_eq!( - validators.extract_validators_change(&header, None), - Ok((Some(vec![[3; 20].into()]), None)), - ); - - // when we're inside contract range and logs bloom signals change - // but we have no receipts - header.number = 250; - header.log_bloom = (&[0xff; 256]).into(); - assert_eq!( - validators.extract_validators_change(&header, None), - Err(Error::MissingTransactionsReceipts), - 
); - - // when we're inside contract range and logs bloom signals change - // but there's no change in receipts - header.receipts_root = "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" - .parse() - .unwrap(); - assert_eq!( - validators.extract_validators_change(&header, Some(Vec::new())), - Ok((None, None)), - ); - - // when we're inside contract range and logs bloom signals change - // and there's change in receipts - let receipts = vec![validators_change_receipt(Default::default())]; - header.receipts_root = compute_merkle_root(receipts.iter().map(|r| r.rlp())); - assert_eq!( - validators.extract_validators_change(&header, Some(receipts)), - Ok((Some(vec![[7; 20].into()]), None)), - ); - - // when incorrect receipts root passed - assert_eq!( - validators.extract_validators_change(&header, Some(Vec::new())), - Err(Error::TransactionsReceiptsMismatch), - ); - } - - fn try_finalize_with_scheduled_change(scheduled_at: Option) -> Option { - run_test(TOTAL_VALIDATORS, |_| { - let config = ValidatorsConfiguration::Single(ValidatorsSource::Contract( - Default::default(), - Vec::new(), - )); - let validators = Validators::new(&config); - let storage = BridgeStorage::::new(); - - // when we're finailizing blocks 10...100 - let id10 = HeaderId { number: 10, hash: [10; 32].into() }; - let id100 = HeaderId { number: 100, hash: [100; 32].into() }; - let finalized_blocks = vec![(id10, None), (id100, None)]; - let header100 = StoredHeader:: { - submitter: None, - header: AuraHeader { number: 100, ..Default::default() }, - total_difficulty: 0.into(), - next_validators_set_id: 0, - last_signal_block: scheduled_at, - }; - let scheduled_change = AuraScheduledChange { - validators: validators_addresses(1), - prev_signal_block: None, - }; - Headers::::insert(id100.hash, header100); - if let Some(scheduled_at) = scheduled_at { - ScheduledChanges::::insert(scheduled_at.hash, scheduled_change); - } - - validators.finalize_validators_change(&storage, &finalized_blocks) - }) - } - - #[test] - fn finalize_validators_change_finalizes_scheduled_change() { - let id50 = HeaderId { number: 50, ..Default::default() }; - assert_eq!( - try_finalize_with_scheduled_change(Some(id50)), - Some(ChangeToEnact { signal_block: Some(id50), validators: validators_addresses(1) }), - ); - } - - #[test] - fn finalize_validators_change_does_not_finalize_when_changes_are_not_scheduled() { - assert_eq!(try_finalize_with_scheduled_change(None), None,); - } - - #[test] - fn finalize_validators_change_does_not_finalize_changes_when_they_are_outside_of_range() { - let id5 = HeaderId { number: 5, ..Default::default() }; - assert_eq!(try_finalize_with_scheduled_change(Some(id5)), None,); - } -} diff --git a/modules/ethereum/src/verification.rs b/modules/ethereum/src/verification.rs deleted file mode 100644 index 053ce4d0fea4..000000000000 --- a/modules/ethereum/src/verification.rs +++ /dev/null @@ -1,972 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::{ - error::Error, - validators::{Validators, ValidatorsConfiguration}, - AuraConfiguration, AuraScheduledChange, ChainTime, ImportContext, PoolConfiguration, Storage, -}; -use bp_eth_poa::{ - public_to_address, step_validator, Address, AuraHeader, HeaderId, Receipt, SealedEmptyStep, - H256, H520, U128, U256, -}; -use codec::Encode; -use sp_io::crypto::secp256k1_ecdsa_recover; -use sp_runtime::transaction_validity::TransactionTag; -use sp_std::{vec, vec::Vec}; - -/// Pre-check to see if should try and import this header. -/// Returns error if we should not try to import this block. -/// Returns ID of passed header and best finalized header. -pub fn is_importable_header( - storage: &S, - header: &AuraHeader, -) -> Result<(HeaderId, HeaderId), Error> { - // we never import any header that competes with finalized header - let finalized_id = storage.finalized_block(); - if header.number <= finalized_id.number { - return Err(Error::AncientHeader) - } - // we never import any header with known hash - let id = header.compute_id(); - if storage.header(&id.hash).is_some() { - return Err(Error::KnownHeader) - } - - Ok((id, finalized_id)) -} - -/// Try to accept unsigned aura header into transaction pool. -/// -/// Returns required and provided tags. -pub fn accept_aura_header_into_pool( - storage: &S, - config: &AuraConfiguration, - validators_config: &ValidatorsConfiguration, - pool_config: &PoolConfiguration, - header: &AuraHeader, - chain_time: &CT, - receipts: Option<&Vec>, -) -> Result<(Vec, Vec), Error> { - // check if we can verify further - let (header_id, _) = is_importable_header(storage, header)?; - - // we can always do contextless checks - contextless_checks(config, header, chain_time)?; - - // we want to avoid having same headers twice in the pool - // => we're strict about receipts here - if we need them, we require receipts to be Some, - // otherwise we require receipts to be None - let receipts_required = - Validators::new(validators_config).maybe_signals_validators_change(header); - match (receipts_required, receipts.is_some()) { - (true, false) => return Err(Error::MissingTransactionsReceipts), - (false, true) => return Err(Error::RedundantTransactionsReceipts), - _ => (), - } - - // we do not want to have all future headers in the pool at once - // => if we see header with number > maximal ever seen header number + LIMIT, - // => we consider this transaction invalid, but only at this moment (we do not want to ban it) - // => let's mark it as Unknown transaction - let (best_id, _) = storage.best_block(); - let difference = header.number.saturating_sub(best_id.number); - if difference > pool_config.max_future_number_difference { - return Err(Error::UnsignedTooFarInTheFuture) - } - - // TODO: only accept new headers when we're at the tip of PoA chain - // https://github.com/paritytech/parity-bridges-common/issues/38 - - // we want to see at most one header with given number from single authority - // => every header is providing tag (block_number + authority) - // => since only one tx in the pool can provide the same tag, they're auto-deduplicated - let provides_number_and_authority_tag = (header.number, header.author).encode(); - - // we want to see several 'future' headers in the pool at once, but we may not have access to - // previous headers here - // => we can at least 'verify' that 
headers comprise a chain by providing and requiring - // tag (header.number, header.hash) - let provides_header_number_and_hash_tag = header_id.encode(); - - // depending on whether parent header is available, we either perform full or 'shortened' check - let context = storage.import_context(None, &header.parent_hash); - let tags = match context { - Some(context) => { - let header_step = contextual_checks(config, &context, None, header)?; - validator_checks(config, &context.validators_set().validators, header, header_step)?; - - // since our parent is already in the storage, we do not require it - // to be in the transaction pool - (vec![], vec![provides_number_and_authority_tag, provides_header_number_and_hash_tag]) - }, - None => { - // we know nothing about parent header - // => the best thing we can do is to believe that there are no forks in - // PoA chain AND that the header is produced either by previous, or next - // scheduled validators set change - let header_step = header.step().ok_or(Error::MissingStep)?; - let best_context = storage.import_context(None, &best_id.hash).expect( - "import context is None only when header is missing from the storage;\ - best header is always in the storage; qed", - ); - let validators_check_result = validator_checks( - config, - &best_context.validators_set().validators, - header, - header_step, - ); - if let Err(error) = validators_check_result { - find_next_validators_signal(storage, &best_context).ok_or(error).and_then( - |next_validators| { - validator_checks(config, &next_validators, header, header_step) - }, - )?; - } - - // since our parent is missing from the storage, we **DO** require it - // to be in the transaction pool - // (- 1 can't underflow because there's always best block in the header) - let requires_header_number_and_hash_tag = - HeaderId { number: header.number - 1, hash: header.parent_hash }.encode(); - ( - vec![requires_header_number_and_hash_tag], - vec![provides_number_and_authority_tag, provides_header_number_and_hash_tag], - ) - }, - }; - - // the heaviest, but rare operation - we do not want invalid receipts in the pool - if let Some(receipts) = receipts { - log::trace!(target: "runtime", "Got receipts! {:?}", receipts); - #[allow(clippy::question_mark)] - if header.check_receipts_root(receipts).is_err() { - return Err(Error::TransactionsReceiptsMismatch) - } - } - - Ok(tags) -} - -/// Verify header by Aura rules. -pub fn verify_aura_header( - storage: &S, - config: &AuraConfiguration, - submitter: Option, - header: &AuraHeader, - chain_time: &CT, -) -> Result, Error> { - // let's do the lightest check first - contextless_checks(config, header, chain_time)?; - - // the rest of checks requires access to the parent header - let context = storage.import_context(submitter, &header.parent_hash).ok_or_else(|| { - log::warn!( - target: "runtime", - "Missing parent PoA block: ({:?}, {})", - header.number.checked_sub(1), - header.parent_hash, - ); - - Error::MissingParentBlock - })?; - let header_step = contextual_checks(config, &context, None, header)?; - validator_checks(config, &context.validators_set().validators, header, header_step)?; - - Ok(context) -} - -/// Perform basic checks that only require header itself. 
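Before the contextless checks below, it is worth spelling out the tag scheme used above: every header provides a `(number, author)` tag, which deduplicates competing headers from a single authority, plus a `(number, hash)` tag; a header whose parent is missing additionally requires the parent's `(number, hash)` tag, so the pool imports chains in order. A hedged sketch of the tag derivation (helper names are illustrative; encoding via `codec::Encode`, as in the code above):

use bp_eth_poa::{AuraHeader, HeaderId};
use codec::Encode;
use sp_runtime::transaction_validity::TransactionTag;

/// Tags that every pool transaction carrying this header provides.
fn provided_tags(header: &AuraHeader) -> Vec<TransactionTag> {
	vec![(header.number, header.author).encode(), header.compute_id().encode()]
}

/// Tag required when the parent header is not yet in storage.
/// (`number - 1` cannot underflow here: the best header is always in
/// storage, so a header with number 0 never reaches this branch.)
fn required_parent_tag(header: &AuraHeader) -> TransactionTag {
	HeaderId { number: header.number - 1, hash: header.parent_hash }.encode()
}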
-fn contextless_checks( - config: &AuraConfiguration, - header: &AuraHeader, - chain_time: &CT, -) -> Result<(), Error> { - let expected_seal_fields = expected_header_seal_fields(config, header); - if header.seal.len() != expected_seal_fields { - return Err(Error::InvalidSealArity) - } - if header.number >= u64::max_value() { - return Err(Error::RidiculousNumber) - } - if header.gas_used > header.gas_limit { - return Err(Error::TooMuchGasUsed) - } - if header.gas_limit < config.min_gas_limit { - return Err(Error::InvalidGasLimit) - } - if header.gas_limit > config.max_gas_limit { - return Err(Error::InvalidGasLimit) - } - if header.number != 0 && header.extra_data.len() as u64 > config.maximum_extra_data_size { - return Err(Error::ExtraDataOutOfBounds) - } - - // we can't detect if block is from future in runtime - // => let's only do an overflow check - if header.timestamp > i32::max_value() as u64 { - return Err(Error::TimestampOverflow) - } - - if chain_time.is_timestamp_ahead(header.timestamp) { - return Err(Error::HeaderTimestampIsAhead) - } - - Ok(()) -} - -/// Perform checks that require access to parent header. -fn contextual_checks( - config: &AuraConfiguration, - context: &ImportContext, - validators_override: Option<&[Address]>, - header: &AuraHeader, -) -> Result { - let validators = validators_override.unwrap_or_else(|| &context.validators_set().validators); - let header_step = header.step().ok_or(Error::MissingStep)?; - let parent_step = context.parent_header().step().ok_or(Error::MissingStep)?; - - // Ensure header is from the step after context. - if header_step == parent_step { - return Err(Error::DoubleVote) - } - #[allow(clippy::suspicious_operation_groupings)] - if header.number >= config.validate_step_transition && header_step < parent_step { - return Err(Error::DoubleVote) - } - - // If empty step messages are enabled we will validate the messages in the seal, missing - // messages are not reported as there's no way to tell whether the empty step message was never - // sent or simply not included. - let empty_steps_len = match header.number >= config.empty_steps_transition { - true => { - let strict_empty_steps = header.number >= config.strict_empty_steps_transition; - let empty_steps = header.empty_steps().ok_or(Error::MissingEmptySteps)?; - let empty_steps_len = empty_steps.len(); - let mut prev_empty_step = 0; - - for empty_step in empty_steps { - if empty_step.step <= parent_step || empty_step.step >= header_step { - return Err(Error::InsufficientProof) - } - - if !verify_empty_step(&header.parent_hash, &empty_step, validators) { - return Err(Error::InsufficientProof) - } - - if strict_empty_steps { - if empty_step.step <= prev_empty_step { - return Err(Error::InsufficientProof) - } - - prev_empty_step = empty_step.step; - } - } - - empty_steps_len - }, - false => 0, - }; - - // Validate chain score. - if header.number >= config.validate_score_transition { - let expected_difficulty = calculate_score(parent_step, header_step, empty_steps_len as _); - if header.difficulty != expected_difficulty { - return Err(Error::InvalidDifficulty) - } - } - - Ok(header_step) -} - -/// Check that block is produced by expected validator. 
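The check below leans on Aura's round-robin slot assignment: the author expected at step `s` is `validators[s % validators.len()]`. A hedged illustration, assuming that is what `step_validator` from `bp_eth_poa` computes (the addresses are made up):

use bp_eth_poa::{step_validator, Address};

fn hypothetical_round_robin() {
	let validators: Vec<Address> =
		vec![[1u8; 20].into(), [2u8; 20].into(), [3u8; 20].into()];
	// with 3 validators, step 42 maps to index 42 % 3 == 0
	assert_eq!(*step_validator(&validators, 42), validators[0]);
	assert_eq!(*step_validator(&validators, 43), validators[1]);
}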
-fn validator_checks( - config: &AuraConfiguration, - validators: &[Address], - header: &AuraHeader, - header_step: u64, -) -> Result<(), Error> { - let expected_validator = *step_validator(validators, header_step); - if header.author != expected_validator { - return Err(Error::NotValidator) - } - - let validator_signature = header.signature().ok_or(Error::MissingSignature)?; - let header_seal_hash = header - .seal_hash(header.number >= config.empty_steps_transition) - .ok_or(Error::MissingEmptySteps)?; - let is_invalid_proposer = - !verify_signature(&expected_validator, &validator_signature, &header_seal_hash); - if is_invalid_proposer { - return Err(Error::NotValidator) - } - - Ok(()) -} - -/// Returns expected number of seal fields in the header. -fn expected_header_seal_fields(config: &AuraConfiguration, header: &AuraHeader) -> usize { - if header.number != u64::max_value() && header.number >= config.empty_steps_transition { - 3 - } else { - 2 - } -} - -/// Verify single sealed empty step. -fn verify_empty_step(parent_hash: &H256, step: &SealedEmptyStep, validators: &[Address]) -> bool { - let expected_validator = *step_validator(validators, step.step); - let message = step.message(parent_hash); - verify_signature(&expected_validator, &step.signature, &message) -} - -/// Chain scoring: total `weight is sqrt(U256::max_value())*height - step`. -pub(crate) fn calculate_score( - parent_step: u64, - current_step: u64, - current_empty_steps: usize, -) -> U256 { - U256::from(U128::max_value()) + U256::from(parent_step) - U256::from(current_step) + - U256::from(current_empty_steps) -} - -/// Verify that the signature over message has been produced by given validator. -fn verify_signature(expected_validator: &Address, signature: &H520, message: &H256) -> bool { - secp256k1_ecdsa_recover(signature.as_fixed_bytes(), message.as_fixed_bytes()) - .map(|public| public_to_address(&public)) - .map(|address| *expected_validator == address) - .unwrap_or(false) -} - -/// Find next unfinalized validators set change after finalized set. 
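The `calculate_score` formula above means a header's difficulty drops by one for every step its author skipped and rises by one for every sealed empty step it carries, on top of the constant `U128::max_value()` base. A hedged numeric check of that reading (the function name is illustrative):

use bp_eth_poa::{U128, U256};

fn hypothetical_score_check() {
	let base = U256::from(U128::max_value());
	// immediate successor (step 43 right after 42), no empty steps
	assert_eq!(calculate_score(42, 43, 0), base - U256::one());
	// skipping one step is exactly offset by one sealed empty step
	assert_eq!(calculate_score(42, 44, 1), base - U256::one());
}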
-fn find_next_validators_signal( - storage: &S, - context: &ImportContext, -) -> Option> { - // that's the earliest block number we may met in following loop - // it may be None if that's the first set - let best_set_signal_block = context.validators_set().signal_block; - - // if parent schedules validators set change, then it may be our set - // else we'll start with last known change - let mut current_set_signal_block = context.last_signal_block(); - let mut next_scheduled_set: Option = None; - - loop { - // if we have reached block that signals finalized change, then - // next_current_block_hash points to the block that schedules next - // change - let current_scheduled_set = match current_set_signal_block { - Some(current_set_signal_block) - if Some(¤t_set_signal_block) == best_set_signal_block.as_ref() => - return next_scheduled_set.map(|scheduled_set| scheduled_set.validators), - None => return next_scheduled_set.map(|scheduled_set| scheduled_set.validators), - Some(current_set_signal_block) => - storage.scheduled_change(¤t_set_signal_block.hash).expect( - "header that is associated with this change is not pruned;\ - scheduled changes are only removed when header is pruned; qed", - ), - }; - - current_set_signal_block = current_scheduled_set.prev_signal_block; - next_scheduled_set = Some(current_scheduled_set); - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - mock::{ - insert_header, run_test_with_genesis, test_aura_config, validator, validator_address, - validators_addresses, validators_change_receipt, AccountId, ConstChainTime, - HeaderBuilder, TestRuntime, GAS_LIMIT, - }, - pool_configuration, - validators::ValidatorsSource, - BridgeStorage, FinalizedBlock, Headers, HeadersByNumber, NextValidatorsSetId, - ScheduledChanges, ValidatorsSet, ValidatorsSets, - }; - use bp_eth_poa::{compute_merkle_root, rlp_encode, TransactionOutcome, H520, U256}; - use hex_literal::hex; - use libsecp256k1::SecretKey; - use sp_runtime::transaction_validity::TransactionTag; - - const GENESIS_STEP: u64 = 42; - const TOTAL_VALIDATORS: usize = 3; - - fn genesis() -> AuraHeader { - HeaderBuilder::genesis().step(GENESIS_STEP).sign_by(&validator(0)) - } - - fn verify_with_config( - config: &AuraConfiguration, - header: &AuraHeader, - ) -> Result, Error> { - run_test_with_genesis(genesis(), TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - verify_aura_header(&storage, config, None, header, &ConstChainTime::default()) - }) - } - - fn default_verify(header: &AuraHeader) -> Result, Error> { - verify_with_config(&test_aura_config(), header) - } - - fn default_accept_into_pool( - mut make_header: impl FnMut(&[SecretKey]) -> (AuraHeader, Option>), - ) -> Result<(Vec, Vec), Error> { - run_test_with_genesis(genesis(), TOTAL_VALIDATORS, |_| { - let validators = vec![validator(0), validator(1), validator(2)]; - let mut storage = BridgeStorage::::new(); - let block1 = HeaderBuilder::with_parent_number(0).sign_by_set(&validators); - insert_header(&mut storage, block1); - let block2 = HeaderBuilder::with_parent_number(1).sign_by_set(&validators); - let block2_id = block2.compute_id(); - insert_header(&mut storage, block2); - let block3 = HeaderBuilder::with_parent_number(2).sign_by_set(&validators); - insert_header(&mut storage, block3); - - FinalizedBlock::::put(block2_id); - - let validators_config = ValidatorsConfiguration::Single(ValidatorsSource::Contract( - Default::default(), - Vec::new(), - )); - let (header, receipts) = make_header(&validators); - accept_aura_header_into_pool( - 
&storage, - &test_aura_config(), - &validators_config, - &pool_configuration(), - &header, - &(), - receipts.as_ref(), - ) - }) - } - - fn change_validators_set_at( - number: u64, - finalized_set: Vec
-		signalled_set: Option<Vec<Address>>,
-	) {
-		let set_id = NextValidatorsSetId::<TestRuntime>::get();
-		NextValidatorsSetId::<TestRuntime>::put(set_id + 1);
-		ValidatorsSets::<TestRuntime>::insert(
-			set_id,
-			ValidatorsSet {
-				validators: finalized_set,
-				signal_block: None,
-				enact_block: HeaderId {
-					number: 0,
-					hash: HeadersByNumber::<TestRuntime>::get(&0).unwrap()[0],
-				},
-			},
-		);
-
-		let header_hash = HeadersByNumber::<TestRuntime>::get(&number).unwrap()[0];
-		let mut header = Headers::<TestRuntime>::get(&header_hash).unwrap();
-		header.next_validators_set_id = set_id;
-		if let Some(signalled_set) = signalled_set {
-			header.last_signal_block = Some(HeaderId {
-				number: header.header.number - 1,
-				hash: header.header.parent_hash,
-			});
-			ScheduledChanges::<TestRuntime>::insert(
-				header.header.parent_hash,
-				AuraScheduledChange { validators: signalled_set, prev_signal_block: None },
-			);
-		}
-
-		Headers::<TestRuntime>::insert(header_hash, header);
-	}
-
-	#[test]
-	fn verifies_seal_count() {
-		// when there are no seals at all
-		let mut header = AuraHeader::default();
-		assert_eq!(default_verify(&header), Err(Error::InvalidSealArity));
-
-		// when there's a single seal (we expect 2 or 3 seals)
-		header.seal = vec![vec![]];
-		assert_eq!(default_verify(&header), Err(Error::InvalidSealArity));
-
-		// when there are 3 seals (we expect 2 by default)
-		header.seal = vec![vec![], vec![], vec![]];
-		assert_eq!(default_verify(&header), Err(Error::InvalidSealArity));
-
-		// when there are 2 seals
-		header.seal = vec![vec![], vec![]];
-		assert_ne!(default_verify(&header), Err(Error::InvalidSealArity));
-	}
-
-	#[test]
-	fn verifies_header_number() {
-		// when number is u64::max_value()
-		let header = HeaderBuilder::with_number(u64::max_value()).sign_by(&validator(0));
-		assert_eq!(default_verify(&header), Err(Error::RidiculousNumber));
-
-		// when header number is < u64::max_value()
-		let header = HeaderBuilder::with_number(u64::max_value() - 1).sign_by(&validator(0));
-		assert_ne!(default_verify(&header), Err(Error::RidiculousNumber));
-	}
-
-	#[test]
-	fn verifies_gas_used() {
-		// when gas used is larger than gas limit
-		let header = HeaderBuilder::with_number(1)
-			.gas_used((GAS_LIMIT + 1).into())
-			.sign_by(&validator(0));
-		assert_eq!(default_verify(&header), Err(Error::TooMuchGasUsed));
-
-		// when gas used is less than gas limit
-		let header = HeaderBuilder::with_number(1)
-			.gas_used((GAS_LIMIT - 1).into())
-			.sign_by(&validator(0));
-		assert_ne!(default_verify(&header), Err(Error::TooMuchGasUsed));
-	}
-
-	#[test]
-	fn verifies_gas_limit() {
-		let mut config = test_aura_config();
-		config.min_gas_limit = 100.into();
-		config.max_gas_limit = 200.into();
-
-		// when limit is lower than expected
-		let header = HeaderBuilder::with_number(1).gas_limit(50.into()).sign_by(&validator(0));
-		assert_eq!(verify_with_config(&config, &header), Err(Error::InvalidGasLimit));
-
-		// when limit is larger than expected
-		let header = HeaderBuilder::with_number(1).gas_limit(250.into()).sign_by(&validator(0));
-		assert_eq!(verify_with_config(&config, &header), Err(Error::InvalidGasLimit));
-
-		// when limit is within expected range
-		let header = HeaderBuilder::with_number(1).gas_limit(150.into()).sign_by(&validator(0));
-		assert_ne!(verify_with_config(&config, &header), Err(Error::InvalidGasLimit));
-	}
-
-	#[test]
-	fn verifies_extra_data_len() {
-		// when extra data is too large
-		let header = HeaderBuilder::with_number(1)
-			.extra_data(std::iter::repeat(42).take(1000).collect::<Vec<u8>>())
-			.sign_by(&validator(0));
-		assert_eq!(default_verify(&header), Err(Error::ExtraDataOutOfBounds));
-
-		// when extra data size is OK
-		let header = HeaderBuilder::with_number(1)
-			.extra_data(std::iter::repeat(42).take(10).collect::<Vec<u8>>())
-			.sign_by(&validator(0));
-		assert_ne!(default_verify(&header), Err(Error::ExtraDataOutOfBounds));
-	}
-
-	#[test]
-	fn verifies_timestamp() {
-		// when timestamp overflows i32
-		let header = HeaderBuilder::with_number(1)
-			.timestamp(i32::max_value() as u64 + 1)
-			.sign_by(&validator(0));
-		assert_eq!(default_verify(&header), Err(Error::TimestampOverflow));
-
-		// when timestamp doesn't overflow i32
-		let header = HeaderBuilder::with_number(1)
-			.timestamp(i32::max_value() as u64)
-			.sign_by(&validator(0));
-		assert_ne!(default_verify(&header), Err(Error::TimestampOverflow));
-	}
-
-	#[test]
-	fn verifies_chain_time() {
-		// expected import context after verification
-		let expect = ImportContext::<AccountId> {
-			submitter: None,
-			parent_hash: hex!("6e41bff05578fc1db17f6816117969b07d2217f1f9039d8116a82764335991d3")
-				.into(),
-			parent_header: genesis(),
-			parent_total_difficulty: U256::zero(),
-			parent_scheduled_change: None,
-			validators_set_id: 0,
-			validators_set: ValidatorsSet {
-				validators: vec![
-					hex!("dc5b20847f43d67928f49cd4f85d696b5a7617b5").into(),
-					hex!("897df33a7b3c62ade01e22c13d48f98124b4480f").into(),
-					hex!("05c987b34c6ef74e0c7e69c6e641120c24164c2d").into(),
-				],
-				signal_block: None,
-				enact_block: HeaderId {
-					number: 0,
-					hash: hex!("6e41bff05578fc1db17f6816117969b07d2217f1f9039d8116a82764335991d3")
-						.into(),
-				},
-			},
-			last_signal_block: None,
-		};
-
-		// header is behind
-		let header = HeaderBuilder::with_parent(&genesis())
-			.timestamp(i32::max_value() as u64 / 2 - 100)
-			.sign_by(&validator(1));
-		assert_eq!(default_verify(&header).unwrap(), expect);
-
-		// header is ahead
-		let header = HeaderBuilder::with_parent(&genesis())
-			.timestamp(i32::max_value() as u64 / 2 + 100)
-			.sign_by(&validator(1));
-		assert_eq!(default_verify(&header), Err(Error::HeaderTimestampIsAhead));
-
-		// header has same timestamp as ConstChainTime
-		let header = HeaderBuilder::with_parent(&genesis())
-			.timestamp(i32::max_value() as u64 / 2)
-			.sign_by(&validator(1));
-		assert_eq!(default_verify(&header).unwrap(), expect);
-	}
-
-	#[test]
-	fn verifies_parent_existence() {
-		// when there's no parent in the storage
-		let header = HeaderBuilder::with_number(1).sign_by(&validator(0));
-		assert_eq!(default_verify(&header), Err(Error::MissingParentBlock));
-
-		// when parent is in the storage
-		let header = HeaderBuilder::with_parent(&genesis()).sign_by(&validator(0));
-		assert_ne!(default_verify(&header), Err(Error::MissingParentBlock));
-	}
-
-	#[test]
-	fn verifies_step() {
-		// when step is missing from seals
-		let mut header = AuraHeader {
-			seal: vec![vec![], vec![]],
-			gas_limit: test_aura_config().min_gas_limit,
-			parent_hash: genesis().compute_hash(),
-			..Default::default()
-		};
-		assert_eq!(default_verify(&header), Err(Error::MissingStep));
-
-		// when step is the same as for the parent block
-		header.seal[0] = rlp_encode(&42u64).to_vec();
-		assert_eq!(default_verify(&header), Err(Error::DoubleVote));
-
-		// when step is OK
-		header.seal[0] = rlp_encode(&43u64).to_vec();
-		assert_ne!(default_verify(&header), Err(Error::DoubleVote));
-
-		// now check with validate_step check enabled
-		let mut config = test_aura_config();
-		config.validate_step_transition = 0;
-
-		// when step is less than for the parent block
-		header.seal[0] = rlp_encode(&40u64).to_vec();
-		header.seal = vec![vec![40], vec![]];
-		assert_eq!(verify_with_config(&config, &header), Err(Error::DoubleVote));
-
-		// when step is OK
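		// (with `validate_step_transition` active, the verifier requires the new step to
		// be strictly greater than the parent's step, GENESIS_STEP = 42 here, so 44 below
		// is accepted while 40 above was rejected)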
- header.seal[0] = rlp_encode(&44u64).to_vec(); - assert_ne!(verify_with_config(&config, &header), Err(Error::DoubleVote)); - } - - #[test] - fn verifies_empty_step() { - let mut config = test_aura_config(); - config.empty_steps_transition = 0; - - // when empty step duplicates parent step - let header = HeaderBuilder::with_parent(&genesis()) - .empty_steps(&[(&validator(0), GENESIS_STEP)]) - .step(GENESIS_STEP + 3) - .sign_by(&validator(3)); - assert_eq!(verify_with_config(&config, &header), Err(Error::InsufficientProof)); - - // when empty step signature check fails - let header = HeaderBuilder::with_parent(&genesis()) - .empty_steps(&[(&validator(100), GENESIS_STEP + 1)]) - .step(GENESIS_STEP + 3) - .sign_by(&validator(3)); - assert_eq!(verify_with_config(&config, &header), Err(Error::InsufficientProof)); - - // when we are accepting strict empty steps and they come not in order - config.strict_empty_steps_transition = 0; - let header = HeaderBuilder::with_parent(&genesis()) - .empty_steps(&[(&validator(2), GENESIS_STEP + 2), (&validator(1), GENESIS_STEP + 1)]) - .step(GENESIS_STEP + 3) - .sign_by(&validator(3)); - assert_eq!(verify_with_config(&config, &header), Err(Error::InsufficientProof)); - - // when empty steps are OK - let header = HeaderBuilder::with_parent(&genesis()) - .empty_steps(&[(&validator(1), GENESIS_STEP + 1), (&validator(2), GENESIS_STEP + 2)]) - .step(GENESIS_STEP + 3) - .sign_by(&validator(3)); - assert_ne!(verify_with_config(&config, &header), Err(Error::InsufficientProof)); - } - - #[test] - fn verifies_chain_score() { - let mut config = test_aura_config(); - config.validate_score_transition = 0; - - // when chain score is invalid - let header = HeaderBuilder::with_parent(&genesis()) - .difficulty(100.into()) - .sign_by(&validator(0)); - assert_eq!(verify_with_config(&config, &header), Err(Error::InvalidDifficulty)); - - // when chain score is accepted - let header = HeaderBuilder::with_parent(&genesis()).sign_by(&validator(0)); - assert_ne!(verify_with_config(&config, &header), Err(Error::InvalidDifficulty)); - } - - #[test] - fn verifies_validator() { - let good_header = HeaderBuilder::with_parent(&genesis()).sign_by(&validator(1)); - - // when header author is invalid - let mut header = good_header.clone(); - header.author = Default::default(); - assert_eq!(default_verify(&header), Err(Error::NotValidator)); - - // when header signature is invalid - let mut header = good_header.clone(); - header.seal[1] = rlp_encode(&H520::default()).to_vec(); - assert_eq!(default_verify(&header), Err(Error::NotValidator)); - - // when everything is OK - assert_eq!(default_verify(&good_header).map(|_| ()), Ok(())); - } - - #[test] - fn pool_verifies_known_blocks() { - // when header is known - assert_eq!( - default_accept_into_pool(|validators| ( - HeaderBuilder::with_parent_number(2).sign_by_set(validators), - None - )), - Err(Error::KnownHeader), - ); - } - - #[test] - fn pool_verifies_ancient_blocks() { - // when header number is less than finalized - assert_eq!( - default_accept_into_pool(|validators| ( - HeaderBuilder::with_parent_number(1) - .gas_limit((GAS_LIMIT + 1).into()) - .sign_by_set(validators), - None, - ),), - Err(Error::AncientHeader), - ); - } - - #[test] - fn pool_rejects_headers_without_required_receipts() { - assert_eq!( - default_accept_into_pool(|_| ( - AuraHeader { - number: 20_000_000, - seal: vec![vec![], vec![]], - gas_limit: test_aura_config().min_gas_limit, - log_bloom: (&[0xff; 256]).into(), - ..Default::default() - }, - None, - ),), - 
Err(Error::MissingTransactionsReceipts), - ); - } - - #[test] - fn pool_rejects_headers_with_redundant_receipts() { - assert_eq!( - default_accept_into_pool(|validators| ( - HeaderBuilder::with_parent_number(3).sign_by_set(validators), - Some(vec![Receipt { - gas_used: 1.into(), - log_bloom: (&[0xff; 256]).into(), - logs: vec![], - outcome: TransactionOutcome::Unknown, - }]), - ),), - Err(Error::RedundantTransactionsReceipts), - ); - } - - #[test] - fn pool_verifies_future_block_number() { - // when header is too far from the future - assert_eq!( - default_accept_into_pool(|validators| ( - HeaderBuilder::with_number(100).sign_by_set(validators), - None - ),), - Err(Error::UnsignedTooFarInTheFuture), - ); - } - - #[test] - fn pool_performs_full_verification_when_parent_is_known() { - // if parent is known, then we'll execute contextual_checks, which - // checks for DoubleVote - assert_eq!( - default_accept_into_pool(|validators| ( - HeaderBuilder::with_parent_number(3) - .step(GENESIS_STEP + 3) - .sign_by_set(validators), - None, - ),), - Err(Error::DoubleVote), - ); - } - - #[test] - fn pool_performs_validators_checks_when_parent_is_unknown() { - // if parent is unknown, then we still need to check if header has required signature - // (even if header will be considered invalid/duplicate later, we can use this signature - // as a proof of malicious action by this validator) - assert_eq!( - default_accept_into_pool(|_| ( - HeaderBuilder::with_number(8).step(8).sign_by(&validator(1)), - None, - )), - Err(Error::NotValidator), - ); - } - - #[test] - fn pool_verifies_header_with_known_parent() { - let mut hash = None; - assert_eq!( - default_accept_into_pool(|validators| { - let header = HeaderBuilder::with_parent_number(3).sign_by_set(validators); - hash = Some(header.compute_hash()); - (header, None) - }), - Ok(( - // no tags are required - vec![], - // header provides two tags - vec![(4u64, validators_addresses(3)[1]).encode(), (4u64, hash.unwrap()).encode(),], - )), - ); - } - - #[test] - fn pool_verifies_header_with_unknown_parent() { - let mut id = None; - let mut parent_id = None; - assert_eq!( - default_accept_into_pool(|validators| { - let header = - HeaderBuilder::with_number(5).step(GENESIS_STEP + 5).sign_by_set(validators); - id = Some(header.compute_id()); - parent_id = header.parent_id(); - (header, None) - }), - Ok(( - // parent tag required - vec![parent_id.unwrap().encode()], - // header provides two tags - vec![(5u64, validator_address(2)).encode(), id.unwrap().encode(),], - )), - ); - } - - #[test] - fn pool_uses_next_validators_set_when_finalized_fails() { - assert_eq!( - default_accept_into_pool(|actual_validators| { - // change finalized set at parent header - change_validators_set_at(3, validators_addresses(1), None); - - // header is signed using wrong set - let header = HeaderBuilder::with_number(5) - .step(GENESIS_STEP + 2) - .sign_by_set(actual_validators); - - (header, None) - }), - Err(Error::NotValidator), - ); - - let mut id = None; - let mut parent_id = None; - assert_eq!( - default_accept_into_pool(|actual_validators| { - // change finalized set at parent header + signal valid set at parent block - change_validators_set_at( - 3, - validators_addresses(10), - Some(validators_addresses(3)), - ); - - // header is signed using wrong set - let header = HeaderBuilder::with_number(5) - .step(GENESIS_STEP + 2) - .sign_by_set(actual_validators); - id = Some(header.compute_id()); - parent_id = header.parent_id(); - - (header, None) - }), - Ok(( - // parent tag required - 
vec![parent_id.unwrap().encode(),],
-				// header provides two tags
-				vec![(5u64, validator_address(2)).encode(), id.unwrap().encode(),],
-			)),
-		);
-	}
-
-	#[test]
-	fn pool_rejects_headers_with_invalid_receipts() {
-		assert_eq!(
-			default_accept_into_pool(|validators| {
-				let header = HeaderBuilder::with_parent_number(3)
-					.log_bloom((&[0xff; 256]).into())
-					.sign_by_set(validators);
-				(header, Some(vec![validators_change_receipt(Default::default())]))
-			}),
-			Err(Error::TransactionsReceiptsMismatch),
-		);
-	}
-
-	#[test]
-	fn pool_accepts_headers_with_valid_receipts() {
-		let mut hash = None;
-		let receipts = vec![validators_change_receipt(Default::default())];
-		let receipts_root = compute_merkle_root(receipts.iter().map(|r| r.rlp()));
-
-		assert_eq!(
-			default_accept_into_pool(|validators| {
-				let header = HeaderBuilder::with_parent_number(3)
-					.log_bloom((&[0xff; 256]).into())
-					.receipts_root(receipts_root)
-					.sign_by_set(validators);
-				hash = Some(header.compute_hash());
-				(header, Some(receipts.clone()))
-			}),
-			Ok((
-				// no tags are required
-				vec![],
-				// header provides two tags
-				vec![(4u64, validators_addresses(3)[1]).encode(), (4u64, hash.unwrap()).encode(),],
-			)),
-		);
-	}
-}
diff --git a/modules/grandpa/src/lib.rs b/modules/grandpa/src/lib.rs
index 279e23404a05..cbc85da30259 100644
--- a/modules/grandpa/src/lib.rs
+++ b/modules/grandpa/src/lib.rs
@@ -239,7 +239,7 @@ pub mod pallet {
 			operational: bool,
 		) -> DispatchResultWithPostInfo {
 			ensure_owner_or_root::<T, I>(origin)?;
-			<IsHalted<T, I>>::put(operational);
+			<IsHalted<T, I>>::put(!operational);
 
 			if operational {
 				log::info!(target: "runtime::bridge-grandpa", "Resuming pallet operations.");
@@ -620,9 +620,7 @@ pub fn initialize_for_benchmarks<T: Config<I>, I: 'static>(header: BridgedHeader
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use crate::mock::{
-		run_test, test_header, Origin, TestHash, TestHeader, TestNumber, TestRuntime,
-	};
+	use crate::mock::{run_test, test_header, Origin, TestHeader, TestNumber, TestRuntime};
 	use bp_test_utils::{
 		authority_list, make_default_justification, make_justification_for_header,
 		JustificationGeneratorParams, ALICE, BOB,
@@ -672,19 +670,17 @@
 		let _ = Pallet::<TestRuntime>::on_initialize(current_number);
 	}
 
-	fn change_log(delay: u64) -> Digest<TestHash> {
+	fn change_log(delay: u64) -> Digest {
 		let consensus_log =
 			ConsensusLog::<TestNumber>::ScheduledChange(sp_finality_grandpa::ScheduledChange {
 				next_authorities: vec![(ALICE.into(), 1), (BOB.into(), 1)],
 				delay,
 			});
 
-		Digest::<TestHash> {
-			logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())],
-		}
+		Digest { logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())] }
 	}
 
-	fn forced_change_log(delay: u64) -> Digest<TestHash> {
+	fn forced_change_log(delay: u64) -> Digest {
 		let consensus_log = ConsensusLog::<TestNumber>::ForcedChange(
 			delay,
 			sp_finality_grandpa::ScheduledChange {
@@ -693,9 +689,7 @@
 		);
 
-		Digest::<TestHash> {
-			logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())],
-		}
+		Digest { logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())] }
 	}
 
@@ -804,9 +798,13 @@
 	#[test]
 	fn pallet_rejects_transactions_if_halted() {
 		run_test(|| {
-			<IsHalted<TestRuntime>>::put(true);
+			initialize_substrate_bridge();
 
-			assert_noop!(submit_finality_proof(1), Error::<TestRuntime>::Halted,);
+			assert_ok!(Pallet::<TestRuntime>::set_operational(Origin::root(), false));
+			assert_noop!(submit_finality_proof(1), Error::<TestRuntime>::Halted);
+
+			assert_ok!(Pallet::<TestRuntime>::set_operational(Origin::root(), true));
+			assert_ok!(submit_finality_proof(1));
 		})
 	}
 
diff --git a/modules/grandpa/src/mock.rs b/modules/grandpa/src/mock.rs
index 183a56779156..f8b5e269323f 100644
--- a/modules/grandpa/src/mock.rs
+++ b/modules/grandpa/src/mock.rs
@@ -29,7 +29,6 @@ use sp_runtime::{
 pub type AccountId = u64;
 pub type TestHeader = crate::BridgedHeader<TestRuntime>;
 pub type TestNumber = crate::BridgedBlockNumber<TestRuntime>;
-pub type TestHash = crate::BridgedBlockHash<TestRuntime>;
 
 type Block = frame_system::mocking::MockBlock<TestRuntime>;
 type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<TestRuntime>;
diff --git a/primitives/chain-rococo/src/lib.rs b/primitives/chain-rococo/src/lib.rs
index d6d97fdc5f4e..b3bbc91976da 100644
--- a/primitives/chain-rococo/src/lib.rs
+++ b/primitives/chain-rococo/src/lib.rs
@@ -44,7 +44,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_name: sp_version::create_runtime_str!("rococo"),
 	impl_name: sp_version::create_runtime_str!("parity-rococo-v1.6"),
 	authoring_version: 0,
-	spec_version: 9004,
+	spec_version: 9100,
 	impl_version: 0,
 	apis: sp_version::create_apis_vec![[]],
 	transaction_version: 0,
diff --git a/primitives/currency-exchange/Cargo.toml b/primitives/currency-exchange/Cargo.toml
deleted file mode 100644
index 165891f0c6b1..000000000000
--- a/primitives/currency-exchange/Cargo.toml
+++ /dev/null
@@ -1,27 +0,0 @@
-[package]
-name = "bp-currency-exchange"
-description = "Primitives of currency exchange module."
-version = "0.1.0"
-authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
-license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
-
-[dependencies]
-codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false }
-scale-info = { version = "1.0", default-features = false, features = ["derive"] }
-
-# Substrate Dependencies
-
-frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
-sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
-sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
-
-[features]
-default = ["std"]
-std = [
-	"codec/std",
-	"frame-support/std",
-	"scale-info/std",
-	"sp-api/std",
-	"sp-std/std",
-]
diff --git a/primitives/currency-exchange/src/lib.rs b/primitives/currency-exchange/src/lib.rs
deleted file mode 100644
index 1a30915b1cbf..000000000000
--- a/primitives/currency-exchange/src/lib.rs
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2019-2021 Parity Technologies (UK) Ltd.
-// This file is part of Parity Bridges Common.
-
-// Parity Bridges Common is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Parity Bridges Common is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
-
-#![cfg_attr(not(feature = "std"), no_std)]
-// RuntimeApi generated functions
-#![allow(clippy::too_many_arguments)]
-// Generated by `DecodeLimit::decode_with_depth_limit`
-#![allow(clippy::unnecessary_mut_passed)]
-
-use codec::{Decode, Encode, EncodeLike};
-use frame_support::{Parameter, RuntimeDebug};
-use scale_info::TypeInfo;
-use sp_api::decl_runtime_apis;
-use sp_std::marker::PhantomData;
-
-/// All errors that may happen during exchange.
-#[derive(RuntimeDebug, PartialEq)]
-pub enum Error {
-	/// Invalid peer blockchain transaction provided.
-	InvalidTransaction,
-	/// Peer transaction has invalid amount.
-	InvalidAmount,
-	/// Peer transaction has invalid recipient.
-	InvalidRecipient,
-	/// Cannot map from peer recipient to this blockchain recipient.
-	FailedToMapRecipients,
-	/// Failed to convert from peer blockchain currency to this blockchain currency.
-	FailedToConvertCurrency,
-	/// Deposit has failed.
-	DepositFailed,
-	/// Deposit has partially failed (changes to recipient account were made).
-	DepositPartiallyFailed,
-}
-
-/// Result of all exchange operations.
-pub type Result<T> = sp_std::result::Result<T, Error>;
-
-/// Peer blockchain lock funds transaction.
-#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)]
-pub struct LockFundsTransaction<TransferId, Recipient, Amount> {
-	/// Something that uniquely identifies this transfer.
-	pub id: TransferId,
-	/// Funds recipient on the peer chain.
-	pub recipient: Recipient,
-	/// Amount of the locked funds.
-	pub amount: Amount,
-}
-
-/// Peer blockchain transaction that may represent lock funds transaction.
-pub trait MaybeLockFundsTransaction {
-	/// Transaction type.
-	type Transaction;
-	/// Identifier that uniquely identifies this transfer.
-	type Id: Decode + Encode + TypeInfo + EncodeLike + sp_std::fmt::Debug;
-	/// Peer recipient type.
-	type Recipient;
-	/// Peer currency amount type.
-	type Amount;
-
-	/// Parse lock funds transaction of the peer blockchain. Returns None if
-	/// transaction format is unknown, or it isn't a lock funds transaction.
-	fn parse(
-		tx: &Self::Transaction,
-	) -> Result<LockFundsTransaction<Self::Id, Self::Recipient, Self::Amount>>;
-}
-
-/// Map that maps recipients from peer blockchain to this blockchain recipients.
-pub trait RecipientsMap {
-	/// Peer blockchain recipient type.
-	type PeerRecipient;
-	/// Current blockchain recipient type.
-	type Recipient;
-
-	/// Lookup current blockchain recipient by peer blockchain recipient.
-	fn map(peer_recipient: Self::PeerRecipient) -> Result<Self::Recipient>;
-}
-
-/// Conversion between two currencies.
-pub trait CurrencyConverter {
-	/// Type of the source currency amount.
-	type SourceAmount;
-	/// Type of the target currency amount.
-	type TargetAmount;
-
-	/// Convert from source to target currency.
-	fn convert(amount: Self::SourceAmount) -> Result<Self::TargetAmount>;
-}
-
-/// Currency deposit.
-pub trait DepositInto {
-	/// Recipient type.
-	type Recipient;
-	/// Currency amount type.
-	type Amount;
-
-	/// Grant some money to given account.
-	fn deposit_into(recipient: Self::Recipient, amount: Self::Amount) -> Result<()>;
-}
-
-/// Recipients map which is used when account ids are the same on both chains.
-#[derive(Debug)]
-pub struct IdentityRecipients<AccountId>(PhantomData<AccountId>);
-
-impl<AccountId> RecipientsMap for IdentityRecipients<AccountId> {
-	type PeerRecipient = AccountId;
-	type Recipient = AccountId;
-
-	fn map(peer_recipient: Self::PeerRecipient) -> Result<Self::Recipient> {
-		Ok(peer_recipient)
-	}
-}
-
-/// Currency converter which is used when currency is the same on both chains.
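/// (a bridge between chains with different currencies would instead implement
/// `CurrencyConverter` with an actual rate, e.g. `target = source * rate`; the
/// identity impl below just passes the amount through)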
-#[derive(Debug)]
-pub struct IdentityCurrencyConverter<Amount>(PhantomData<Amount>);
-
-impl<Amount> CurrencyConverter for IdentityCurrencyConverter<Amount> {
-	type SourceAmount = Amount;
-	type TargetAmount = Amount;
-
-	fn convert(currency: Self::SourceAmount) -> Result<Self::TargetAmount> {
-		Ok(currency)
-	}
-}
-
-decl_runtime_apis! {
-	/// API for Rialto exchange transactions submitters.
-	pub trait RialtoCurrencyExchangeApi<Proof: Parameter> {
-		/// Returns true if currency exchange module is able to import transaction proof in
-		/// its current state.
-		fn filter_transaction_proof(proof: Proof) -> bool;
-	}
-
-	/// API for Kovan exchange transactions submitters.
-	pub trait KovanCurrencyExchangeApi<Proof: Parameter> {
-		/// Returns true if currency exchange module is able to import transaction proof in
-		/// its current state.
-		fn filter_transaction_proof(proof: Proof) -> bool;
-	}
-}
diff --git a/primitives/ethereum-poa/Cargo.toml b/primitives/ethereum-poa/Cargo.toml
deleted file mode 100644
index 71f071bbf0e6..000000000000
--- a/primitives/ethereum-poa/Cargo.toml
+++ /dev/null
@@ -1,59 +0,0 @@
-[package]
-name = "bp-eth-poa"
-description = "Primitives of Ethereum PoA Bridge module."
-version = "0.1.0"
-authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
-license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
-
-[dependencies]
-codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false }
-ethbloom = { version = "0.10.0", default-features = false, features = ["rlp"] }
-fixed-hash = { version = "0.7", default-features = false }
-hash-db = { version = "0.15.2", default-features = false }
-impl-rlp = { version = "0.3", default-features = false }
-impl-serde = { version = "0.3.1", optional = true }
-libsecp256k1 = { version = "0.7", default-features = false, features = ["hmac", "static-context"] }
-parity-bytes = { version = "0.1", default-features = false }
-plain_hasher = { version = "0.2.2", default-features = false }
-primitive-types = { version = "0.10", default-features = false, features = ["codec", "rlp"] }
-rlp = { version = "0.5", default-features = false }
-scale-info = { version = "1.0", default-features = false, features = ["derive"] }
-serde = { version = "1.0", optional = true }
-serde-big-array = { version = "0.2", optional = true }
-triehash = { version = "0.8.2", default-features = false }
-
-# Substrate Dependencies
-
-sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
-sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
-sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
-sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
-
-[dev-dependencies]
-hex-literal = "0.2"
-
-[features]
-default = ["std"]
-std = [
-	"codec/std",
-	"ethbloom/std",
-	"fixed-hash/std",
-	"hash-db/std",
-	"impl-rlp/std",
-	"impl-serde",
-	"libsecp256k1/std",
-	"parity-bytes/std",
-	"plain_hasher/std",
-	"primitive-types/std",
-	"primitive-types/serde",
-	"rlp/std",
-	"scale-info/std",
-	"serde/std",
-	"serde-big-array",
-	"sp-api/std",
-	"sp-io/std",
-	"sp-runtime/std",
-	"sp-std/std",
-	"triehash/std",
-]
diff --git a/primitives/ethereum-poa/src/lib.rs b/primitives/ethereum-poa/src/lib.rs
deleted file mode 100644
index 58f5731e5222..000000000000
--- a/primitives/ethereum-poa/src/lib.rs
+++ /dev/null
@@ -1,732 +0,0 @@
-// Copyright 2019-2021 Parity Technologies (UK) Ltd.
-// This file is part of Parity Bridges Common.
-
-// Parity Bridges Common is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Parity Bridges Common is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
-
-#![cfg_attr(not(feature = "std"), no_std)]
-// RuntimeApi generated functions
-#![allow(clippy::too_many_arguments)]
-// Generated by `DecodeLimit::decode_with_depth_limit`
-#![allow(clippy::unnecessary_mut_passed)]
-
-pub use parity_bytes::Bytes;
-pub use primitive_types::{H160, H256, H512, U128, U256};
-pub use rlp::encode as rlp_encode;
-
-use codec::{Decode, Encode};
-use ethbloom::{Bloom as EthBloom, Input as BloomInput};
-use fixed_hash::construct_fixed_hash;
-use rlp::{Decodable, DecoderError, Rlp, RlpStream};
-use scale_info::TypeInfo;
-use sp_io::hashing::keccak_256;
-use sp_runtime::RuntimeDebug;
-use sp_std::prelude::*;
-
-use impl_rlp::impl_fixed_hash_rlp;
-#[cfg(feature = "std")]
-use impl_serde::impl_fixed_hash_serde;
-#[cfg(feature = "std")]
-use serde::{Deserialize, Serialize};
-#[cfg(feature = "std")]
-use serde_big_array::big_array;
-
-construct_fixed_hash! { pub struct H520(65); }
-impl_fixed_hash_rlp!(H520, 65);
-#[cfg(feature = "std")]
-impl_fixed_hash_serde!(H520, 65);
-
-/// Raw (RLP-encoded) ethereum transaction.
-pub type RawTransaction = Vec<u8>;
-
-/// Raw (RLP-encoded) ethereum transaction receipt.
-pub type RawTransactionReceipt = Vec<u8>;
-
-/// An ethereum address.
-pub type Address = H160;
-
-pub mod signatures;
-
-/// Complete header id.
-#[derive(Encode, Decode, Default, RuntimeDebug, PartialEq, Clone, Copy, TypeInfo)]
-pub struct HeaderId {
-	/// Header number.
-	pub number: u64,
-	/// Header hash.
-	pub hash: H256,
-}
-
-/// An Aura header.
-#[derive(Clone, Default, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)]
-#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
-pub struct AuraHeader {
-	/// Parent block hash.
-	pub parent_hash: H256,
-	/// Block timestamp.
-	pub timestamp: u64,
-	/// Block number.
-	pub number: u64,
-	/// Block author.
-	pub author: Address,
-
-	/// Transactions root.
-	pub transactions_root: H256,
-	/// Block uncles hash.
-	pub uncles_hash: H256,
-	/// Block extra data.
-	pub extra_data: Bytes,
-
-	/// State root.
-	pub state_root: H256,
-	/// Block receipts root.
-	pub receipts_root: H256,
-	/// Block bloom.
-	pub log_bloom: Bloom,
-	/// Gas used for contracts execution.
-	pub gas_used: U256,
-	/// Block gas limit.
-	pub gas_limit: U256,
-
-	/// Block difficulty.
-	pub difficulty: U256,
-	/// Vector of post-RLP-encoded fields.
-	pub seal: Vec<Bytes>,
-}
-
-/// Parsed ethereum transaction.
-#[derive(PartialEq, RuntimeDebug)]
-pub struct Transaction {
-	/// Sender address.
-	pub sender: Address,
-	/// Unsigned portion of ethereum transaction.
-	pub unsigned: UnsignedTransaction,
-}
-
-/// Unsigned portion of ethereum transaction.
-#[derive(Clone, PartialEq, RuntimeDebug)]
-pub struct UnsignedTransaction {
-	/// Sender nonce.
-	pub nonce: U256,
-	/// Gas price.
-	pub gas_price: U256,
-	/// Gas limit.
-	pub gas: U256,
-	/// Transaction destination address. None if it is a contract creation transaction.
-	pub to: Option<Address>,
-	/// Value.
-	pub value: U256,
-	/// Associated data.
-	pub payload: Bytes,
-}
-
-/// Information describing execution of a transaction.
-#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)]
-pub struct Receipt {
-	/// The total gas used in the block following execution of the transaction.
-	pub gas_used: U256,
-	/// The OR-wide combination of all logs' blooms for this transaction.
-	pub log_bloom: Bloom,
-	/// The logs stemming from this transaction.
-	pub logs: Vec<LogEntry>,
-	/// Transaction outcome.
-	pub outcome: TransactionOutcome,
-}
-
-/// Transaction outcome stored in the receipt.
-#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)]
-pub enum TransactionOutcome {
-	/// Status and state root are unknown under EIP-98 rules.
-	Unknown,
-	/// State root is known. Pre EIP-98 and EIP-658 rules.
-	StateRoot(H256),
-	/// Status code is known. EIP-658 rules.
-	StatusCode(u8),
-}
-
-/// A record of execution for a `LOG` operation.
-#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)]
-pub struct LogEntry {
-	/// The address of the contract executing at the point of the `LOG` operation.
-	pub address: Address,
-	/// The topics associated with the `LOG` operation.
-	pub topics: Vec<H256>,
-	/// The data associated with the `LOG` operation.
-	pub data: Bytes,
-}
-
-/// Logs bloom.
-#[derive(Clone, Encode, Decode, TypeInfo)]
-#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
-pub struct Bloom(#[cfg_attr(feature = "std", serde(with = "BigArray"))] [u8; 256]);
-
-#[cfg(feature = "std")]
-big_array! { BigArray; }
-
-/// An empty step message that is included in a seal, the only difference is that it doesn't
-/// include the `parent_hash` in order to save space. The included signature is of the original
-/// empty step message, which can be reconstructed by using the parent hash of the block in
-/// which this sealed empty message is included.
-pub struct SealedEmptyStep {
-	/// Signature of the original message author.
-	pub signature: H520,
-	/// The step this message is generated for.
-	pub step: u64,
-}
-
-impl AuraHeader {
-	/// Compute id of this header.
-	pub fn compute_id(&self) -> HeaderId {
-		HeaderId { number: self.number, hash: self.compute_hash() }
-	}
-
-	/// Compute hash of this header (keccak of the RLP with seal).
-	pub fn compute_hash(&self) -> H256 {
-		keccak_256(&self.rlp(true)).into()
-	}
-
-	/// Get id of this header's parent. Returns None if this is the genesis header.
-	pub fn parent_id(&self) -> Option<HeaderId> {
-		self.number
-			.checked_sub(1)
-			.map(|parent_number| HeaderId { number: parent_number, hash: self.parent_hash })
-	}
-
-	/// Check if passed transactions receipts are matching receipts root in this header.
-	/// Returns Ok(computed-root) if check succeeds.
-	/// Returns Err(computed-root) if check fails.
-	pub fn check_receipts_root(&self, receipts: &[Receipt]) -> Result<H256, H256> {
-		check_merkle_proof(self.receipts_root, receipts.iter().map(|r| r.rlp()))
-	}
-
-	/// Check if passed raw transactions receipts are matching receipts root in this header.
-	/// Returns Ok(computed-root) if check succeeds.
-	/// Returns Err(computed-root) if check fails.
-	pub fn check_raw_receipts_root<'a>(
-		&self,
-		receipts: impl IntoIterator<Item = &'a RawTransactionReceipt>,
-	) -> Result<H256, H256> {
-		check_merkle_proof(self.receipts_root, receipts.into_iter())
-	}
-
-	/// Check if passed transactions are matching transactions root in this header.
-	/// Returns Ok(computed-root) if check succeeds.
-	/// Returns Err(computed-root) if check fails.
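	/// (in each of these checks the root is an ordered Merkle-Patricia trie root built
	/// over the RLP-encoded items; see `compute_merkle_root` further below)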
-	pub fn check_transactions_root<'a>(
-		&self,
-		transactions: impl IntoIterator<Item = &'a RawTransaction>,
-	) -> Result<H256, H256> {
-		check_merkle_proof(self.transactions_root, transactions.into_iter())
-	}
-
-	/// Gets the seal hash of this header.
-	pub fn seal_hash(&self, include_empty_steps: bool) -> Option<H256> {
-		Some(match include_empty_steps {
-			true => {
-				let mut message = self.compute_hash().as_bytes().to_vec();
-				message.extend_from_slice(self.seal.get(2)?);
-				keccak_256(&message).into()
-			},
-			false => keccak_256(&self.rlp(false)).into(),
-		})
-	}
-
-	/// Get step this header is generated for.
-	pub fn step(&self) -> Option<u64> {
-		self.seal.get(0).map(|x| Rlp::new(x)).and_then(|x| x.as_val().ok())
-	}
-
-	/// Get header author's signature.
-	pub fn signature(&self) -> Option<H520> {
-		self.seal.get(1).and_then(|x| Rlp::new(x).as_val().ok())
-	}
-
-	/// Extracts the empty steps from the header seal.
-	pub fn empty_steps(&self) -> Option<Vec<SealedEmptyStep>> {
-		self.seal.get(2).and_then(|x| Rlp::new(x).as_list::<SealedEmptyStep>().ok())
-	}
-
-	/// Returns header RLP with or without seals.
-	fn rlp(&self, with_seal: bool) -> Bytes {
-		let mut s = RlpStream::new();
-		if with_seal {
-			s.begin_list(13 + self.seal.len());
-		} else {
-			s.begin_list(13);
-		}
-
-		s.append(&self.parent_hash);
-		s.append(&self.uncles_hash);
-		s.append(&self.author);
-		s.append(&self.state_root);
-		s.append(&self.transactions_root);
-		s.append(&self.receipts_root);
-		s.append(&EthBloom::from(self.log_bloom.0));
-		s.append(&self.difficulty);
-		s.append(&self.number);
-		s.append(&self.gas_limit);
-		s.append(&self.gas_used);
-		s.append(&self.timestamp);
-		s.append(&self.extra_data);
-
-		if with_seal {
-			for b in &self.seal {
-				s.append_raw(b, 1);
-			}
-		}
-
-		s.out().to_vec()
-	}
-}
-
-impl UnsignedTransaction {
-	/// Decode unsigned portion of raw transaction RLP.
-	pub fn decode_rlp(raw_tx: &[u8]) -> Result<Self, DecoderError> {
-		let tx_rlp = Rlp::new(raw_tx);
-		let to = tx_rlp.at(3)?;
-		Ok(UnsignedTransaction {
-			nonce: tx_rlp.val_at(0)?,
-			gas_price: tx_rlp.val_at(1)?,
-			gas: tx_rlp.val_at(2)?,
-			to: match to.is_empty() {
-				false => Some(to.as_val()?),
-				true => None,
-			},
-			value: tx_rlp.val_at(4)?,
-			payload: tx_rlp.val_at(5)?,
-		})
-	}
-
-	/// Returns message that has to be signed to sign this transaction.
-	pub fn message(&self, chain_id: Option<u64>) -> H256 {
-		keccak_256(&self.rlp(chain_id)).into()
-	}
-
-	/// Returns unsigned transaction RLP.
-	pub fn rlp(&self, chain_id: Option<u64>) -> Bytes {
-		let mut stream = RlpStream::new_list(if chain_id.is_some() { 9 } else { 6 });
-		self.rlp_to(chain_id, &mut stream);
-		stream.out().to_vec()
-	}
-
-	/// Encode to given RLP stream.
-	pub fn rlp_to(&self, chain_id: Option<u64>, stream: &mut RlpStream) {
-		stream.append(&self.nonce);
-		stream.append(&self.gas_price);
-		stream.append(&self.gas);
-		match self.to {
-			Some(to) => stream.append(&to),
-			None => stream.append(&""),
-		};
-		stream.append(&self.value);
-		stream.append(&self.payload);
-		if let Some(chain_id) = chain_id {
-			stream.append(&chain_id);
-			stream.append(&0u8);
-			stream.append(&0u8);
-		}
-	}
-}
-
-impl Receipt {
-	/// Decode status from raw transaction receipt RLP.
-	pub fn is_successful_raw_receipt(raw_receipt: &[u8]) -> Result<bool, DecoderError> {
-		let rlp = Rlp::new(raw_receipt);
-		if rlp.item_count()? == 3 {
-			// no outcome - invalid tx?
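			// (this crate encodes a receipt with `TransactionOutcome::Unknown` as a
			// 3-item list, without the leading status/state-root field, so a successful
			// execution cannot be proven; see `Receipt::rlp` below)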
-			Ok(false)
-		} else {
-			let first = rlp.at(0)?;
-			if first.is_data() && first.data()?.len() <= 1 {
-				// EIP-658 transaction - status of successful transaction is 1
-				let status: u8 = first.as_val()?;
-				Ok(status == 1)
-			} else {
-				// pre-EIP-658 transaction - we do not support this kind of transaction
-				Ok(false)
-			}
-		}
-	}
-
-	/// Returns receipt RLP.
-	pub fn rlp(&self) -> Bytes {
-		let mut s = RlpStream::new();
-		match self.outcome {
-			TransactionOutcome::Unknown => {
-				s.begin_list(3);
-			},
-			TransactionOutcome::StateRoot(ref root) => {
-				s.begin_list(4);
-				s.append(root);
-			},
-			TransactionOutcome::StatusCode(ref status_code) => {
-				s.begin_list(4);
-				s.append(status_code);
-			},
-		}
-		s.append(&self.gas_used);
-		s.append(&EthBloom::from(self.log_bloom.0));
-
-		s.begin_list(self.logs.len());
-		for log in &self.logs {
-			s.begin_list(3);
-			s.append(&log.address);
-			s.begin_list(log.topics.len());
-			for topic in &log.topics {
-				s.append(topic);
-			}
-			s.append(&log.data);
-		}
-
-		s.out().to_vec()
-	}
-}
-
-impl SealedEmptyStep {
-	/// Returns message that has to be signed by the validator.
-	pub fn message(&self, parent_hash: &H256) -> H256 {
-		let mut message = RlpStream::new_list(2);
-		message.append(&self.step);
-		message.append(parent_hash);
-		keccak_256(&message.out()).into()
-	}
-
-	/// Returns RLP for the vector of empty steps (we only do encoding in tests).
-	pub fn rlp_of(empty_steps: &[SealedEmptyStep]) -> Bytes {
-		let mut s = RlpStream::new();
-		s.begin_list(empty_steps.len());
-		for empty_step in empty_steps {
-			s.begin_list(2).append(&empty_step.signature).append(&empty_step.step);
-		}
-		s.out().to_vec()
-	}
-}
-
-impl Decodable for SealedEmptyStep {
-	fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
-		let signature: H520 = rlp.val_at(0)?;
-		let step = rlp.val_at(1)?;
-
-		Ok(SealedEmptyStep { signature, step })
-	}
-}
-
-impl LogEntry {
-	/// Calculates the bloom of this log entry.
-	pub fn bloom(&self) -> Bloom {
-		let eth_bloom = self.topics.iter().fold(
-			EthBloom::from(BloomInput::Raw(self.address.as_bytes())),
-			|mut b, t| {
-				b.accrue(BloomInput::Raw(t.as_bytes()));
-				b
-			},
-		);
-		Bloom(*eth_bloom.data())
-	}
-}
-
-impl Bloom {
-	/// Returns true if this bloom has all bits from the other set.
-	pub fn contains(&self, other: &Bloom) -> bool {
-		self.0.iter().zip(other.0.iter()).all(|(l, r)| (l & r) == *r)
-	}
-}
-
-impl<'a> From<&'a [u8; 256]> for Bloom {
-	fn from(buffer: &'a [u8; 256]) -> Bloom {
-		Bloom(*buffer)
-	}
-}
-
-impl PartialEq for Bloom {
-	fn eq(&self, other: &Bloom) -> bool {
-		self.0.iter().zip(other.0.iter()).all(|(l, r)| l == r)
-	}
-}
-
-// there's no default for [_; 256], but clippy still complains
-#[allow(clippy::derivable_impls)]
-impl Default for Bloom {
-	fn default() -> Self {
-		Bloom([0; 256])
-	}
-}
-
-#[cfg(feature = "std")]
-impl std::fmt::Debug for Bloom {
-	fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
-		fmt.debug_struct("Bloom").finish()
-	}
-}
-
-/// Decode Ethereum transaction.
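/// (handles legacy RLP transactions only; the `v >= 35` branch below implements EIP-155
/// replay protection, recovering the chain id as `(v - 35) / 2`)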
-pub fn transaction_decode_rlp(raw_tx: &[u8]) -> Result<Transaction, DecoderError> {
-	// parse transaction fields
-	let unsigned = UnsignedTransaction::decode_rlp(raw_tx)?;
-	let tx_rlp = Rlp::new(raw_tx);
-	let v: u64 = tx_rlp.val_at(6)?;
-	let r: U256 = tx_rlp.val_at(7)?;
-	let s: U256 = tx_rlp.val_at(8)?;
-
-	// reconstruct signature
-	let mut signature = [0u8; 65];
-	let (chain_id, v) = match v {
-		v if v == 27u64 => (None, 0),
-		v if v == 28u64 => (None, 1),
-		v if v >= 35u64 => (Some((v - 35) / 2), ((v - 1) % 2) as u8),
-		_ => (None, 4),
-	};
-	r.to_big_endian(&mut signature[0..32]);
-	s.to_big_endian(&mut signature[32..64]);
-	signature[64] = v;
-
-	// reconstruct message that has been signed
-	let message = unsigned.message(chain_id);
-
-	// recover tx sender
-	let sender_public =
-		sp_io::crypto::secp256k1_ecdsa_recover(&signature, message.as_fixed_bytes())
-			.map_err(|_| rlp::DecoderError::Custom("Failed to recover transaction sender"))?;
-	let sender_address = public_to_address(&sender_public);
-
-	Ok(Transaction { sender: sender_address, unsigned })
-}
-
-/// Convert public key into corresponding ethereum address.
-pub fn public_to_address(public: &[u8; 64]) -> Address {
-	let hash = keccak_256(public);
-	let mut result = Address::zero();
-	result.as_bytes_mut().copy_from_slice(&hash[12..]);
-	result
-}
-
-/// Check ethereum merkle proof.
-/// Returns Ok(computed-root) if check succeeds.
-/// Returns Err(computed-root) if check fails.
-fn check_merkle_proof<T: AsRef<[u8]>>(
-	expected_root: H256,
-	items: impl Iterator<Item = T>,
-) -> Result<H256, H256> {
-	let computed_root = compute_merkle_root(items);
-	if computed_root == expected_root {
-		Ok(computed_root)
-	} else {
-		Err(computed_root)
-	}
-}
-
-/// Compute ethereum merkle root.
-pub fn compute_merkle_root<T: AsRef<[u8]>>(items: impl Iterator<Item = T>) -> H256 {
-	struct Keccak256Hasher;
-
-	impl hash_db::Hasher for Keccak256Hasher {
-		type Out = H256;
-		type StdHasher = plain_hasher::PlainHasher;
-		const LENGTH: usize = 32;
-		fn hash(x: &[u8]) -> Self::Out {
-			keccak_256(x).into()
-		}
-	}
-
-	triehash::ordered_trie_root::<Keccak256Hasher, _>(items)
-}
-
-/// Get validator that should author the block at given step.
-pub fn step_validator<T>(header_validators: &[T], header_step: u64) -> &T {
-	&header_validators[(header_step % header_validators.len() as u64) as usize]
-}
-
-sp_api::decl_runtime_apis! {
-	/// API for querying information about headers from the Rialto Bridge Pallet
-	pub trait RialtoPoAHeaderApi {
-		/// Returns number and hash of the best block known to the bridge module.
-		///
-		/// The caller should only submit an `import_header` transaction that makes
-		/// (or leads to making) other header the best one.
-		fn best_block() -> (u64, H256);
-		/// Returns number and hash of the best finalized block known to the bridge module.
-		fn finalized_block() -> (u64, H256);
-		/// Returns true if the import of given block requires transactions receipts.
-		fn is_import_requires_receipts(header: AuraHeader) -> bool;
-		/// Returns true if header is known to the runtime.
-		fn is_known_block(hash: H256) -> bool;
-	}
-
-	/// API for querying information about headers from the Kovan Bridge Pallet
-	pub trait KovanHeaderApi {
-		/// Returns number and hash of the best block known to the bridge module.
-		///
-		/// The caller should only submit an `import_header` transaction that makes
-		/// (or leads to making) other header the best one.
-		fn best_block() -> (u64, H256);
-		/// Returns number and hash of the best finalized block known to the bridge module.
- fn finalized_block() -> (u64, H256); - /// Returns true if the import of given block requires transactions receipts. - fn is_import_requires_receipts(header: AuraHeader) -> bool; - /// Returns true if header is known to the runtime. - fn is_known_block(hash: H256) -> bool; - } -} - -#[cfg(test)] -mod tests { - use super::*; - use hex_literal::hex; - - #[test] - fn transfer_transaction_decode_works() { - // value transfer transaction - // https://etherscan.io/tx/0xb9d4ad5408f53eac8627f9ccd840ba8fb3469d55cd9cc2a11c6e049f1eef4edd - // https://etherscan.io/getRawTx?tx=0xb9d4ad5408f53eac8627f9ccd840ba8fb3469d55cd9cc2a11c6e049f1eef4edd - let raw_tx = hex!("f86c0a85046c7cfe0083016dea94d1310c1e038bc12865d3d3997275b3e4737c6302880b503be34d9fe80080269fc7eaaa9c21f59adf8ad43ed66cf5ef9ee1c317bd4d32cd65401e7aaca47cfaa0387d79c65b90be6260d09dcfb780f29dd8133b9b1ceb20b83b7e442b4bfc30cb"); - assert_eq!( - transaction_decode_rlp(&raw_tx), - Ok(Transaction { - sender: hex!("67835910d32600471f388a137bbff3eb07993c04").into(), - unsigned: UnsignedTransaction { - nonce: 10.into(), - gas_price: 19000000000u64.into(), - gas: 93674.into(), - to: Some(hex!("d1310c1e038bc12865d3d3997275b3e4737c6302").into()), - value: 815217380000000000_u64.into(), - payload: Default::default(), - } - }), - ); - - // Kovan value transfer transaction - // https://kovan.etherscan.io/tx/0x3b4b7bd41c1178045ccb4753aa84c1ef9864b4d712fa308b228917cd837915da - // https://kovan.etherscan.io/getRawTx?tx=0x3b4b7bd41c1178045ccb4753aa84c1ef9864b4d712fa308b228917cd837915da - let raw_tx = hex!("f86a822816808252089470c1ccde719d6f477084f07e4137ab0e55f8369f8930cf46e92063afd8008078a00e4d1f4d8aa992bda3c105ff3d6e9b9acbfd99facea00985e2131029290adbdca028ea29a46a4b66ec65b454f0706228e3768cb0ecf755f67c50ddd472f11d5994"); - assert_eq!( - transaction_decode_rlp(&raw_tx), - Ok(Transaction { - sender: hex!("faadface3fbd81ce37b0e19c0b65ff4234148132").into(), - unsigned: UnsignedTransaction { - nonce: 10262.into(), - gas_price: 0.into(), - gas: 21000.into(), - to: Some(hex!("70c1ccde719d6f477084f07e4137ab0e55f8369f").into()), - value: 900379597077600000000_u128.into(), - payload: Default::default(), - }, - }), - ); - } - - #[test] - fn payload_transaction_decode_works() { - // contract call transaction - // https://etherscan.io/tx/0xdc2b996b4d1d6922bf6dba063bfd70913279cb6170967c9bb80252aeb061cf65 - // https://etherscan.io/getRawTx?tx=0xdc2b996b4d1d6922bf6dba063bfd70913279cb6170967c9bb80252aeb061cf65 - let raw_tx = hex!("f8aa76850430e234008301500094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000e08f35f66867a454835b25118f1e490e7f9e9a7400000000000000000000000000000000000000000000000000000000004c4b4025a0964e023999621dc3d4d831c43c71f7555beb6d1192dee81a3674b3f57e310f21a00f229edd86f841d1ee4dc48cc16667e2283817b1d39bae16ced10cd206ae4fd4"); - assert_eq!( - transaction_decode_rlp(&raw_tx), - Ok(Transaction { - sender: hex!("2b9a4d37bdeecdf994c4c9ad7f3cf8dc632f7d70").into(), - unsigned: UnsignedTransaction { - nonce: 118.into(), - gas_price: 18000000000u64.into(), - gas: 86016.into(), - to: Some(hex!("dac17f958d2ee523a2206206994597c13d831ec7").into()), - value: 0.into(), - payload: hex!("a9059cbb000000000000000000000000e08f35f66867a454835b25118f1e490e7f9e9a7400000000000000000000000000000000000000000000000000000000004c4b40").to_vec(), - }, - }), - ); - - // Kovan contract call transaction - // https://kovan.etherscan.io/tx/0x2904b4451d23665492239016b78da052d40d55fdebc7304b38e53cf6a37322cf - // 
https://kovan.etherscan.io/getRawTx?tx=0x2904b4451d23665492239016b78da052d40d55fdebc7304b38e53cf6a37322cf
-		let raw_tx = hex!("f8ac8302200b843b9aca00830271009484dd11eb2a29615303d18149c0dbfa24167f896680b844a9059cbb00000000000000000000000001503dfc5ad81bf630d83697e98601871bb211b600000000000000000000000000000000000000000000000000000000000027101ba0ce126d2cca81f5e245f292ff84a0d915c0a4ac52af5c51219db1e5d36aa8da35a0045298b79dac631907403888f9b04c2ab5509fe0cc31785276d30a40b915fcf9");
-		assert_eq!(
-			transaction_decode_rlp(&raw_tx),
-			Ok(Transaction {
-				sender: hex!("617da121abf03d4c1af572f5a4e313e26bef7bdc").into(),
-				unsigned: UnsignedTransaction {
-					nonce: 139275.into(),
-					gas_price: 1000000000.into(),
-					gas: 160000.into(),
-					to: Some(hex!("84dd11eb2a29615303d18149c0dbfa24167f8966").into()),
-					value: 0.into(),
-					payload: hex!("a9059cbb00000000000000000000000001503dfc5ad81bf630d83697e98601871bb211b60000000000000000000000000000000000000000000000000000000000002710").to_vec(),
-				},
-			}),
-		);
-	}
-
-	#[test]
-	fn is_successful_raw_receipt_works() {
-		assert!(Receipt::is_successful_raw_receipt(&[]).is_err());
-
-		assert_eq!(
-			Receipt::is_successful_raw_receipt(
-				&Receipt {
-					outcome: TransactionOutcome::Unknown,
-					gas_used: Default::default(),
-					log_bloom: Default::default(),
-					logs: Vec::new(),
-				}
-				.rlp()
-			),
-			Ok(false),
-		);
-		assert_eq!(
-			Receipt::is_successful_raw_receipt(
-				&Receipt {
-					outcome: TransactionOutcome::StateRoot(Default::default()),
-					gas_used: Default::default(),
-					log_bloom: Default::default(),
-					logs: Vec::new(),
-				}
-				.rlp()
-			),
-			Ok(false),
-		);
-		assert_eq!(
-			Receipt::is_successful_raw_receipt(
-				&Receipt {
-					outcome: TransactionOutcome::StatusCode(0),
-					gas_used: Default::default(),
-					log_bloom: Default::default(),
-					logs: Vec::new(),
-				}
-				.rlp()
-			),
-			Ok(false),
-		);
-		assert_eq!(
-			Receipt::is_successful_raw_receipt(
-				&Receipt {
-					outcome: TransactionOutcome::StatusCode(1),
-					gas_used: Default::default(),
-					log_bloom: Default::default(),
-					logs: Vec::new(),
-				}
-				.rlp()
-			),
-			Ok(true),
-		);
-	}
-
-	#[test]
-	fn is_successful_raw_receipt_with_empty_data() {
-		let mut stream = RlpStream::new();
-		stream.begin_list(4);
-		stream.append_empty_data();
-		stream.append(&1u64);
-		stream.append(&2u64);
-		stream.append(&3u64);
-
-		assert_eq!(Receipt::is_successful_raw_receipt(&stream.out()), Ok(false),);
-	}
-}
diff --git a/primitives/ethereum-poa/src/signatures.rs b/primitives/ethereum-poa/src/signatures.rs
deleted file mode 100644
index 26371f2166ad..000000000000
--- a/primitives/ethereum-poa/src/signatures.rs
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright 2020-2021 Parity Technologies (UK) Ltd.
-// This file is part of Parity Bridges Common.
-
-// Parity Bridges Common is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Parity Bridges Common is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
-//
-
-//! Helpers related to signatures.
-//!
-//! Used for testing and benchmarking.
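//!
//! A minimal usage sketch (mirroring the `transaction_signed_properly` test below;
//! `42` is an illustrative chain id, not anything this crate mandates):
//!
//! ```rust,ignore
//! use bp_eth_poa::{signatures::{SecretKey, SignTransaction}, UnsignedTransaction};
//!
//! let signer = SecretKey::parse(&[1u8; 32]).unwrap();
//! let unsigned = UnsignedTransaction {
//!     nonce: 0.into(),
//!     gas_price: 1.into(),
//!     gas: 21_000.into(),
//!     to: None, // contract creation
//!     value: 0.into(),
//!     payload: vec![],
//! };
//! // raw, RLP-encoded transaction carrying an EIP-155 (chain id 42) signature
//! let raw_tx = unsigned.sign_by(&signer, Some(42));
//! assert!(!raw_tx.is_empty());
//! ```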
-
-// reexport to avoid direct secp256k1 deps by other crates
-pub use libsecp256k1::SecretKey;
-
-use crate::{
-	public_to_address, rlp_encode, step_validator, Address, AuraHeader, RawTransaction,
-	UnsignedTransaction, H256, H520, U256,
-};
-
-use libsecp256k1::{Message, PublicKey};
-
-/// Utilities for signing headers.
-pub trait SignHeader {
-	/// Signs header by given author.
-	fn sign_by(self, author: &SecretKey) -> AuraHeader;
-	/// Signs header by given authors set.
-	fn sign_by_set(self, authors: &[SecretKey]) -> AuraHeader;
-}
-
-/// Utilities for signing transactions.
-pub trait SignTransaction {
-	/// Sign transaction by given author.
-	fn sign_by(self, author: &SecretKey, chain_id: Option<u64>) -> RawTransaction;
-}
-
-impl SignHeader for AuraHeader {
-	fn sign_by(mut self, author: &SecretKey) -> Self {
-		self.author = secret_to_address(author);
-
-		let message = self.seal_hash(false).unwrap();
-		let signature = sign(author, message);
-		self.seal[1] = rlp_encode(&signature).to_vec();
-		self
-	}
-
-	fn sign_by_set(self, authors: &[SecretKey]) -> Self {
-		let step = self.step().unwrap();
-		let author = step_validator(authors, step);
-		self.sign_by(author)
-	}
-}
-
-impl SignTransaction for UnsignedTransaction {
-	fn sign_by(self, author: &SecretKey, chain_id: Option<u64>) -> RawTransaction {
-		let message = self.message(chain_id);
-		let signature = sign(author, message);
-		let signature_r = U256::from_big_endian(&signature.as_fixed_bytes()[..32][..]);
-		let signature_s = U256::from_big_endian(&signature.as_fixed_bytes()[32..64][..]);
-		let signature_v = signature.as_fixed_bytes()[64] as u64;
-		let signature_v = signature_v + if let Some(n) = chain_id { 35 + n * 2 } else { 27 };
-
-		let mut stream = rlp::RlpStream::new_list(9);
-		self.rlp_to(None, &mut stream);
-		stream.append(&signature_v);
-		stream.append(&signature_r);
-		stream.append(&signature_s);
-		stream.out().to_vec()
-	}
-}
-
-/// Return author's signature over given message.
-pub fn sign(author: &SecretKey, message: H256) -> H520 {
-	let (signature, recovery_id) =
-		libsecp256k1::sign(&Message::parse(message.as_fixed_bytes()), author);
-	let mut raw_signature = [0u8; 65];
-	raw_signature[..64].copy_from_slice(&signature.serialize());
-	raw_signature[64] = recovery_id.serialize();
-	raw_signature.into()
-}
-
-/// Returns address corresponding to given secret key.
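/// (derivation sketch: take the 64-byte uncompressed secp256k1 public key, keccak-256
/// it, and keep the last 20 bytes, which is exactly what `public_to_address` in
/// `lib.rs` does)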
-pub fn secret_to_address(secret: &SecretKey) -> Address {
-	let public = PublicKey::from_secret_key(secret);
-	let mut raw_public = [0u8; 64];
-	raw_public.copy_from_slice(&public.serialize()[1..]);
-	public_to_address(&raw_public)
-}
-
-#[cfg(test)]
-mod tests {
-	use super::*;
-	use crate::{transaction_decode_rlp, Transaction};
-
-	#[test]
-	fn transaction_signed_properly() {
-		// case1: with chain_id replay protection + to
-		let signer = SecretKey::parse(&[1u8; 32]).unwrap();
-		let signer_address = secret_to_address(&signer);
-		let unsigned = UnsignedTransaction {
-			nonce: 100.into(),
-			gas_price: 200.into(),
-			gas: 300.into(),
-			to: Some([42u8; 20].into()),
-			value: 400.into(),
-			payload: vec![1, 2, 3],
-		};
-		let raw_tx = unsigned.clone().sign_by(&signer, Some(42));
-		assert_eq!(
-			transaction_decode_rlp(&raw_tx),
-			Ok(Transaction { sender: signer_address, unsigned }),
-		);
-
-		// case2: without chain_id replay protection + contract creation
-		let unsigned = UnsignedTransaction {
-			nonce: 100.into(),
-			gas_price: 200.into(),
-			gas: 300.into(),
-			to: None,
-			value: 400.into(),
-			payload: vec![1, 2, 3],
-		};
-		let raw_tx = unsigned.clone().sign_by(&signer, None);
-		assert_eq!(
-			transaction_decode_rlp(&raw_tx),
-			Ok(Transaction { sender: signer_address, unsigned }),
-		);
-	}
-}
diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml
index 944f84a6c683..7cc165fb4e9c 100644
--- a/primitives/runtime/Cargo.toml
+++ b/primitives/runtime/Cargo.toml
@@ -22,6 +22,9 @@ sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "
 sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 
+[dev-dependencies]
+hex-literal = "0.3"
+
 [features]
 default = ["std"]
 std = [
diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs
index 460f1b19dfe3..051dc1f43c00 100644
--- a/primitives/runtime/src/lib.rs
+++ b/primitives/runtime/src/lib.rs
@@ -256,10 +256,22 @@ pub fn storage_map_final_key_identity(
 ///
 /// Copied from `frame_support::parameter_types` macro
 pub fn storage_parameter_key(parameter_name: &str) -> StorageKey {
-	let mut buffer = Vec::with_capacity(1 + parameter_name.len() + 1 + 1);
+	let mut buffer = Vec::with_capacity(1 + parameter_name.len() + 1);
 	buffer.push(b':');
 	buffer.extend_from_slice(parameter_name.as_bytes());
 	buffer.push(b':');
-	buffer.push(0);
 	StorageKey(sp_io::hashing::twox_128(&buffer).to_vec())
 }
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+
+	#[test]
+	fn storage_parameter_key_works() {
+		assert_eq!(
+			storage_parameter_key("MillauToRialtoConversionRate"),
+			StorageKey(hex_literal::hex!("58942375551bb0af1682f72786b59d04").to_vec()),
+		);
+	}
+}
diff --git a/relays/bin-ethereum/Cargo.toml b/relays/bin-ethereum/Cargo.toml
deleted file mode 100644
index 610dee2c3ce9..000000000000
--- a/relays/bin-ethereum/Cargo.toml
+++ /dev/null
@@ -1,42 +0,0 @@
-[package]
-name = "ethereum-poa-relay"
-version = "0.1.0"
-authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
-license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
-
-[dependencies]
-anyhow = "1.0"
-async-std = "1.9.0"
-async-trait = "0.1.42"
-clap = { version = "2.33.3", features = ["yaml"] }
-codec = { package = "parity-scale-codec", version = "2.2.0" }
-ethabi = { git = "https://github.com/svyatonik/ethabi.git", branch = "bump-deps" }
-ethabi-contract = { git = "https://github.com/svyatonik/ethabi.git", branch = 
"bump-deps" } -ethabi-derive = { git = "https://github.com/svyatonik/ethabi.git", branch = "bump-deps" } -futures = "0.3.12" -hex = "0.4" -hex-literal = "0.3" -libsecp256k1 = { version = "0.7", default-features = false, features = ["hmac"] } -log = "0.4.14" -num-traits = "0.2" -serde_json = "1.0.64" -thiserror = "1.0.26" - -# Bridge dependencies - -bp-currency-exchange = { path = "../../primitives/currency-exchange" } -bp-eth-poa = { path = "../../primitives/ethereum-poa" } -exchange-relay = { path = "../exchange" } -headers-relay = { path = "../headers" } -relay-ethereum-client = { path = "../client-ethereum" } -relay-rialto-client = { path = "../client-rialto" } -relay-substrate-client = { path = "../client-substrate" } -relay-utils = { path = "../utils" } -rialto-runtime = { path = "../../bin/rialto/runtime" } - -# Substrate Dependencies - -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/relays/bin-ethereum/README.md b/relays/bin-ethereum/README.md deleted file mode 100644 index 9fe2f623fd05..000000000000 --- a/relays/bin-ethereum/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# PoA <> Substrate Bridge - -**DISCLAIMER:** *we recommend not using the bridge in "production" (to bridge significant amounts) just yet. -it's missing a code audit and should still be considered alpha. we can't rule out that there are bugs that might result in loss of the bridged amounts. -we'll update this disclaimer once that changes* - -These docs are very incomplete yet. Describe high-level goals here in the (near) future. diff --git a/relays/bin-ethereum/res/substrate-bridge-abi.json b/relays/bin-ethereum/res/substrate-bridge-abi.json deleted file mode 100644 index b7d7b4b9152c..000000000000 --- a/relays/bin-ethereum/res/substrate-bridge-abi.json +++ /dev/null @@ -1,167 +0,0 @@ -[ - { - "inputs": [ - { - "internalType": "bytes", - "name": "rawInitialHeader", - "type": "bytes" - }, - { - "internalType": "uint64", - "name": "initialValidatorsSetId", - "type": "uint64" - }, - { - "internalType": "bytes", - "name": "initialValidatorsSet", - "type": "bytes" - } - ], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { - "stateMutability": "nonpayable", - "type": "fallback" - }, - { - "inputs": [], - "name": "bestKnownHeader", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - }, - { - "internalType": "bytes32", - "name": "", - "type": "bytes32" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "finalityTargetNumber", - "type": "uint256" - }, - { - "internalType": "bytes32", - "name": "finalityTargetHash", - "type": "bytes32" - }, - { - "internalType": "bytes", - "name": "rawFinalityProof", - "type": "bytes" - } - ], - "name": "importFinalityProof", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes", - "name": "rawHeader1", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "rawHeader2", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "rawHeader3", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "rawHeader4", - "type": "bytes" - } - ], - "name": "importHeaders", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [], - "name": 
"incompleteHeaders", - "outputs": [ - { - "internalType": "uint256[]", - "name": "", - "type": "uint256[]" - }, - { - "internalType": "bytes32[]", - "name": "", - "type": "bytes32[]" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes", - "name": "rawHeader1", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "rawHeader2", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "rawHeader3", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "rawHeader4", - "type": "bytes" - } - ], - "name": "isIncompleteHeaders", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes32", - "name": "headerHash", - "type": "bytes32" - } - ], - "name": "isKnownHeader", - "outputs": [ - { - "internalType": "bool", - "name": "", - "type": "bool" - } - ], - "stateMutability": "view", - "type": "function" - } -] diff --git a/relays/bin-ethereum/res/substrate-bridge-bytecode.hex b/relays/bin-ethereum/res/substrate-bridge-bytecode.hex deleted file mode 100644 index 6dd6a33046f6..000000000000 --- a/relays/bin-ethereum/res/substrate-bridge-bytecode.hex +++ /dev/null @@ -1 +0,0 @@ -60806040523480156200001157600080fd5b5060405162001af838038062001af8833981810160405260608110156200003757600080fd5b81019080805160405193929190846401000000008211156200005857600080fd5b9083019060208201858111156200006e57600080fd5b82516401000000008111828201881017156200008957600080fd5b82525081516020918201929091019080838360005b83811015620000b85781810151838201526020016200009e565b50505050905090810190601f168015620000e65780820380516001836020036101000a031916815260200191505b506040818152602083015192018051929491939192846401000000008211156200010f57600080fd5b9083019060208201858111156200012557600080fd5b82516401000000008111828201881017156200014057600080fd5b82525081516020918201929091019080838360005b838110156200016f57818101518382015260200162000155565b50505050905090810190601f1680156200019d5780820380516001836020036101000a031916815260200191505b50604052505050620001ae620003d5565b620001c2846001600160e01b03620002dc16565b805160008181556002918255604080840180516001908155825160e08101845281815260208088015181830190815293518286019081526080808a0151606085019081526001600160401b038e169185019190915260a0840188905260c084018890528951885260078352959096208251815460ff191690151517815593519284019290925593519482019490945590518051949550919390926200026f9260038501929101906200040a565b506080820151600482810180546001600160401b03199081166001600160401b039485161790915560a0850151600585015560c09094015160069093019290925560038054909316908616179091558251620002d1919060208501906200040a565b5050505050620004af565b620002e6620003d5565b60008060008060008651602088016040516020810160208101602081016020810160a08588886010600019fa6200031c57600080fd5b84519b5083519a50825199508151985080519750505050505050506060816001600160401b03811180156200035057600080fd5b506040519080825280601f01601f1916602001820160405280156200037c576020820181803683370190505b5090508115620003a85787516020890160208301848184846011600019fa620003a457600080fd5b5050505b6040805160a081018252968752602087019590955293850192909252606084015250608082015292915050565b6040518060a0016040528060008019168152602001600080191681526020016000815260200160008152602001606081525090565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f106200044d57805160ff19168380011785556200047d565b828001600101855582156200047d579182015b8281111562
00047d57825182559160200191906001019062000460565b506200048b9291506200048f565b5090565b620004ac91905b808211156200048b576000815560010162000496565b90565b61163980620004bf6000396000f3fe608060405234801561001057600080fd5b50600436106100625760003560e01c8063374c2c26146100675780636a742c0914610108578063871ebe181461033d578063d96a2deb1461036e578063e8ffbe841461038f578063fae71ae8146105d4575b600080fd5b61006f610684565b604051808060200180602001838103835285818151815260200191508051906020019060200280838360005b838110156100b357818101518382015260200161009b565b50505050905001838103825284818151815260200191508051906020019060200280838360005b838110156100f25781810151838201526020016100da565b5050505090500194505050505060405180910390f35b61033b6004803603608081101561011e57600080fd5b810190602081018135600160201b81111561013857600080fd5b82018360208201111561014a57600080fd5b803590602001918460018302840111600160201b8311171561016b57600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b8111156101bd57600080fd5b8201836020820111156101cf57600080fd5b803590602001918460018302840111600160201b831117156101f057600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b81111561024257600080fd5b82018360208201111561025457600080fd5b803590602001918460018302840111600160201b8311171561027557600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b8111156102c757600080fd5b8201836020820111156102d957600080fd5b803590602001918460018302840111600160201b831117156102fa57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610789945050505050565b005b61035a6004803603602081101561035357600080fd5b50356107e5565b604080519115158252519081900360200190f35b6103766107fd565b6040805192835260208301919091528051918290030190f35b6105c2600480360360808110156103a557600080fd5b810190602081018135600160201b8111156103bf57600080fd5b8201836020820111156103d157600080fd5b803590602001918460018302840111600160201b831117156103f257600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b81111561044457600080fd5b82018360208201111561045657600080fd5b803590602001918460018302840111600160201b8311171561047757600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b8111156104c957600080fd5b8201836020820111156104db57600080fd5b803590602001918460018302840111600160201b831117156104fc57600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b81111561054e57600080fd5b82018360208201111561056057600080fd5b803590602001918460018302840111600160201b8311171561058157600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610815945050505050565b60408051918252519081900360200190f35b61033b600480360360608110156105ea57600080fd5b813591602081013591810190606081016040820135600160201b81111561061057600080fd5b82018360208201111561062257600080fd5b803590602001918460018302840111600160201b8311171561064357600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610b28945050505050565b6005546060908190818167ffffffffffffffff8
11180156106a457600080fd5b506040519080825280602002602001820160405280156106ce578160200160208202803683370190505b50905060005b828110156107295760076000600583815481106106ed57fe5b906000526020600020015481526020019081526020016000206002015482828151811061071657fe5b60209081029190910101526001016106d4565b508060058080548060200260200160405190810160405280929190818152602001828054801561077857602002820191906000526020600020905b815481526020019060010190808311610764575b505050505090509350935050509091565b61079284610d8d565b61079b576107df565b8251156107b4576107ab83610d8d565b6107b4576107df565b8151156107cd576107c482610d8d565b6107cd576107df565b8051156107df576107dd81610d8d565b505b50505050565b60008181526007602052604090205460ff165b919050565b60008054808252600760205260409091206002015491565b600061081f611454565b61082886610f0e565b9050610832611489565b602082810151600090815260078252604090819020815160e081018352815460ff1615158152600180830154828601526002808401548386015260038401805486516101009482161594909402600019011691909104601f81018790048702830187019095528482529194929360608601939192918301828280156108f85780601f106108cd576101008083540402835291602001916108f8565b820191906000526020600020905b8154815290600101906020018083116108db57829003601f168201915b5050509183525050600482015467ffffffffffffffff1660208201526005820154604082015260069091015460609091015290506000806109398484611001565b945050505091506000600681111561094d57fe5b82600681111561095957fe5b146109ab576040805162461bcd60e51b815260206004820152601860248201527f43616e277420696d706f727420616e7920686561646572730000000000000000604482015290519081900360640190fd5b83604001518114156109c4576001945050505050610b20565b87516109d7576000945050505050610b20565b6109df611489565b6109e98585611171565b90506109f3611454565b6109fc8a610f0e565b90506000610a0a8284611001565b9450505050508160400151811415610a2c576002975050505050505050610b20565b8951610a42576000975050505050505050610b20565b610a4a611489565b610a548388611171565b9050610a5e611454565b610a678c610f0e565b90506000610a758284611001565b9450505050508160400151811415610a9a5760039a5050505050505050505050610b20565b8b51610ab35760009a5050505050505050505050610b20565b610abb611489565b610ac5838b611171565b9050610acf611454565b610ad88e610f0e565b90506000610ae68284611001565b9450505050508160400151811415610b0e5760049d5050505050505050505050505050610b20565b60009d50505050505050505050505050505b949350505050565b6000828152600760205260409020600201548314610b775760405162461bcd60e51b815260040180806020018281038252602f8152602001806115d5602f913960400191505060405180910390fd5b60028054600354600480546040805160206101006001851615026000190190931696909604601f81018390048302870183019091528086529394600094610c28948a948a9467ffffffffffffffff90921693929091830182828015610c1d5780601f10610bf257610100808354040283529160200191610c1d565b820191906000526020600020905b815481529060010190602001808311610c0057829003601f168201915b5050505050876111d0565b600081815260076020526040902060028281558101546001559091505b828214610d8557506000818152600760209081526040808320600181015460069093529220549092908015610d07576005546000199182019181018214610cd357600060056001830381548110610c9857fe5b906000526020600020015490508060058481548110610cb357fe5b600091825260208083209091019290925591825260069052604090208290555b6005805480610cde57fe5b600082815260208082208301600019908101839055909201909255848252600690526040812055505b826006015483600201541415610d7e57600583015460009081526007602052604090206003805467ffffffffffffffff198116600167ffffffffffffffff92831681019092161782559082018054610d759260049291600261010092821615929092026000190116046114c4565b50505050610d85565b5050610c45565b50505050
5050565b600080610d98611454565b6000806000610da687611312565b9398509196509450925090506000856006811115610dc057fe5b14610dd3576000955050505050506107f8565b604084015181148015610e27576005805486516001820180845560009384527f036b6384b5eca791c62761152d0c79bb0604c104a5fb6f4eb0703f3154bb3db0909201558651825260066020526040909120555b6040805160e0810182526001808252602088810151818401908152898501518486019081526080808c01516060870190815267ffffffffffffffff8c169187019190915260a086018a905260c086018990528b51600090815260078552969096208551815460ff1916901515178155915193820193909355915160028301559251805192939192610ebe9260038501920190611549565b50608082015160048201805467ffffffffffffffff191667ffffffffffffffff90921691909117905560a0820151600582015560c090910151600690910155935160005550509015949350505050565b610f16611454565b60008060008060008651602088016040516020810160208101602081016020810160a08588886010600019fa610f4b57600080fd5b84519b5083519a508251995081519850805197505050505050505060608167ffffffffffffffff81118015610f7f57600080fd5b506040519080825280601f01601f191660200182016040528015610faa576020820181803683370190505b5090508115610fd45787516020890160208301848184846011600019fa610fd057600080fd5b5050505b6040805160a081018252968752602087019590955293850192909252606084015250608082015292915050565b600061100b611454565b83516000908152600760205260408120548190819060ff161561103d5750600193508592506000915081905080611167565b60015487604001511161105f5750600293508592506000915081905080611167565b8551158061107857506001876040015103866040015114155b156110925750600393508592506000915081905080611167565b60c0860151158015906110ac575085604001518660c00151145b156110d3578660200151600254146110d35750600493508592506000915081905080611167565b60808087015160a088015160c0890151928a01515191929091156111585767ffffffffffffffff838116141561111d57506005965088955060009450849350839250611167915050565b8960400151811061114257506006965088955060009450849350839250611167915050565b50508751606089015160408a0151600190930192015b60009750899650919450925090505b9295509295909350565b611179611489565b506040805160e08101825260018082528451602083015293820151909301908301526060818101519083015260808082015167ffffffffffffffff169083015260a0808201519083015260c0908101519082015290565b600060608686868686604051602001808681526020018581526020018467ffffffffffffffff1667ffffffffffffffff1681526020018060200180602001838103835285818151815260200191508051906020019080838360005b8381101561124357818101518382015260200161122b565b50505050905090810190601f1680156112705780820380516001836020036101000a031916815260200191505b50838103825284518152845160209182019186019080838360005b838110156112a357818101518382015260200161128b565b50505050905090810190601f1680156112d05780820380516001836020036101000a031916815260200191505b50975050505050505050604051602081830303815290604052905080516020820160008083836012600019fa61130557600080fd5b5095979650505050505050565b600061131c611454565b6000806000611329611454565b61133287610f0e565b905061133c611489565b602082810151600090815260078252604090819020815160e081018352815460ff1615158152600180830154828601526002808401548386015260038401805486516101009482161594909402600019011691909104601f81018790048702830187019095528482529194929360608601939192918301828280156114025780601f106113d757610100808354040283529160200191611402565b820191906000526020600020905b8154815290600101906020018083116113e557829003601f168201915b5050509183525050600482015467ffffffffffffffff1660208201526005820154604082015260069091015460609091015290506114408282611001565b939c929b5090995097509095509350505050565b6040518060a001604052806000801916815260200160008019168152602001600081526020016
0008152602001606081525090565b6040805160e0810182526000808252602082018190529181018290526060808201526080810182905260a0810182905260c081019190915290565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f106114fd5780548555611539565b8280016001018555821561153957600052602060002091601f016020900482015b8281111561153957825482559160010191906001019061151e565b506115459291506115b7565b5090565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f1061158a57805160ff1916838001178555611539565b82800160010185558215611539579182015b8281111561153957825182559160200191906001019061159c565b6115d191905b8082111561154557600081556001016115bd565b9056fe4d697373696e672066696e616c69747920746172676574206865616465722066726f6d207468652073746f72616765a2646970667358221220edcaec08f93f74ce5be00b81da5d6b2276138571a33f1cfdca50e5047f854e6e64736f6c63430006060033 \ No newline at end of file diff --git a/relays/bin-ethereum/res/substrate-bridge-metadata.txt b/relays/bin-ethereum/res/substrate-bridge-metadata.txt deleted file mode 100644 index 13b7daa9a8b8..000000000000 --- a/relays/bin-ethereum/res/substrate-bridge-metadata.txt +++ /dev/null @@ -1,5 +0,0 @@ -Last Change Date: 2020-07-30 -Solc version: 0.6.6+commit.6c089d02.Linux.g++ -Source hash (keccak256): 0xea5d6d744f69157adc2857166792aca139c0b5b186ba89c1011358fbcad90d7e -Source gist: https://github.com/svyatonik/substrate-bridge-sol/blob/6456d3e016c95cd5e6d5e817c23e9e69e739aa78/substrate-bridge.sol -Compiler flags used (command to produce the file): `docker run -i ethereum/solc:0.6.6 --optimize --bin - < substrate-bridge.sol` \ No newline at end of file diff --git a/relays/bin-ethereum/src/cli.yml b/relays/bin-ethereum/src/cli.yml deleted file mode 100644 index 78971787c0e2..000000000000 --- a/relays/bin-ethereum/src/cli.yml +++ /dev/null @@ -1,166 +0,0 @@ -name: ethsub-bridge -version: "0.1.0" -author: Parity Technologies -about: Parity Ethereum (PoA) <-> Substrate bridge -subcommands: - - eth-to-sub: - about: Synchronize headers from Ethereum node to Substrate node. - args: - - eth-host: ð-host - long: eth-host - value_name: ETH_HOST - help: Connect to Ethereum node websocket server at given host. - takes_value: true - - eth-port: ð-port - long: eth-port - value_name: ETH_PORT - help: Connect to Ethereum node websocket server at given port. - takes_value: true - - sub-host: &sub-host - long: sub-host - value_name: SUB_HOST - help: Connect to Substrate node websocket server at given host. - takes_value: true - - sub-port: &sub-port - long: sub-port - value_name: SUB_PORT - help: Connect to Substrate node websocket server at given port. - takes_value: true - - sub-tx-mode: - long: sub-tx-mode - value_name: MODE - help: Submit headers using signed (default) or unsigned transactions. Third mode - backup - submits signed transactions only when we believe that sync has stalled. - takes_value: true - possible_values: - - signed - - unsigned - - backup - - sub-signer: &sub-signer - long: sub-signer - value_name: SUB_SIGNER - help: The SURI of secret key to use when transactions are submitted to the Substrate node. - - sub-signer-password: &sub-signer-password - long: sub-signer-password - value_name: SUB_SIGNER_PASSWORD - help: The password for the SURI of secret key to use when transactions are submitted to the Substrate node. - - sub-pallet-instance: &sub-pallet-instance - long: instance - short: i - value_name: PALLET_INSTANCE - help: The instance of the bridge pallet the relay should follow. 
-                takes_value: true
-                case_insensitive: true
-                possible_values:
-                    - Rialto
-                    - Kovan
-                default_value: Rialto
-            - no-prometheus: &no-prometheus
-                long: no-prometheus
-                help: Do not expose a Prometheus metric endpoint.
-            - prometheus-host: &prometheus-host
-                long: prometheus-host
-                value_name: PROMETHEUS_HOST
-                help: Expose Prometheus endpoint at given interface.
-            - prometheus-port: &prometheus-port
-                long: prometheus-port
-                value_name: PROMETHEUS_PORT
-                help: Expose Prometheus endpoint at given port.
-    - sub-to-eth:
-        about: Synchronize headers from Substrate node to Ethereum node.
-        args:
-            - eth-host: *eth-host
-            - eth-port: *eth-port
-            - eth-contract:
-                long: eth-contract
-                value_name: ETH_CONTRACT
-                help: Address of deployed bridge contract.
-                takes_value: true
-            - eth-chain-id: &eth-chain-id
-                long: eth-chain-id
-                value_name: ETH_CHAIN_ID
-                help: Chain ID to use for signing.
-            - eth-signer: &eth-signer
-                long: eth-signer
-                value_name: ETH_SIGNER
-                help: Hex-encoded secret to use when transactions are submitted to the Ethereum node.
-            - sub-host: *sub-host
-            - sub-port: *sub-port
-            - no-prometheus: *no-prometheus
-            - prometheus-host: *prometheus-host
-            - prometheus-port: *prometheus-port
-    - eth-deploy-contract:
-        about: Deploy Bridge contract on Ethereum node.
-        args:
-            - eth-host: *eth-host
-            - eth-port: *eth-port
-            - eth-signer: *eth-signer
-            - eth-chain-id: *eth-chain-id
-            - eth-contract-code:
-                long: eth-contract-code
-                value_name: ETH_CONTRACT_CODE
-                help: Bytecode of bridge contract.
-                takes_value: true
-            - sub-host: *sub-host
-            - sub-port: *sub-port
-            - sub-authorities-set-id:
-                long: sub-authorities-set-id
-                value_name: SUB_AUTHORITIES_SET_ID
-                help: ID of initial GRANDPA authorities set.
-                takes_value: true
-            - sub-authorities-set:
-                long: sub-authorities-set
-                value_name: SUB_AUTHORITIES_SET
-                help: Encoded initial GRANDPA authorities set.
-                takes_value: true
-            - sub-initial-header:
-                long: sub-initial-header
-                value_name: SUB_INITIAL_HEADER
-                help: Encoded initial Substrate header.
-                takes_value: true
-    - eth-submit-exchange-tx:
-        about: Submit lock funds transaction to Ethereum node.
-        args:
-            - eth-host: *eth-host
-            - eth-port: *eth-port
-            - eth-nonce:
-                long: eth-nonce
-                value_name: ETH_NONCE
-                help: Nonce that has to be used when building transaction. If not specified, read from PoA node.
-                takes_value: true
-            - eth-signer: *eth-signer
-            - eth-chain-id: *eth-chain-id
-            - eth-amount:
-                long: eth-amount
-                value_name: ETH_AMOUNT
-                help: Amount of ETH to lock (in wei).
-                takes_value: true
-            - sub-recipient:
-                long: sub-recipient
-                value_name: SUB_RECIPIENT
-                help: Hex-encoded public key of funds recipient in Substrate chain.
-                takes_value: true
-    - eth-exchange-sub:
-        about: Submit proof of PoA lock funds transaction to Substrate node.
-        args:
-            - eth-host: *eth-host
-            - eth-port: *eth-port
-            - eth-start-with-block:
-                long: eth-start-with-block
-                value_name: ETH_START_WITH_BLOCK
-                help: Auto-relay transactions starting with given block number. If not specified, starts with best finalized Ethereum block (known to Substrate node) transactions.
-                takes_value: true
-                conflicts_with:
-                    - eth-tx-hash
-            - eth-tx-hash:
-                long: eth-tx-hash
-                value_name: ETH_TX_HASH
-                help: Hash of the lock funds transaction.
-                takes_value: true
-            - sub-host: *sub-host
-            - sub-port: *sub-port
-            - sub-signer: *sub-signer
-            - sub-signer-password: *sub-signer-password
-            - sub-pallet-instance: *sub-pallet-instance
-            - no-prometheus: *no-prometheus
-            - prometheus-host: *prometheus-host
-            - prometheus-port: *prometheus-port
diff --git a/relays/bin-ethereum/src/error.rs b/relays/bin-ethereum/src/error.rs
deleted file mode 100644
index 61ae2a9a498d..000000000000
--- a/relays/bin-ethereum/src/error.rs
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2021 Parity Technologies (UK) Ltd.
-// This file is part of Parity Bridges Common.
-// Parity Bridges Common is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-// Parity Bridges Common is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-// You should have received a copy of the GNU General Public License
-// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
-
-use crate::rpc_errors::RpcError;
-use thiserror::Error;
-
-/// Result type used by PoA relay.
-pub type Result<T> = std::result::Result<T, Error>;
-
-/// Ethereum PoA relay errors.
-#[derive(Error, Debug)]
-pub enum Error {
-	/// Failed to decode initial header.
-	#[error("Error decoding initial header: {0}")]
-	DecodeInitialHeader(codec::Error),
-	/// RPC error.
-	#[error("{0}")]
-	Rpc(#[from] RpcError),
-	/// Failed to read genesis header.
-	#[error("Error reading Substrate genesis header: {0:?}")]
-	ReadGenesisHeader(relay_substrate_client::Error),
-	/// Failed to read initial GRANDPA authorities.
-	#[error("Error reading GRANDPA authorities set: {0:?}")]
-	ReadAuthorities(relay_substrate_client::Error),
-	/// Failed to deploy bridge contract to Ethereum chain.
-	#[error("Error deploying contract: {0:?}")]
-	DeployContract(RpcError),
-}
diff --git a/relays/bin-ethereum/src/ethereum_client.rs b/relays/bin-ethereum/src/ethereum_client.rs
deleted file mode 100644
index 75ed57fea163..000000000000
--- a/relays/bin-ethereum/src/ethereum_client.rs
+++ /dev/null
@@ -1,631 +0,0 @@
-// Copyright 2019-2021 Parity Technologies (UK) Ltd.
-// This file is part of Parity Bridges Common.
-
-// Parity Bridges Common is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Parity Bridges Common is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
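For orientation before the deleted client below: every read-only query in it follows the same ethabi round-trip. The sketch here is illustrative only; `bridge_contract` is the module that `ethabi_contract::use_contract!` expands from the ABI file above, while `EthereumClient`, `CallRequest` and `RpcResult` are the deleted crate's own types, so this compiles against nothing current.

    // Hypothetical helper mirroring the pattern used by `best_substrate_block`,
    // `substrate_header_known` and `incomplete_substrate_headers` below.
    async fn read_contract_state(
        client: &EthereumClient,
        contract_address: Address,
    ) -> RpcResult<(U256, H256)> {
        // ethabi generates an encoder + typed decoder pair per ABI function
        let (encoded_call, call_decoder) = bridge_contract::functions::best_known_header::call();
        // the encoded call is sent as a constant `eth_call`, not a transaction
        let call_request = CallRequest {
            to: Some(contract_address),
            data: Some(encoded_call.into()),
            ..Default::default()
        };
        let call_result = client.eth_call(call_request).await?;
        // the decoder turns the raw return bytes into the types declared in the ABI
        Ok(call_decoder.decode(&call_result.0)?)
    }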
-
-use crate::{rpc_errors::RpcError, substrate_sync_loop::QueuedRialtoHeader};
-
-use async_trait::async_trait;
-use bp_eth_poa::signatures::secret_to_address;
-use codec::{Decode, Encode};
-use ethabi::FunctionOutputDecoder;
-use headers_relay::sync_types::SubmittedHeaders;
-use relay_ethereum_client::{
-	sign_and_submit_transaction,
-	types::{Address, CallRequest, HeaderId as EthereumHeaderId, Receipt, H256, U256},
-	Client as EthereumClient, Error as EthereumNodeError, SigningParams as EthereumSigningParams,
-};
-use relay_rialto_client::HeaderId as RialtoHeaderId;
-use relay_utils::{HeaderId, MaybeConnectionError};
-use sp_runtime::EncodedJustification;
-use std::collections::HashSet;
-
-// to encode/decode contract calls
-ethabi_contract::use_contract!(bridge_contract, "res/substrate-bridge-abi.json");
-
-type RpcResult<T> = std::result::Result<T, RpcError>;
-
-/// A trait which contains methods that work by using multiple low-level RPCs, or more complicated
-/// interactions involving, for example, an Ethereum contract.
-#[async_trait]
-pub trait EthereumHighLevelRpc {
-	/// Returns the best Substrate block that PoA chain knows of.
-	async fn best_substrate_block(&self, contract_address: Address) -> RpcResult<RialtoHeaderId>;
-
-	/// Returns true if Substrate header is known to Ethereum node.
-	async fn substrate_header_known(
-		&self,
-		contract_address: Address,
-		id: RialtoHeaderId,
-	) -> RpcResult<(RialtoHeaderId, bool)>;
-
-	/// Submits Substrate headers to Ethereum contract.
-	async fn submit_substrate_headers(
-		&self,
-		params: EthereumSigningParams,
-		contract_address: Address,
-		headers: Vec<QueuedRialtoHeader>,
-	) -> SubmittedHeaders<RialtoHeaderId, RpcError>;
-
-	/// Returns ids of incomplete Substrate headers.
-	async fn incomplete_substrate_headers(
-		&self,
-		contract_address: Address,
-	) -> RpcResult<HashSet<RialtoHeaderId>>;
-
-	/// Complete Substrate header.
-	async fn complete_substrate_header(
-		&self,
-		params: EthereumSigningParams,
-		contract_address: Address,
-		id: RialtoHeaderId,
-		justification: EncodedJustification,
-	) -> RpcResult<RialtoHeaderId>;
-
-	/// Submit ethereum transaction.
-	async fn submit_ethereum_transaction(
-		&self,
-		params: &EthereumSigningParams,
-		contract_address: Option<Address>,
-		nonce: Option<U256>,
-		double_gas: bool,
-		encoded_call: Vec<u8>,
-	) -> RpcResult<()>;
-
-	/// Retrieve transactions receipts for given block.
-	async fn transaction_receipts(
-		&self,
-		id: EthereumHeaderId,
-		transactions: Vec<H256>,
-	) -> RpcResult<(EthereumHeaderId, Vec<Receipt>)>;
-}
-
-#[async_trait]
-impl EthereumHighLevelRpc for EthereumClient {
-	async fn best_substrate_block(&self, contract_address: Address) -> RpcResult<RialtoHeaderId> {
-		let (encoded_call, call_decoder) = bridge_contract::functions::best_known_header::call();
-		let call_request = CallRequest {
-			to: Some(contract_address),
-			data: Some(encoded_call.into()),
-			..Default::default()
-		};
-
-		let call_result = self.eth_call(call_request).await?;
-		let (number, raw_hash) = call_decoder.decode(&call_result.0)?;
-		let hash = rialto_runtime::Hash::decode(&mut &raw_hash[..])?;
-
-		if number != number.low_u32().into() {
-			return Err(RpcError::Ethereum(EthereumNodeError::InvalidSubstrateBlockNumber))
-		}
-
-		Ok(HeaderId(number.low_u32(), hash))
-	}
-
-	async fn substrate_header_known(
-		&self,
-		contract_address: Address,
-		id: RialtoHeaderId,
-	) -> RpcResult<(RialtoHeaderId, bool)> {
-		let (encoded_call, call_decoder) = bridge_contract::functions::is_known_header::call(id.1);
-		let call_request = CallRequest {
-			to: Some(contract_address),
-			data: Some(encoded_call.into()),
-			..Default::default()
-		};
-
-		let call_result = self.eth_call(call_request).await?;
-		let is_known_block = call_decoder.decode(&call_result.0)?;
-
-		Ok((id, is_known_block))
-	}
-
-	async fn submit_substrate_headers(
-		&self,
-		params: EthereumSigningParams,
-		contract_address: Address,
-		headers: Vec<QueuedRialtoHeader>,
-	) -> SubmittedHeaders<RialtoHeaderId, RpcError> {
-		// read nonce of signer
-		let address: Address = secret_to_address(&params.signer);
-		let nonce = match self.account_nonce(address).await {
-			Ok(nonce) => nonce,
-			Err(error) =>
-				return SubmittedHeaders {
-					submitted: Vec::new(),
-					incomplete: Vec::new(),
-					rejected: headers.iter().rev().map(|header| header.id()).collect(),
-					fatal_error: Some(error.into()),
-				},
-		};
-
-		// submit headers. Note that we're cloning self here. It is ok, because
-		// cloning `jsonrpsee::Client` only clones reference to background threads
-		submit_substrate_headers(
-			EthereumHeadersSubmitter { client: self.clone(), params, contract_address, nonce },
-			headers,
-		)
-		.await
-	}
-
-	async fn incomplete_substrate_headers(
-		&self,
-		contract_address: Address,
-	) -> RpcResult<HashSet<RialtoHeaderId>> {
-		let (encoded_call, call_decoder) = bridge_contract::functions::incomplete_headers::call();
-		let call_request = CallRequest {
-			to: Some(contract_address),
-			data: Some(encoded_call.into()),
-			..Default::default()
-		};
-
-		let call_result = self.eth_call(call_request).await?;
-
-		// Q: Is it correct to call these "incomplete_ids"?
-		let (incomplete_headers_numbers, incomplete_headers_hashes) =
-			call_decoder.decode(&call_result.0)?;
-		let incomplete_ids = incomplete_headers_numbers
-			.into_iter()
-			.zip(incomplete_headers_hashes)
-			.filter_map(|(number, hash)| {
-				if number != number.low_u32().into() {
-					return None
-				}
-
-				Some(HeaderId(number.low_u32(), hash))
-			})
-			.collect();
-
-		Ok(incomplete_ids)
-	}
-
-	async fn complete_substrate_header(
-		&self,
-		params: EthereumSigningParams,
-		contract_address: Address,
-		id: RialtoHeaderId,
-		justification: EncodedJustification,
-	) -> RpcResult<RialtoHeaderId> {
-		let _ = self
-			.submit_ethereum_transaction(
-				&params,
-				Some(contract_address),
-				None,
-				false,
-				bridge_contract::functions::import_finality_proof::encode_input(
-					id.0,
-					id.1,
-					justification,
-				),
-			)
-			.await?;
-
-		Ok(id)
-	}
-
-	async fn submit_ethereum_transaction(
-		&self,
-		params: &EthereumSigningParams,
-		contract_address: Option<Address>,
-		nonce: Option<U256>,
-		double_gas: bool,
-		encoded_call: Vec<u8>,
-	) -> RpcResult<()> {
-		sign_and_submit_transaction(self, params, contract_address, nonce, double_gas, encoded_call)
-			.await
-			.map_err(Into::into)
-	}
-
-	async fn transaction_receipts(
-		&self,
-		id: EthereumHeaderId,
-		transactions: Vec<H256>,
-	) -> RpcResult<(EthereumHeaderId, Vec<Receipt>)> {
-		let mut transaction_receipts = Vec::with_capacity(transactions.len());
-		for transaction in transactions {
-			let transaction_receipt = self.transaction_receipt(transaction).await?;
-			transaction_receipts.push(transaction_receipt);
-		}
-		Ok((id, transaction_receipts))
-	}
-}
-
-/// Max number of headers which can be sent to Solidity contract.
-pub const HEADERS_BATCH: usize = 4;
-
-/// Substrate headers to send to the Ethereum light client.
-///
-/// The Solidity contract can only accept a fixed number of headers in one go.
-/// This struct is meant to encapsulate this limitation.
-#[derive(Debug)]
-#[cfg_attr(test, derive(Clone))]
-pub struct HeadersBatch {
-	pub header1: QueuedRialtoHeader,
-	pub header2: Option<QueuedRialtoHeader>,
-	pub header3: Option<QueuedRialtoHeader>,
-	pub header4: Option<QueuedRialtoHeader>,
-}
-
-impl HeadersBatch {
-	/// Create new headers from given header & ids collections.
-	///
-	/// This method will pop `HEADERS_BATCH` items from both collections
-	/// and construct `Headers` object and a vector of `RialtoHeaderId`s.
-	pub fn pop_from(
-		headers: &mut Vec<QueuedRialtoHeader>,
-		ids: &mut Vec<RialtoHeaderId>,
-	) -> Result<(Self, Vec<RialtoHeaderId>), ()> {
-		if headers.len() != ids.len() {
-			log::error!(target: "bridge", "Collection size mismatch ({} vs {})", headers.len(), ids.len());
-			return Err(())
-		}
-
-		let header1 = headers.pop().ok_or(())?;
-		let header2 = headers.pop();
-		let header3 = headers.pop();
-		let header4 = headers.pop();
-
-		let mut submitting_ids = Vec::with_capacity(HEADERS_BATCH);
-		for _ in 0..HEADERS_BATCH {
-			submitting_ids.extend(ids.pop().iter());
-		}
-
-		Ok((Self { header1, header2, header3, header4 }, submitting_ids))
-	}
-
-	/// Returns unified array of headers.
-	///
-	/// The first element is always `Some`.
-	fn headers(&self) -> [Option<&QueuedRialtoHeader>; HEADERS_BATCH] {
-		[Some(&self.header1), self.header2.as_ref(), self.header3.as_ref(), self.header4.as_ref()]
-	}
-
-	/// Encodes all headers. If header is not present an empty vector will be returned.
-	pub fn encode(&self) -> [Vec<u8>; HEADERS_BATCH] {
-		let encode = |h: &QueuedRialtoHeader| h.header().encode();
-		let headers = self.headers();
-		[
-			headers[0].map(encode).unwrap_or_default(),
-			headers[1].map(encode).unwrap_or_default(),
-			headers[2].map(encode).unwrap_or_default(),
-			headers[3].map(encode).unwrap_or_default(),
-		]
-	}
-
-	/// Returns number of contained headers.
-	pub fn len(&self) -> usize {
-		let is_set = |h: &Option<&QueuedRialtoHeader>| if h.is_some() { 1 } else { 0 };
-		self.headers().iter().map(is_set).sum()
-	}
-
-	/// Remove headers starting from `idx` (0-based) from this collection.
-	///
-	/// The collection will be left with `[0, idx)` headers.
-	/// Returns `Err` when `idx == 0`, since `Headers` must contain at least one header,
-	/// or when `idx > HEADERS_BATCH`.
-	pub fn split_off(&mut self, idx: usize) -> Result<(), ()> {
-		if idx == 0 || idx > HEADERS_BATCH {
-			return Err(())
-		}
-		let mut vals: [_; HEADERS_BATCH] =
-			[&mut None, &mut self.header2, &mut self.header3, &mut self.header4];
-		for val in vals.iter_mut().skip(idx) {
-			**val = None;
-		}
-		Ok(())
-	}
-}
-
-/// Substrate headers submitter API.
-#[async_trait]
-trait HeadersSubmitter {
-	/// Returns Ok(0) if all given not-yet-imported headers are complete.
-	/// Returns Ok(index != 0) where index is 1-based index of first header that is incomplete.
-	///
-	/// Returns Err(()) if contract has rejected headers. This means that the contract is
-	/// unable to import first header (e.g. it may already be imported).
-	async fn is_headers_incomplete(&self, headers: &HeadersBatch) -> RpcResult<usize>;
-
-	/// Submit given headers to Ethereum node.
-	async fn submit_headers(&mut self, headers: HeadersBatch) -> RpcResult<()>;
-}
-
-/// Implementation of Substrate headers submitter that sends headers to running Ethereum node.
-struct EthereumHeadersSubmitter {
-	client: EthereumClient,
-	params: EthereumSigningParams,
-	contract_address: Address,
-	nonce: U256,
-}
-
-#[async_trait]
-impl HeadersSubmitter for EthereumHeadersSubmitter {
-	async fn is_headers_incomplete(&self, headers: &HeadersBatch) -> RpcResult<usize> {
-		let [h1, h2, h3, h4] = headers.encode();
-		let (encoded_call, call_decoder) =
-			bridge_contract::functions::is_incomplete_headers::call(h1, h2, h3, h4);
-		let call_request = CallRequest {
-			to: Some(self.contract_address),
-			data: Some(encoded_call.into()),
-			..Default::default()
-		};
-
-		let call_result = self.client.eth_call(call_request).await?;
-		let incomplete_index: U256 = call_decoder.decode(&call_result.0)?;
-		if incomplete_index > HEADERS_BATCH.into() {
-			return Err(RpcError::Ethereum(EthereumNodeError::InvalidIncompleteIndex))
-		}
-
-		Ok(incomplete_index.low_u32() as _)
-	}
-
-	async fn submit_headers(&mut self, headers: HeadersBatch) -> RpcResult<()> {
-		let [h1, h2, h3, h4] = headers.encode();
-		let result = self
-			.client
-			.submit_ethereum_transaction(
-				&self.params,
-				Some(self.contract_address),
-				Some(self.nonce),
-				false,
-				bridge_contract::functions::import_headers::encode_input(h1, h2, h3, h4),
-			)
-			.await;
-
-		if result.is_ok() {
-			self.nonce += U256::one();
-		}
-
-		result
-	}
-}
-
-/// Submit multiple Substrate headers.
-async fn submit_substrate_headers(
-	mut header_submitter: impl HeadersSubmitter,
-	mut headers: Vec<QueuedRialtoHeader>,
-) -> SubmittedHeaders<RialtoHeaderId, RpcError> {
-	let mut submitted_headers = SubmittedHeaders::default();
-
-	let mut ids = headers.iter().map(|header| header.id()).rev().collect::<Vec<_>>();
-	headers.reverse();
-
-	while !headers.is_empty() {
-		let (headers, submitting_ids) = HeadersBatch::pop_from(&mut headers, &mut ids)
-			.expect("Headers and ids are not empty; qed");
-
-		submitted_headers.fatal_error = submit_substrate_headers_batch(
-			&mut header_submitter,
-			&mut submitted_headers,
-			submitting_ids,
-			headers,
-		)
-		.await;
-
-		if submitted_headers.fatal_error.is_some() {
-			ids.reverse();
-			submitted_headers.rejected.extend(ids);
-			break
-		}
-	}
-
-	submitted_headers
-}
-
-/// Submit 4 Substrate headers in single PoA transaction.
-async fn submit_substrate_headers_batch( - header_submitter: &mut impl HeadersSubmitter, - submitted_headers: &mut SubmittedHeaders, - mut ids: Vec, - mut headers: HeadersBatch, -) -> Option { - debug_assert_eq!(ids.len(), headers.len(),); - - // if parent of first header is either incomplete, or rejected, we assume that contract - // will reject this header as well - let parent_id = headers.header1.parent_id(); - if submitted_headers.rejected.contains(&parent_id) || - submitted_headers.incomplete.contains(&parent_id) - { - submitted_headers.rejected.extend(ids); - return None - } - - // check if headers are incomplete - let incomplete_header_index = match header_submitter.is_headers_incomplete(&headers).await { - // All headers valid - Ok(0) => None, - Ok(incomplete_header_index) => Some(incomplete_header_index), - Err(error) => { - // contract has rejected all headers => we do not want to submit it - submitted_headers.rejected.extend(ids); - if error.is_connection_error() { - return Some(error) - } else { - return None - } - }, - }; - - // Modify `ids` and `headers` to only contain values that are going to be accepted. - let rejected = if let Some(idx) = incomplete_header_index { - let len = std::cmp::min(idx, ids.len()); - headers - .split_off(len) - .expect("len > 0, the case where all headers are valid is converted to None; qed"); - ids.split_off(len) - } else { - Vec::new() - }; - let submitted = ids; - let submit_result = header_submitter.submit_headers(headers).await; - match submit_result { - Ok(_) => { - if incomplete_header_index.is_some() { - submitted_headers.incomplete.extend(submitted.iter().last().cloned()); - } - submitted_headers.submitted.extend(submitted); - submitted_headers.rejected.extend(rejected); - None - }, - Err(error) => { - submitted_headers.rejected.extend(submitted); - submitted_headers.rejected.extend(rejected); - Some(error) - }, - } -} - -#[cfg(test)] -mod tests { - use super::*; - use sp_runtime::traits::Header; - - struct TestHeadersSubmitter { - incomplete: Vec, - failed: Vec, - } - - #[async_trait] - impl HeadersSubmitter for TestHeadersSubmitter { - async fn is_headers_incomplete(&self, headers: &HeadersBatch) -> RpcResult { - if self.incomplete.iter().any(|i| i.0 == headers.header1.id().0) { - Ok(1) - } else { - Ok(0) - } - } - - async fn submit_headers(&mut self, headers: HeadersBatch) -> RpcResult<()> { - if self.failed.iter().any(|i| i.0 == headers.header1.id().0) { - Err(RpcError::Ethereum(EthereumNodeError::InvalidSubstrateBlockNumber)) - } else { - Ok(()) - } - } - } - - fn header(number: rialto_runtime::BlockNumber) -> QueuedRialtoHeader { - QueuedRialtoHeader::new( - rialto_runtime::Header::new( - number, - Default::default(), - Default::default(), - if number == 0 { Default::default() } else { header(number - 1).id().1 }, - Default::default(), - ) - .into(), - ) - } - - #[test] - fn descendants_of_incomplete_headers_are_not_submitted() { - let submitted_headers = async_std::task::block_on(submit_substrate_headers( - TestHeadersSubmitter { incomplete: vec![header(5).id()], failed: vec![] }, - vec![header(5), header(6)], - )); - assert_eq!(submitted_headers.submitted, vec![header(5).id()]); - assert_eq!(submitted_headers.incomplete, vec![header(5).id()]); - assert_eq!(submitted_headers.rejected, vec![header(6).id()]); - assert!(submitted_headers.fatal_error.is_none()); - } - - #[test] - fn headers_after_fatal_error_are_not_submitted() { - let submitted_headers = async_std::task::block_on(submit_substrate_headers( - TestHeadersSubmitter { 
incomplete: vec![], failed: vec![header(9).id()] }, - vec![header(5), header(6), header(7), header(8), header(9), header(10), header(11)], - )); - assert_eq!( - submitted_headers.submitted, - vec![header(5).id(), header(6).id(), header(7).id(), header(8).id()] - ); - assert_eq!(submitted_headers.incomplete, vec![]); - assert_eq!( - submitted_headers.rejected, - vec![header(9).id(), header(10).id(), header(11).id(),] - ); - assert!(submitted_headers.fatal_error.is_some()); - } - - fn headers_batch() -> HeadersBatch { - let mut init_headers = vec![header(1), header(2), header(3), header(4), header(5)]; - init_headers.reverse(); - let mut init_ids = init_headers.iter().map(|h| h.id()).collect(); - let (headers, ids) = HeadersBatch::pop_from(&mut init_headers, &mut init_ids).unwrap(); - assert_eq!(init_headers, vec![header(5)]); - assert_eq!(init_ids, vec![header(5).id()]); - assert_eq!(ids, vec![header(1).id(), header(2).id(), header(3).id(), header(4).id()]); - headers - } - - #[test] - fn headers_batch_len() { - let headers = headers_batch(); - assert_eq!(headers.len(), 4); - } - - #[test] - fn headers_batch_encode() { - let headers = headers_batch(); - assert_eq!( - headers.encode(), - [ - header(1).header().encode(), - header(2).header().encode(), - header(3).header().encode(), - header(4).header().encode(), - ] - ); - } - - #[test] - fn headers_batch_split_off() { - // given - let mut headers = headers_batch(); - - // when - assert!(headers.split_off(0).is_err()); - assert_eq!(headers.header1, header(1)); - assert!(headers.header2.is_some()); - assert!(headers.header3.is_some()); - assert!(headers.header4.is_some()); - - // when - let mut h = headers.clone(); - h.split_off(1).unwrap(); - assert!(h.header2.is_none()); - assert!(h.header3.is_none()); - assert!(h.header4.is_none()); - - // when - let mut h = headers.clone(); - h.split_off(2).unwrap(); - assert!(h.header2.is_some()); - assert!(h.header3.is_none()); - assert!(h.header4.is_none()); - - // when - let mut h = headers.clone(); - h.split_off(3).unwrap(); - assert!(h.header2.is_some()); - assert!(h.header3.is_some()); - assert!(h.header4.is_none()); - - // when - let mut h = headers; - h.split_off(4).unwrap(); - assert!(h.header2.is_some()); - assert!(h.header3.is_some()); - assert!(h.header4.is_some()); - } -} diff --git a/relays/bin-ethereum/src/ethereum_deploy_contract.rs b/relays/bin-ethereum/src/ethereum_deploy_contract.rs deleted file mode 100644 index 76a75b062ecb..000000000000 --- a/relays/bin-ethereum/src/ethereum_deploy_contract.rs +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
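A standalone sketch of the partitioning rule that `submit_substrate_headers_batch` and the tests above implement. This is plain Rust written for this document, not code from the deleted crate; `incomplete_index` stands for the 1-based value returned by the contract's `isIncompleteHeaders`, with 0 meaning "all headers are complete".

    /// Split a batch at the contract-reported incomplete index: headers up to and
    /// including the first incomplete one are still submitted (the last of them is
    /// tracked as incomplete until its finality proof arrives), the rest are
    /// rejected and retried later.
    fn partition_batch<T: Clone>(batch: &[T], incomplete_index: usize) -> (Vec<T>, Vec<T>) {
        if incomplete_index == 0 {
            // every header is complete: submit them all, reject none
            return (batch.to_vec(), Vec::new())
        }
        let len = incomplete_index.min(batch.len());
        (batch[..len].to_vec(), batch[len..].to_vec())
    }

    #[test]
    fn partition_matches_descendants_test_above() {
        // mirrors `descendants_of_incomplete_headers_are_not_submitted`: header 5
        // is incomplete (index 1), so its descendant header 6 is rejected
        assert_eq!(partition_batch(&[5, 6], 1), (vec![5], vec![6]));
    }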
-
-use crate::{
-	error::{Error, Result},
-	ethereum_client::{bridge_contract, EthereumHighLevelRpc},
-	rpc_errors::RpcError,
-};
-
-use codec::{Decode, Encode};
-use num_traits::Zero;
-use relay_ethereum_client::{
-	Client as EthereumClient, ConnectionParams as EthereumConnectionParams,
-	SigningParams as EthereumSigningParams,
-};
-use relay_rialto_client::{HeaderId as RialtoHeaderId, Rialto};
-use relay_substrate_client::{
-	Client as SubstrateClient, ConnectionParams as SubstrateConnectionParams,
-	OpaqueGrandpaAuthoritiesSet,
-};
-use relay_utils::HeaderId;
-
-/// Ethereum synchronization parameters.
-#[derive(Debug)]
-pub struct EthereumDeployContractParams {
-	/// Ethereum connection params.
-	pub eth_params: EthereumConnectionParams,
-	/// Ethereum signing params.
-	pub eth_sign: EthereumSigningParams,
-	/// Ethereum contract bytecode.
-	pub eth_contract_code: Vec<u8>,
-	/// Substrate connection params.
-	pub sub_params: SubstrateConnectionParams,
-	/// Initial authorities set id.
-	pub sub_initial_authorities_set_id: Option<u64>,
-	/// Initial authorities set.
-	pub sub_initial_authorities_set: Option<Vec<u8>>,
-	/// Initial header.
-	pub sub_initial_header: Option<Vec<u8>>,
-}
-
-/// Deploy Bridge contract on Ethereum chain.
-pub async fn run(params: EthereumDeployContractParams) {
-	let EthereumDeployContractParams {
-		eth_params,
-		eth_sign,
-		sub_params,
-		sub_initial_authorities_set_id,
-		sub_initial_authorities_set,
-		sub_initial_header,
-		eth_contract_code,
-	} = params;
-
-	let result = async move {
-		let eth_client = EthereumClient::try_connect(eth_params).await.map_err(RpcError::Ethereum)?;
-		let sub_client = SubstrateClient::<Rialto>::try_connect(sub_params).await.map_err(RpcError::Substrate)?;
-
-		let (initial_header_id, initial_header) = prepare_initial_header(&sub_client, sub_initial_header).await?;
-		let initial_set_id = sub_initial_authorities_set_id.unwrap_or(0);
-		let initial_set = prepare_initial_authorities_set(
-			&sub_client,
-			initial_header_id.1,
-			sub_initial_authorities_set,
-		).await?;
-
-		log::info!(
-			target: "bridge",
-			"Deploying Ethereum contract.\r\n\tInitial header: {:?}\r\n\tInitial header id: {:?}\r\n\tInitial header encoded: {}\r\n\tInitial authorities set ID: {}\r\n\tInitial authorities set: {}",
-			initial_header,
-			initial_header_id,
-			hex::encode(&initial_header),
-			initial_set_id,
-			hex::encode(&initial_set),
-		);
-
-		deploy_bridge_contract(
-			&eth_client,
-			&eth_sign,
-			eth_contract_code,
-			initial_header,
-			initial_set_id,
-			initial_set,
-		).await
-	}.await;
-
-	if let Err(error) = result {
-		log::error!(target: "bridge", "{}", error);
-	}
-}
-
-/// Prepare initial header.
-async fn prepare_initial_header(
-	sub_client: &SubstrateClient<Rialto>,
-	sub_initial_header: Option<Vec<u8>>,
-) -> Result<(RialtoHeaderId, Vec<u8>)> {
-	match sub_initial_header {
-		Some(raw_initial_header) => {
-			match rialto_runtime::Header::decode(&mut &raw_initial_header[..]) {
-				Ok(initial_header) =>
-					Ok((HeaderId(initial_header.number, initial_header.hash()), raw_initial_header)),
-				Err(error) => Err(Error::DecodeInitialHeader(error)),
-			}
-		},
-		None => {
-			let initial_header = sub_client.header_by_number(Zero::zero()).await;
-			initial_header
-				.map(|header| (HeaderId(Zero::zero(), header.hash()), header.encode()))
-				.map_err(Error::ReadGenesisHeader)
-		},
-	}
-}
-
-/// Prepare initial GRANDPA authorities set.
-async fn prepare_initial_authorities_set( - sub_client: &SubstrateClient, - sub_initial_header_hash: rialto_runtime::Hash, - sub_initial_authorities_set: Option>, -) -> Result { - let initial_authorities_set = match sub_initial_authorities_set { - Some(initial_authorities_set) => Ok(initial_authorities_set), - None => sub_client.grandpa_authorities_set(sub_initial_header_hash).await, - }; - - initial_authorities_set.map_err(Error::ReadAuthorities) -} - -/// Deploy bridge contract to Ethereum chain. -async fn deploy_bridge_contract( - eth_client: &EthereumClient, - params: &EthereumSigningParams, - contract_code: Vec, - initial_header: Vec, - initial_set_id: u64, - initial_authorities: Vec, -) -> Result<()> { - eth_client - .submit_ethereum_transaction( - params, - None, - None, - false, - bridge_contract::constructor( - contract_code, - initial_header, - initial_set_id, - initial_authorities, - ), - ) - .await - .map_err(Error::DeployContract) -} diff --git a/relays/bin-ethereum/src/ethereum_exchange.rs b/relays/bin-ethereum/src/ethereum_exchange.rs deleted file mode 100644 index 90d9a23835d4..000000000000 --- a/relays/bin-ethereum/src/ethereum_exchange.rs +++ /dev/null @@ -1,402 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Relaying proofs of PoA -> Substrate exchange transactions. - -use crate::{ - instances::BridgeInstance, - rialto_client::{SubmitEthereumExchangeTransactionProof, SubstrateHighLevelRpc}, - rpc_errors::RpcError, - substrate_types::into_substrate_ethereum_receipt, -}; - -use async_trait::async_trait; -use bp_currency_exchange::MaybeLockFundsTransaction; -use exchange_relay::{ - exchange::{ - relay_single_transaction_proof, SourceBlock, SourceClient, SourceTransaction, TargetClient, - TransactionProofPipeline, - }, - exchange_loop::{run as run_loop, InMemoryStorage}, -}; -use relay_ethereum_client::{ - types::{ - HeaderId as EthereumHeaderId, HeaderWithTransactions as EthereumHeaderWithTransactions, - Transaction as EthereumTransaction, TransactionHash as EthereumTransactionHash, H256, - HEADER_ID_PROOF, - }, - Client as EthereumClient, ConnectionParams as EthereumConnectionParams, -}; -use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams}; -use relay_substrate_client::{ - Chain as SubstrateChain, Client as SubstrateClient, - ConnectionParams as SubstrateConnectionParams, -}; -use relay_utils::{metrics::MetricsParams, relay_loop::Client as RelayClient, HeaderId}; -use rialto_runtime::exchange::EthereumTransactionInclusionProof; -use std::{sync::Arc, time::Duration}; - -/// Interval at which we ask Ethereum node for updates. -const ETHEREUM_TICK_INTERVAL: Duration = Duration::from_secs(10); - -/// Exchange relay mode. -#[derive(Debug)] -pub enum ExchangeRelayMode { - /// Relay single transaction and quit. 
- Single(EthereumTransactionHash), - /// Auto-relay transactions starting with given block. - Auto(Option), -} - -/// PoA exchange transaction relay params. -pub struct EthereumExchangeParams { - /// Ethereum connection params. - pub eth_params: EthereumConnectionParams, - /// Substrate connection params. - pub sub_params: SubstrateConnectionParams, - /// Substrate signing params. - pub sub_sign: RialtoSigningParams, - /// Relay working mode. - pub mode: ExchangeRelayMode, - /// Metrics parameters. - pub metrics_params: MetricsParams, - /// Instance of the bridge pallet being synchronized. - pub instance: Arc, -} - -impl std::fmt::Debug for EthereumExchangeParams { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - f.debug_struct("EthereumExchangeParams") - .field("eth_params", &self.eth_params) - .field("sub_params", &self.sub_params) - .field("sub_sign", &sp_core::Pair::public(&self.sub_sign)) - .field("mode", &self.mode) - .field("metrics_params", &self.metrics_params) - .field("instance", &self.instance) - .finish() - } -} - -/// Ethereum to Substrate exchange pipeline. -struct EthereumToSubstrateExchange; - -impl TransactionProofPipeline for EthereumToSubstrateExchange { - const SOURCE_NAME: &'static str = "Ethereum"; - const TARGET_NAME: &'static str = "Substrate"; - - type Block = EthereumSourceBlock; - type TransactionProof = EthereumTransactionInclusionProof; -} - -/// Ethereum source block. -struct EthereumSourceBlock(EthereumHeaderWithTransactions); - -impl SourceBlock for EthereumSourceBlock { - type Hash = H256; - type Number = u64; - type Transaction = EthereumSourceTransaction; - - fn id(&self) -> EthereumHeaderId { - HeaderId( - self.0.number.expect(HEADER_ID_PROOF).as_u64(), - self.0.hash.expect(HEADER_ID_PROOF), - ) - } - - fn transactions(&self) -> Vec { - self.0.transactions.iter().cloned().map(EthereumSourceTransaction).collect() - } -} - -/// Ethereum source transaction. -struct EthereumSourceTransaction(EthereumTransaction); - -impl SourceTransaction for EthereumSourceTransaction { - type Hash = EthereumTransactionHash; - - fn hash(&self) -> Self::Hash { - self.0.hash - } -} - -/// Ethereum node as transactions proof source. -#[derive(Clone)] -struct EthereumTransactionsSource { - client: EthereumClient, -} - -#[async_trait] -impl RelayClient for EthereumTransactionsSource { - type Error = RpcError; - - async fn reconnect(&mut self) -> Result<(), RpcError> { - self.client.reconnect().await.map_err(Into::into) - } -} - -#[async_trait] -impl SourceClient for EthereumTransactionsSource { - async fn tick(&self) { - async_std::task::sleep(ETHEREUM_TICK_INTERVAL).await; - } - - async fn block_by_hash(&self, hash: H256) -> Result { - self.client - .header_by_hash_with_transactions(hash) - .await - .map(EthereumSourceBlock) - .map_err(Into::into) - } - - async fn block_by_number(&self, number: u64) -> Result { - self.client - .header_by_number_with_transactions(number) - .await - .map(EthereumSourceBlock) - .map_err(Into::into) - } - - async fn transaction_block( - &self, - hash: &EthereumTransactionHash, - ) -> Result, RpcError> { - let eth_tx = match self.client.transaction_by_hash(*hash).await? 
{ - Some(eth_tx) => eth_tx, - None => return Ok(None), - }; - - // we need transaction to be mined => check if it is included in the block - let (eth_header_id, eth_tx_index) = - match (eth_tx.block_number, eth_tx.block_hash, eth_tx.transaction_index) { - (Some(block_number), Some(block_hash), Some(transaction_index)) => - (HeaderId(block_number.as_u64(), block_hash), transaction_index.as_u64() as _), - _ => return Ok(None), - }; - - Ok(Some((eth_header_id, eth_tx_index))) - } - - async fn transaction_proof( - &self, - block: &EthereumSourceBlock, - tx_index: usize, - ) -> Result { - const TRANSACTION_HAS_RAW_FIELD_PROOF: &str = - "RPC level checks that transactions from Ethereum\ - node are having `raw` field; qed"; - const BLOCK_HAS_HASH_FIELD_PROOF: &str = - "RPC level checks that block has `hash` field; qed"; - - let mut transaction_proof = Vec::with_capacity(block.0.transactions.len()); - for tx in &block.0.transactions { - let raw_tx_receipt = self - .client - .transaction_receipt(tx.hash) - .await - .map(|receipt| into_substrate_ethereum_receipt(&receipt)) - .map(|receipt| receipt.rlp())?; - let raw_tx = tx.raw.clone().expect(TRANSACTION_HAS_RAW_FIELD_PROOF).0; - transaction_proof.push((raw_tx, raw_tx_receipt)); - } - - Ok(EthereumTransactionInclusionProof { - block: block.0.hash.expect(BLOCK_HAS_HASH_FIELD_PROOF), - index: tx_index as _, - proof: transaction_proof, - }) - } -} - -/// Substrate node as transactions proof target. -#[derive(Clone)] -struct SubstrateTransactionsTarget { - client: SubstrateClient, - sign_params: RialtoSigningParams, - bridge_instance: Arc, -} - -#[async_trait] -impl RelayClient for SubstrateTransactionsTarget { - type Error = RpcError; - - async fn reconnect(&mut self) -> Result<(), RpcError> { - Ok(self.client.reconnect().await?) - } -} - -#[async_trait] -impl TargetClient for SubstrateTransactionsTarget { - async fn tick(&self) { - async_std::task::sleep(Rialto::AVERAGE_BLOCK_INTERVAL).await; - } - - async fn is_header_known(&self, id: &EthereumHeaderId) -> Result { - self.client.ethereum_header_known(*id).await - } - - async fn is_header_finalized(&self, id: &EthereumHeaderId) -> Result { - // we check if header is finalized by simple comparison of the header number and - // number of best finalized PoA header known to Substrate node. 
- // - // this may lead to failure in tx proof import if PoA reorganization has happened - // after we have checked that our tx has been included into given block - // - // the fix is easy, but since this code is mostly developed for demonstration purposes, - // I'm leaving this KISS-based design here - let best_finalized_ethereum_block = self.client.best_ethereum_finalized_block().await?; - Ok(id.0 <= best_finalized_ethereum_block.0) - } - - async fn best_finalized_header_id(&self) -> Result { - // we can't continue to relay exchange proofs if Substrate node is out of sync, because - // it may have already received (some of) proofs that we're going to relay - self.client.ensure_synced().await?; - - self.client.best_ethereum_finalized_block().await - } - - async fn filter_transaction_proof( - &self, - proof: &EthereumTransactionInclusionProof, - ) -> Result { - // let's try to parse transaction locally - let (raw_tx, raw_tx_receipt) = &proof.proof[proof.index as usize]; - let parse_result = rialto_runtime::exchange::EthTransaction::parse(raw_tx); - if parse_result.is_err() { - return Ok(false) - } - - // now let's check if transaction is successful - match bp_eth_poa::Receipt::is_successful_raw_receipt(raw_tx_receipt) { - Ok(true) => (), - _ => return Ok(false), - } - - // seems that transaction is relayable - let's check if runtime is able to import it - // (we can't if e.g. header is pruned or there's some issue with tx data) - self.client.verify_exchange_transaction_proof(proof.clone()).await - } - - async fn submit_transaction_proof( - &self, - proof: EthereumTransactionInclusionProof, - ) -> Result<(), RpcError> { - let (sign_params, bridge_instance) = - (self.sign_params.clone(), self.bridge_instance.clone()); - self.client - .submit_exchange_transaction_proof(sign_params, bridge_instance, proof) - .await - } -} - -/// Relay exchange transaction proof(s) to Substrate node. -pub async fn run(params: EthereumExchangeParams) { - match params.mode { - ExchangeRelayMode::Single(eth_tx_hash) => { - let result = run_single_transaction_relay(params, eth_tx_hash).await; - match result { - Ok(_) => log::info!( - target: "bridge", - "Ethereum transaction {} proof has been successfully submitted to Substrate node", - eth_tx_hash, - ), - Err(err) => log::error!( - target: "bridge", - "Error submitting Ethereum transaction {} proof to Substrate node: {}", - eth_tx_hash, - err, - ), - } - }, - ExchangeRelayMode::Auto(eth_start_with_block_number) => { - let result = - run_auto_transactions_relay_loop(params, eth_start_with_block_number).await; - if let Err(err) = result { - log::error!( - target: "bridge", - "Error auto-relaying Ethereum transactions proofs to Substrate node: {}", - err, - ); - } - }, - } -} - -/// Run single transaction proof relay and stop. -async fn run_single_transaction_relay( - params: EthereumExchangeParams, - eth_tx_hash: H256, -) -> anyhow::Result<()> { - let EthereumExchangeParams { eth_params, sub_params, sub_sign, instance, .. 
} = params; - - let eth_client = EthereumClient::try_connect(eth_params).await.map_err(RpcError::Ethereum)?; - let sub_client = SubstrateClient::<Rialto>::try_connect(sub_params) - .await - .map_err(RpcError::Substrate)?; - - let source = EthereumTransactionsSource { client: eth_client }; - let target = SubstrateTransactionsTarget { - client: sub_client, - sign_params: sub_sign, - bridge_instance: instance, - }; - - relay_single_transaction_proof(&source, &target, eth_tx_hash) - .await - .map_err(Into::into) -} - -async fn run_auto_transactions_relay_loop( - params: EthereumExchangeParams, - eth_start_with_block_number: Option<u64>, -) -> anyhow::Result<()> { - let EthereumExchangeParams { - eth_params, sub_params, sub_sign, metrics_params, instance, .. - } = params; - - let eth_client = EthereumClient::new(eth_params).await; - let sub_client = SubstrateClient::<Rialto>::new(sub_params).await; - - let eth_start_with_block_number = match eth_start_with_block_number { - Some(eth_start_with_block_number) => eth_start_with_block_number, - None => - sub_client - .best_ethereum_finalized_block() - .await - .map_err(|err| { - anyhow::format_err!( - "Error retrieving best finalized Ethereum block from Substrate node: {:?}", - err - ) - })? - .0, - }; - - run_loop( - InMemoryStorage::new(eth_start_with_block_number), - EthereumTransactionsSource { client: eth_client }, - SubstrateTransactionsTarget { - client: sub_client, - sign_params: sub_sign, - bridge_instance: instance, - }, - metrics_params, - futures::future::pending(), - ) - .await?; - - Ok(()) -} diff --git a/relays/bin-ethereum/src/ethereum_exchange_submit.rs b/relays/bin-ethereum/src/ethereum_exchange_submit.rs deleted file mode 100644 index f68a21e594e0..000000000000 --- a/relays/bin-ethereum/src/ethereum_exchange_submit.rs +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>. - -//! Submitting Ethereum -> Substrate exchange transactions. - -use anyhow::anyhow; -use bp_eth_poa::{ - signatures::{secret_to_address, SignTransaction}, - UnsignedTransaction, -}; -use relay_ethereum_client::{ - types::{CallRequest, U256}, - Client as EthereumClient, ConnectionParams as EthereumConnectionParams, - SigningParams as EthereumSigningParams, -}; -use rialto_runtime::exchange::LOCK_FUNDS_ADDRESS; - -/// Ethereum exchange transaction params. -#[derive(Debug)] -pub struct EthereumExchangeSubmitParams { - /// Ethereum connection params. - pub eth_params: EthereumConnectionParams, - /// Ethereum signing params. - pub eth_sign: EthereumSigningParams, - /// Ethereum signer nonce. - pub eth_nonce: Option<U256>, - /// Amount of Ethereum tokens to lock. - pub eth_amount: U256, - /// Funds recipient on Substrate side. - pub sub_recipient: [u8; 32], -} - -/// Submit single Ethereum -> Substrate exchange transaction.
-pub async fn run(params: EthereumExchangeSubmitParams) { - let EthereumExchangeSubmitParams { eth_params, eth_sign, eth_nonce, eth_amount, sub_recipient } = - params; - - let result: anyhow::Result<_> = async move { - let eth_client = EthereumClient::try_connect(eth_params) - .await - .map_err(|err| anyhow!("error connecting to Ethereum node: {:?}", err))?; - - let eth_signer_address = secret_to_address(&eth_sign.signer); - let sub_recipient_encoded = sub_recipient; - let nonce = match eth_nonce { - Some(eth_nonce) => eth_nonce, - None => eth_client - .account_nonce(eth_signer_address) - .await - .map_err(|err| anyhow!("error fetching account nonce: {:?}", err))?, - }; - let gas = eth_client - .estimate_gas(CallRequest { - from: Some(eth_signer_address), - to: Some(LOCK_FUNDS_ADDRESS.into()), - value: Some(eth_amount), - data: Some(sub_recipient_encoded.to_vec().into()), - ..Default::default() - }) - .await - .map_err(|err| anyhow!("error estimating gas requirements: {:?}", err))?; - let eth_tx_unsigned = UnsignedTransaction { - nonce, - gas_price: eth_sign.gas_price, - gas, - to: Some(LOCK_FUNDS_ADDRESS.into()), - value: eth_amount, - payload: sub_recipient_encoded.to_vec(), - }; - let eth_tx_signed = - eth_tx_unsigned.clone().sign_by(&eth_sign.signer, Some(eth_sign.chain_id)); - eth_client - .submit_transaction(eth_tx_signed) - .await - .map_err(|err| anyhow!("error submitting transaction: {:?}", err))?; - - Ok(eth_tx_unsigned) - } - .await; - - match result { - Ok(eth_tx_unsigned) => { - log::info!( - target: "bridge", - "Exchange transaction has been submitted to Ethereum node: {:?}", - eth_tx_unsigned, - ); - }, - Err(err) => { - log::error!( - target: "bridge", - "Error submitting exchange transaction to Ethereum node: {}", - err, - ); - }, - } -} diff --git a/relays/bin-ethereum/src/ethereum_sync_loop.rs b/relays/bin-ethereum/src/ethereum_sync_loop.rs deleted file mode 100644 index ee5f8a4600ec..000000000000 --- a/relays/bin-ethereum/src/ethereum_sync_loop.rs +++ /dev/null @@ -1,299 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>. - -//! Ethereum PoA -> Rialto-Substrate synchronization.
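Worth pausing on the finality check in the removed exchange relay above (`is_header_finalized` in ethereum_exchange.rs), since its comment carries the file's one real design decision. A minimal std-only sketch of that number-only check, with toy types standing in for the relay's; the hash comparison a reorg-safe variant would add is noted in the comments:

```rust
// Toy restatement of the KISS finality check from the deleted
// ethereum_exchange.rs; the types here are simplified stand-ins.

#[derive(Clone, Copy, Debug, PartialEq)]
struct HeaderId(u64, [u8; 32]); // (block number, block hash)

fn is_finalized(id: HeaderId, best_finalized: HeaderId) -> bool {
    // Number comparison only: a header is treated as final once its
    // number is at or below the best finalized number. A PoA reorg that
    // happens after the transaction was located can invalidate this;
    // a reorg-safe variant would also compare hashes at equal heights.
    id.0 <= best_finalized.0
}

fn main() {
    let best = HeaderId(100, [0u8; 32]);
    assert!(is_finalized(HeaderId(99, [1u8; 32]), best));
    assert!(!is_finalized(HeaderId(101, [2u8; 32]), best));
}
```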
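The deleted exchange-submit `run` above assembles the lock-funds transaction in three steps: default the nonce to the on-chain account nonce when none was passed, estimate gas for the call, then sign and submit. A std-only sketch of the nonce-defaulting step, with toy types (the closure stands in for the `account_nonce` RPC):

```rust
// Simplified stand-ins for the relay's transaction types.
struct UnsignedTx {
    nonce: u64,
    gas: u64,
    value: u128,
    payload: Vec<u8>,
}

fn build_lock_funds_tx(
    cli_nonce: Option<u64>,
    fetch_chain_nonce: impl FnOnce() -> u64, // stands in for the RPC call
    gas_estimate: u64,
    amount: u128,
    recipient: [u8; 32],
) -> UnsignedTx {
    UnsignedTx {
        // Use the explicitly provided nonce, otherwise ask the node.
        nonce: cli_nonce.unwrap_or_else(fetch_chain_nonce),
        gas: gas_estimate,
        value: amount,
        // The 32-byte Substrate account id rides in the tx payload.
        payload: recipient.to_vec(),
    }
}

fn main() {
    let tx = build_lock_funds_tx(None, || 7, 21_000, 1_000_000_000_000_000_000, [0xff; 32]);
    println!("nonce={} gas={} value={}", tx.nonce, tx.gas, tx.value);
}
```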
- -use crate::{ - ethereum_client::EthereumHighLevelRpc, - instances::BridgeInstance, - rialto_client::{SubmitEthereumHeaders, SubstrateHighLevelRpc}, - rpc_errors::RpcError, - substrate_types::{into_substrate_ethereum_header, into_substrate_ethereum_receipts}, -}; - -use async_trait::async_trait; -use codec::Encode; -use headers_relay::{ - sync::{HeadersSyncParams, TargetTransactionMode}, - sync_loop::{SourceClient, TargetClient}, - sync_types::{HeadersSyncPipeline, QueuedHeader, SourceHeader, SubmittedHeaders}, -}; -use relay_ethereum_client::{ - types::{HeaderHash, HeaderId as EthereumHeaderId, Receipt, SyncHeader as Header}, - Client as EthereumClient, ConnectionParams as EthereumConnectionParams, -}; -use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams}; -use relay_substrate_client::{ - Chain as SubstrateChain, Client as SubstrateClient, - ConnectionParams as SubstrateConnectionParams, -}; -use relay_utils::{metrics::MetricsParams, relay_loop::Client as RelayClient}; - -use std::{collections::HashSet, fmt::Debug, sync::Arc, time::Duration}; - -pub mod consts { - use super::*; - - /// Interval at which we check new Ethereum headers when we are synced/almost synced. - pub const ETHEREUM_TICK_INTERVAL: Duration = Duration::from_secs(10); - /// Max number of headers in single submit transaction. - pub const MAX_HEADERS_IN_SINGLE_SUBMIT: usize = 32; - /// Max total size of headers in single submit transaction. This only affects signed - /// submissions, when several headers are submitted at once. 4096 is the maximal **expected** - /// size of the Ethereum header + transactions receipts (if they're required). - pub const MAX_HEADERS_SIZE_IN_SINGLE_SUBMIT: usize = MAX_HEADERS_IN_SINGLE_SUBMIT * 4096; - /// Max Ethereum headers we want to have in all 'before-submitted' states. - pub const MAX_FUTURE_HEADERS_TO_DOWNLOAD: usize = 128; - /// Max Ethereum headers count we want to have in 'submitted' state. - pub const MAX_SUBMITTED_HEADERS: usize = 128; - /// Max depth of in-memory headers in all states. Past this depth they will be forgotten - /// (pruned). - pub const PRUNE_DEPTH: u32 = 4096; -} - -/// Ethereum synchronization parameters. -pub struct EthereumSyncParams { - /// Ethereum connection params. - pub eth_params: EthereumConnectionParams, - /// Substrate connection params. - pub sub_params: SubstrateConnectionParams, - /// Substrate signing params. - pub sub_sign: RialtoSigningParams, - /// Synchronization parameters. - pub sync_params: HeadersSyncParams, - /// Metrics parameters. - pub metrics_params: MetricsParams, - /// Instance of the bridge pallet being synchronized. - pub instance: Arc, -} - -impl Debug for EthereumSyncParams { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - f.debug_struct("EthereumSyncParams") - .field("eth_params", &self.eth_params) - .field("sub_params", &self.sub_params) - .field("sub_sign", &sp_core::Pair::public(&self.sub_sign)) - .field("sync_params", &self.sync_params) - .field("metrics_params", &self.metrics_params) - .field("instance", &self.instance) - .finish() - } -} - -/// Ethereum synchronization pipeline. 
-#[derive(Clone, Copy, Debug)] -#[cfg_attr(test, derive(PartialEq))] -pub struct EthereumHeadersSyncPipeline; - -impl HeadersSyncPipeline for EthereumHeadersSyncPipeline { - const SOURCE_NAME: &'static str = "Ethereum"; - const TARGET_NAME: &'static str = "Substrate"; - - type Hash = HeaderHash; - type Number = u64; - type Header = Header; - type Extra = Vec; - type Completion = (); - - fn estimate_size(source: &QueuedHeader) -> usize { - into_substrate_ethereum_header(source.header()).encode().len() + - into_substrate_ethereum_receipts(source.extra()) - .map(|extra| extra.encode().len()) - .unwrap_or(0) - } -} - -/// Queued ethereum header ID. -pub type QueuedEthereumHeader = QueuedHeader; - -/// Ethereum client as headers source. -#[derive(Clone)] -struct EthereumHeadersSource { - /// Ethereum node client. - client: EthereumClient, -} - -impl EthereumHeadersSource { - fn new(client: EthereumClient) -> Self { - Self { client } - } -} - -#[async_trait] -impl RelayClient for EthereumHeadersSource { - type Error = RpcError; - - async fn reconnect(&mut self) -> Result<(), RpcError> { - self.client.reconnect().await.map_err(Into::into) - } -} - -#[async_trait] -impl SourceClient for EthereumHeadersSource { - async fn best_block_number(&self) -> Result { - // we **CAN** continue to relay headers if Ethereum node is out of sync, because - // Substrate node may be missing headers that are already available at the Ethereum - - self.client.best_block_number().await.map_err(Into::into) - } - - async fn header_by_hash(&self, hash: HeaderHash) -> Result { - self.client.header_by_hash(hash).await.map(Into::into).map_err(Into::into) - } - - async fn header_by_number(&self, number: u64) -> Result { - self.client.header_by_number(number).await.map(Into::into).map_err(Into::into) - } - - async fn header_completion( - &self, - id: EthereumHeaderId, - ) -> Result<(EthereumHeaderId, Option<()>), RpcError> { - Ok((id, None)) - } - - async fn header_extra( - &self, - id: EthereumHeaderId, - header: QueuedEthereumHeader, - ) -> Result<(EthereumHeaderId, Vec), RpcError> { - self.client.transaction_receipts(id, header.header().transactions.clone()).await - } -} - -#[derive(Clone)] -struct SubstrateHeadersTarget { - /// Substrate node client. - client: SubstrateClient, - /// Whether we want to submit signed (true), or unsigned (false) transactions. - sign_transactions: bool, - /// Substrate signing params. - sign_params: RialtoSigningParams, - /// Bridge instance used in Ethereum to Substrate sync. - bridge_instance: Arc, -} - -impl SubstrateHeadersTarget { - fn new( - client: SubstrateClient, - sign_transactions: bool, - sign_params: RialtoSigningParams, - bridge_instance: Arc, - ) -> Self { - Self { client, sign_transactions, sign_params, bridge_instance } - } -} - -#[async_trait] -impl RelayClient for SubstrateHeadersTarget { - type Error = RpcError; - - async fn reconnect(&mut self) -> Result<(), RpcError> { - Ok(self.client.reconnect().await?) 
- } -} - -#[async_trait] -impl TargetClient for SubstrateHeadersTarget { - async fn best_header_id(&self) -> Result { - // we can't continue to relay headers if Substrate node is out of sync, because - // it may have already received (some of) headers that we're going to relay - self.client.ensure_synced().await?; - - self.client.best_ethereum_block().await - } - - async fn is_known_header( - &self, - id: EthereumHeaderId, - ) -> Result<(EthereumHeaderId, bool), RpcError> { - Ok((id, self.client.ethereum_header_known(id).await?)) - } - - async fn submit_headers( - &self, - headers: Vec, - ) -> SubmittedHeaders { - let (sign_params, bridge_instance, sign_transactions) = - (self.sign_params.clone(), self.bridge_instance.clone(), self.sign_transactions); - self.client - .submit_ethereum_headers(sign_params, bridge_instance, headers, sign_transactions) - .await - } - - async fn incomplete_headers_ids(&self) -> Result, RpcError> { - Ok(HashSet::new()) - } - - #[allow(clippy::unit_arg)] - async fn complete_header( - &self, - id: EthereumHeaderId, - _completion: (), - ) -> Result { - Ok(id) - } - - async fn requires_extra( - &self, - header: QueuedEthereumHeader, - ) -> Result<(EthereumHeaderId, bool), RpcError> { - // we can minimize number of receipts_check calls by checking header - // logs bloom here, but it may give us false positives (when authorities - // source is contract, we never need any logs) - let id = header.header().id(); - let sub_eth_header = into_substrate_ethereum_header(header.header()); - Ok((id, self.client.ethereum_receipts_required(sub_eth_header).await?)) - } -} - -/// Run Ethereum headers synchronization. -pub async fn run(params: EthereumSyncParams) -> Result<(), RpcError> { - let EthereumSyncParams { - eth_params, - sub_params, - sub_sign, - sync_params, - metrics_params, - instance, - } = params; - - let eth_client = EthereumClient::new(eth_params).await; - let sub_client = SubstrateClient::::new(sub_params).await; - - let sign_sub_transactions = match sync_params.target_tx_mode { - TargetTransactionMode::Signed | TargetTransactionMode::Backup => true, - TargetTransactionMode::Unsigned => false, - }; - - let source = EthereumHeadersSource::new(eth_client); - let target = SubstrateHeadersTarget::new(sub_client, sign_sub_transactions, sub_sign, instance); - - headers_relay::sync_loop::run( - source, - consts::ETHEREUM_TICK_INTERVAL, - target, - Rialto::AVERAGE_BLOCK_INTERVAL, - (), - sync_params, - metrics_params, - futures::future::pending(), - ) - .await - .map_err(|e| RpcError::SyncLoop(e.to_string()))?; - - Ok(()) -} diff --git a/relays/bin-ethereum/src/instances.rs b/relays/bin-ethereum/src/instances.rs deleted file mode 100644 index 74feb1da320d..000000000000 --- a/relays/bin-ethereum/src/instances.rs +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2020-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. 
If not, see . - -//! The PoA Bridge Pallet provides a way to include multiple instances of itself in a runtime. When -//! synchronizing a Substrate chain which can include multiple instances of the bridge pallet we -//! must somehow decide which of the instances to sync. -//! -//! Note that each instance of the bridge pallet is coupled with an instance of the currency -//! exchange pallet. We must also have a way to create `Call`s for the correct currency exchange -//! instance. -//! -//! This module helps by preparing the correct `Call`s for each of the different pallet instances. - -use crate::{ - ethereum_sync_loop::QueuedEthereumHeader, - substrate_types::{into_substrate_ethereum_header, into_substrate_ethereum_receipts}, -}; - -use rialto_runtime::{exchange::EthereumTransactionInclusionProof as Proof, Call}; - -/// Interface for `Calls` which are needed to correctly sync the bridge. -/// -/// Each instance of the bridge and currency exchange pallets in the bridge runtime requires similar -/// but slightly different `Call` in order to be synchronized. -pub trait BridgeInstance: Send + Sync + std::fmt::Debug { - /// Used to build a `Call` for importing signed headers to a Substrate runtime. - fn build_signed_header_call(&self, headers: Vec) -> Call; - /// Used to build a `Call` for importing an unsigned header to a Substrate runtime. - fn build_unsigned_header_call(&self, header: QueuedEthereumHeader) -> Call; - /// Used to build a `Call` for importing peer transactions to a Substrate runtime. - fn build_currency_exchange_call(&self, proof: Proof) -> Call; -} - -/// Corresponds to the Rialto instance used in the bridge runtime. -#[derive(Default, Clone, Debug)] -pub struct RialtoPoA; - -impl BridgeInstance for RialtoPoA { - fn build_signed_header_call(&self, headers: Vec) -> Call { - let pallet_call = rialto_runtime::BridgeEthPoACall::import_signed_headers { - headers_with_receipts: headers - .into_iter() - .map(|header| { - ( - into_substrate_ethereum_header(header.header()), - into_substrate_ethereum_receipts(header.extra()), - ) - }) - .collect(), - }; - - rialto_runtime::Call::BridgeRialtoPoa(pallet_call) - } - - fn build_unsigned_header_call(&self, header: QueuedEthereumHeader) -> Call { - let pallet_call = rialto_runtime::BridgeEthPoACall::import_unsigned_header { - header: Box::new(into_substrate_ethereum_header(header.header())), - receipts: into_substrate_ethereum_receipts(header.extra()), - }; - - rialto_runtime::Call::BridgeRialtoPoa(pallet_call) - } - - fn build_currency_exchange_call(&self, proof: Proof) -> Call { - let pallet_call = - rialto_runtime::BridgeCurrencyExchangeCall::import_peer_transaction { proof }; - rialto_runtime::Call::BridgeRialtoCurrencyExchange(pallet_call) - } -} - -/// Corresponds to the Kovan instance used in the bridge runtime. 
-#[derive(Default, Clone, Debug)] -pub struct Kovan; - -impl BridgeInstance for Kovan { - fn build_signed_header_call(&self, headers: Vec) -> Call { - let pallet_call = rialto_runtime::BridgeEthPoACall::import_signed_headers { - headers_with_receipts: headers - .into_iter() - .map(|header| { - ( - into_substrate_ethereum_header(header.header()), - into_substrate_ethereum_receipts(header.extra()), - ) - }) - .collect(), - }; - - rialto_runtime::Call::BridgeKovan(pallet_call) - } - - fn build_unsigned_header_call(&self, header: QueuedEthereumHeader) -> Call { - let pallet_call = rialto_runtime::BridgeEthPoACall::import_unsigned_header { - header: Box::new(into_substrate_ethereum_header(header.header())), - receipts: into_substrate_ethereum_receipts(header.extra()), - }; - - rialto_runtime::Call::BridgeKovan(pallet_call) - } - - fn build_currency_exchange_call(&self, proof: Proof) -> Call { - let pallet_call = - rialto_runtime::BridgeCurrencyExchangeCall::import_peer_transaction { proof }; - rialto_runtime::Call::BridgeKovanCurrencyExchange(pallet_call) - } -} diff --git a/relays/bin-ethereum/src/main.rs b/relays/bin-ethereum/src/main.rs deleted file mode 100644 index 99e1b48968d7..000000000000 --- a/relays/bin-ethereum/src/main.rs +++ /dev/null @@ -1,424 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
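The `RialtoPoA` and `Kovan` implementations above are identical except for the runtime `Call` variant they wrap, which is the whole point of the `BridgeInstance` trait: pick the pallet instance once, at CLI-parsing time, behind a trait object. A stripped-down sketch of that dispatch pattern, with toy call types rather than the actual runtime enums:

```rust
use std::sync::Arc;

// Toy runtime call enum; the real one wraps pallet-specific call structs.
#[derive(Debug)]
enum RuntimeCall {
    BridgeRialtoPoa(Vec<u8>),
    BridgeKovan(Vec<u8>),
}

trait BridgeInstance {
    fn build_signed_header_call(&self, encoded_headers: Vec<u8>) -> RuntimeCall;
}

struct RialtoPoA;
impl BridgeInstance for RialtoPoA {
    fn build_signed_header_call(&self, encoded_headers: Vec<u8>) -> RuntimeCall {
        RuntimeCall::BridgeRialtoPoa(encoded_headers)
    }
}

struct Kovan;
impl BridgeInstance for Kovan {
    fn build_signed_header_call(&self, encoded_headers: Vec<u8>) -> RuntimeCall {
        RuntimeCall::BridgeKovan(encoded_headers)
    }
}

fn main() {
    // The instance is chosen once and passed around as a trait object.
    let instance: Arc<dyn BridgeInstance> = Arc::new(Kovan);
    println!("{:?}", instance.build_signed_header_call(vec![0xde, 0xad]));
}
```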
- -#![recursion_limit = "1024"] - -mod error; -mod ethereum_client; -mod ethereum_deploy_contract; -mod ethereum_exchange; -mod ethereum_exchange_submit; -mod ethereum_sync_loop; -mod instances; -mod rialto_client; -mod rpc_errors; -mod substrate_sync_loop; -mod substrate_types; - -use anyhow::anyhow; -use ethereum_deploy_contract::EthereumDeployContractParams; -use ethereum_exchange::EthereumExchangeParams; -use ethereum_exchange_submit::EthereumExchangeSubmitParams; -use ethereum_sync_loop::EthereumSyncParams; -use headers_relay::sync::TargetTransactionMode; -use hex_literal::hex; -use instances::{BridgeInstance, Kovan, RialtoPoA}; -use libsecp256k1::SecretKey; -use relay_utils::{ - initialize::initialize_relay, - metrics::{MetricsAddress, MetricsParams}, -}; -use sp_core::crypto::Pair; -use substrate_sync_loop::SubstrateSyncParams; - -use headers_relay::sync::HeadersSyncParams; -use relay_ethereum_client::{ - ConnectionParams as EthereumConnectionParams, SigningParams as EthereumSigningParams, -}; -use relay_rialto_client::SigningParams as RialtoSigningParams; -use relay_substrate_client::ConnectionParams as SubstrateConnectionParams; -use std::sync::Arc; - -fn main() { - initialize_relay(); - - let yaml = clap::load_yaml!("cli.yml"); - let matches = clap::App::from_yaml(yaml).get_matches(); - async_std::task::block_on(run_command(&matches)); -} - -async fn run_command(matches: &clap::ArgMatches<'_>) { - match matches.subcommand() { - ("eth-to-sub", Some(eth_to_sub_matches)) => { - log::info!(target: "bridge", "Starting ETH ➡ SUB relay."); - if ethereum_sync_loop::run(match ethereum_sync_params(eth_to_sub_matches) { - Ok(ethereum_sync_params) => ethereum_sync_params, - Err(err) => { - log::error!(target: "bridge", "Error parsing parameters: {}", err); - return - }, - }) - .await - .is_err() - { - log::error!(target: "bridge", "Unable to get Substrate genesis block for Ethereum sync."); - }; - }, - ("sub-to-eth", Some(sub_to_eth_matches)) => { - log::info!(target: "bridge", "Starting SUB ➡ ETH relay."); - if substrate_sync_loop::run(match substrate_sync_params(sub_to_eth_matches) { - Ok(substrate_sync_params) => substrate_sync_params, - Err(err) => { - log::error!(target: "bridge", "Error parsing parameters: {}", err); - return - }, - }) - .await - .is_err() - { - log::error!(target: "bridge", "Unable to get Substrate genesis block for Substrate sync."); - }; - }, - ("eth-deploy-contract", Some(eth_deploy_matches)) => { - log::info!(target: "bridge", "Deploying ETH contracts."); - ethereum_deploy_contract::run( - match ethereum_deploy_contract_params(eth_deploy_matches) { - Ok(ethereum_deploy_params) => ethereum_deploy_params, - Err(err) => { - log::error!(target: "bridge", "Error during contract deployment: {}", err); - return - }, - }, - ) - .await; - }, - ("eth-submit-exchange-tx", Some(eth_exchange_submit_matches)) => { - log::info!(target: "bridge", "Submitting ETH ➡ SUB exchange transaction."); - ethereum_exchange_submit::run( - match ethereum_exchange_submit_params(eth_exchange_submit_matches) { - Ok(eth_exchange_submit_params) => eth_exchange_submit_params, - Err(err) => { - log::error!(target: "bridge", "Error submitting Ethereum exchange transaction: {}", err); - return - }, - }, - ) - .await; - }, - ("eth-exchange-sub", Some(eth_exchange_matches)) => { - log::info!(target: "bridge", "Starting ETH ➡ SUB exchange transactions relay."); - ethereum_exchange::run(match ethereum_exchange_params(eth_exchange_matches) { - Ok(eth_exchange_params) => eth_exchange_params, -
Err(err) => { - log::error!(target: "bridge", "Error relaying Ethereum transactions proofs: {}", err); - return - }, - }) - .await; - }, - ("", _) => { - log::error!(target: "bridge", "No subcommand specified"); - }, - _ => unreachable!("all possible subcommands are checked above; qed"), - } -} - -fn ethereum_connection_params( - matches: &clap::ArgMatches, -) -> anyhow::Result { - let mut params = EthereumConnectionParams::default(); - if let Some(eth_host) = matches.value_of("eth-host") { - params.host = eth_host.into(); - } - if let Some(eth_port) = matches.value_of("eth-port") { - params.port = eth_port.parse().map_err(|e| anyhow!("Failed to parse eth-port: {}", e))?; - } - Ok(params) -} - -fn ethereum_signing_params(matches: &clap::ArgMatches) -> anyhow::Result { - let mut params = EthereumSigningParams::default(); - if let Some(eth_signer) = matches.value_of("eth-signer") { - params.signer = SecretKey::parse_slice( - &hex::decode(eth_signer).map_err(|e| anyhow!("Failed to parse eth-signer: {}", e))?, - ) - .map_err(|e| anyhow!("Invalid eth-signer: {}", e))?; - } - if let Some(eth_chain_id) = matches.value_of("eth-chain-id") { - params.chain_id = eth_chain_id - .parse::() - .map_err(|e| anyhow!("Failed to parse eth-chain-id: {}", e))?; - } - Ok(params) -} - -fn substrate_connection_params( - matches: &clap::ArgMatches, -) -> anyhow::Result { - let mut params = SubstrateConnectionParams::default(); - if let Some(sub_host) = matches.value_of("sub-host") { - params.host = sub_host.into(); - } - if let Some(sub_port) = matches.value_of("sub-port") { - params.port = sub_port.parse().map_err(|e| anyhow!("Failed to parse sub-port: {}", e))?; - } - Ok(params) -} - -fn rialto_signing_params(matches: &clap::ArgMatches) -> anyhow::Result { - let mut params = sp_keyring::AccountKeyring::Alice.pair(); - - if let Some(sub_signer) = matches.value_of("sub-signer") { - let sub_signer_password = matches.value_of("sub-signer-password"); - params = sp_core::sr25519::Pair::from_string(sub_signer, sub_signer_password) - .map_err(|e| anyhow!("Failed to parse sub-signer: {:?}", e))?; - } - Ok(params) -} - -fn ethereum_sync_params(matches: &clap::ArgMatches) -> anyhow::Result { - use crate::ethereum_sync_loop::consts::*; - - let mut sync_params = HeadersSyncParams { - max_future_headers_to_download: MAX_FUTURE_HEADERS_TO_DOWNLOAD, - max_headers_in_submitted_status: MAX_SUBMITTED_HEADERS, - max_headers_in_single_submit: MAX_HEADERS_IN_SINGLE_SUBMIT, - max_headers_size_in_single_submit: MAX_HEADERS_SIZE_IN_SINGLE_SUBMIT, - prune_depth: PRUNE_DEPTH, - target_tx_mode: TargetTransactionMode::Signed, - }; - - match matches.value_of("sub-tx-mode") { - Some("signed") => sync_params.target_tx_mode = TargetTransactionMode::Signed, - Some("unsigned") => { - sync_params.target_tx_mode = TargetTransactionMode::Unsigned; - - // tx pool won't accept too much unsigned transactions - sync_params.max_headers_in_submitted_status = 10; - }, - Some("backup") => sync_params.target_tx_mode = TargetTransactionMode::Backup, - Some(mode) => return Err(anyhow!("Invalid sub-tx-mode: {}", mode)), - None => sync_params.target_tx_mode = TargetTransactionMode::Signed, - } - - let params = EthereumSyncParams { - eth_params: ethereum_connection_params(matches)?, - sub_params: substrate_connection_params(matches)?, - sub_sign: rialto_signing_params(matches)?, - metrics_params: metrics_params(matches)?, - instance: instance_params(matches)?, - sync_params, - }; - - log::debug!(target: "bridge", "Ethereum sync params: {:?}", params); - - 
Ok(params) -} - -fn substrate_sync_params(matches: &clap::ArgMatches) -> anyhow::Result { - use crate::substrate_sync_loop::consts::*; - - let eth_contract_address: relay_ethereum_client::types::Address = - if let Some(eth_contract) = matches.value_of("eth-contract") { - eth_contract.parse()? - } else { - "731a10897d267e19b34503ad902d0a29173ba4b1" - .parse() - .expect("address is hardcoded, thus valid; qed") - }; - - let params = SubstrateSyncParams { - sub_params: substrate_connection_params(matches)?, - eth_params: ethereum_connection_params(matches)?, - eth_sign: ethereum_signing_params(matches)?, - metrics_params: metrics_params(matches)?, - sync_params: HeadersSyncParams { - max_future_headers_to_download: MAX_FUTURE_HEADERS_TO_DOWNLOAD, - max_headers_in_submitted_status: MAX_SUBMITTED_HEADERS, - max_headers_in_single_submit: MAX_SUBMITTED_HEADERS, - max_headers_size_in_single_submit: std::usize::MAX, - prune_depth: PRUNE_DEPTH, - target_tx_mode: TargetTransactionMode::Signed, - }, - eth_contract_address, - }; - - log::debug!(target: "bridge", "Substrate sync params: {:?}", params); - - Ok(params) -} - -fn ethereum_deploy_contract_params( - matches: &clap::ArgMatches, -) -> anyhow::Result { - let eth_contract_code = - parse_hex_argument(matches, "eth-contract-code")?.unwrap_or_else(|| { - hex::decode(include_str!("../res/substrate-bridge-bytecode.hex")) - .expect("code is hardcoded, thus valid; qed") - }); - let sub_initial_authorities_set_id = matches - .value_of("sub-authorities-set-id") - .map(|set| { - set.parse() - .map_err(|e| anyhow!("Failed to parse sub-authorities-set-id: {}", e)) - }) - .transpose()?; - let sub_initial_authorities_set = parse_hex_argument(matches, "sub-authorities-set")?; - let sub_initial_header = parse_hex_argument(matches, "sub-initial-header")?; - - let params = EthereumDeployContractParams { - eth_params: ethereum_connection_params(matches)?, - eth_sign: ethereum_signing_params(matches)?, - sub_params: substrate_connection_params(matches)?, - sub_initial_authorities_set_id, - sub_initial_authorities_set, - sub_initial_header, - eth_contract_code, - }; - - log::debug!(target: "bridge", "Deploy params: {:?}", params); - - Ok(params) -} - -fn ethereum_exchange_submit_params( - matches: &clap::ArgMatches, -) -> anyhow::Result { - let eth_nonce = matches - .value_of("eth-nonce") - .map(|eth_nonce| { - relay_ethereum_client::types::U256::from_dec_str(eth_nonce) - .map_err(|e| anyhow!("Failed to parse eth-nonce: {}", e)) - }) - .transpose()?; - - let eth_amount = matches - .value_of("eth-amount") - .map(|eth_amount| { - eth_amount.parse().map_err(|e| anyhow!("Failed to parse eth-amount: {}", e)) - }) - .transpose()? - .unwrap_or_else(|| { - // This is in Wei, represents 1 ETH - 1_000_000_000_000_000_000_u64.into() - }); - - // This is the well-known Substrate account of Ferdie - let default_recepient = - hex!("1cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c"); - - let sub_recipient = if let Some(sub_recipient) = matches.value_of("sub-recipient") { - hex::decode(&sub_recipient) - .map_err(|err| err.to_string()) - .and_then(|vsub_recipient| { - let expected_len = default_recepient.len(); - if expected_len != vsub_recipient.len() { - Err(format!("invalid length. Expected {} bytes", expected_len)) - } else { - let mut sub_recipient = default_recepient; - sub_recipient.copy_from_slice(&vsub_recipient[..expected_len]); - Ok(sub_recipient) - } - }) - .map_err(|e| anyhow!("Failed to parse sub-recipient: {}", e))? 
- } else { - default_recepient - }; - - let params = EthereumExchangeSubmitParams { - eth_params: ethereum_connection_params(matches)?, - eth_sign: ethereum_signing_params(matches)?, - eth_nonce, - eth_amount, - sub_recipient, - }; - - log::debug!(target: "bridge", "Submit Ethereum exchange tx params: {:?}", params); - - Ok(params) -} - -fn ethereum_exchange_params(matches: &clap::ArgMatches) -> anyhow::Result { - let mode = match matches.value_of("eth-tx-hash") { - Some(eth_tx_hash) => ethereum_exchange::ExchangeRelayMode::Single( - eth_tx_hash.parse().map_err(|e| anyhow!("Failed to parse eth-tx-hash: {}", e))?, - ), - None => ethereum_exchange::ExchangeRelayMode::Auto( - matches - .value_of("eth-start-with-block") - .map(|eth_start_with_block| { - eth_start_with_block - .parse() - .map_err(|e| anyhow!("Failed to parse eth-start-with-block: {}", e)) - }) - .transpose()?, - ), - }; - - let params = EthereumExchangeParams { - eth_params: ethereum_connection_params(matches)?, - sub_params: substrate_connection_params(matches)?, - sub_sign: rialto_signing_params(matches)?, - metrics_params: metrics_params(matches)?, - instance: instance_params(matches)?, - mode, - }; - - log::debug!(target: "bridge", "Ethereum exchange params: {:?}", params); - - Ok(params) -} - -fn metrics_params(matches: &clap::ArgMatches) -> anyhow::Result { - if matches.is_present("no-prometheus") { - return Ok(None.into()) - } - - let mut metrics_params = MetricsAddress::default(); - - if let Some(prometheus_host) = matches.value_of("prometheus-host") { - metrics_params.host = prometheus_host.into(); - } - if let Some(prometheus_port) = matches.value_of("prometheus-port") { - metrics_params.port = prometheus_port - .parse() - .map_err(|e| anyhow!("Failed to parse prometheus-port: {}", e))?; - } - - Ok(Some(metrics_params).into()) -} - -fn instance_params(matches: &clap::ArgMatches) -> anyhow::Result> { - let instance = if let Some(instance) = matches.value_of("sub-pallet-instance") { - match instance.to_lowercase().as_str() { - "rialto" => Arc::new(RialtoPoA) as Arc, - "kovan" => Arc::new(Kovan), - _ => return Err(anyhow!("Unsupported bridge pallet instance")), - } - } else { - unreachable!("CLI config enforces a default instance, can never be None") - }; - - Ok(instance) -} - -fn parse_hex_argument(matches: &clap::ArgMatches, arg: &str) -> anyhow::Result>> { - match matches.value_of(arg) { - Some(value) => - Ok(Some(hex::decode(value).map_err(|e| anyhow!("Failed to parse {}: {}", arg, e))?)), - None => Ok(None), - } -} diff --git a/relays/bin-ethereum/src/rialto_client.rs b/relays/bin-ethereum/src/rialto_client.rs deleted file mode 100644 index 1dadf9f7ddff..000000000000 --- a/relays/bin-ethereum/src/rialto_client.rs +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
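The `sub-recipient` handling in the deleted main.rs above decodes a hex string into a fixed 32-byte account id, rejecting any other length and falling back to a well-known default account. The same length-checked decode, sketched without the `hex` crate:

```rust
// Decode a 64-character hex string into a fixed 32-byte account id.
fn parse_recipient(hex_str: &str) -> Result<[u8; 32], String> {
    // 32 bytes require exactly 64 ASCII hex characters.
    if !hex_str.is_ascii() || hex_str.len() != 64 {
        return Err(format!(
            "invalid length: expected 64 hex characters, got {}",
            hex_str.len()
        ));
    }
    let mut out = [0u8; 32];
    for (i, byte) in out.iter_mut().enumerate() {
        *byte = u8::from_str_radix(&hex_str[2 * i..2 * i + 2], 16)
            .map_err(|e| format!("invalid hex at byte {}: {}", i, e))?;
    }
    Ok(out)
}

fn main() {
    // Ferdie's well-known account id, as used for the default above.
    let ferdie = "1cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c";
    assert!(parse_recipient(ferdie).is_ok());
    assert!(parse_recipient("deadbeef").is_err()); // too short
}
```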
- -use crate::{ - ethereum_sync_loop::QueuedEthereumHeader, instances::BridgeInstance, rpc_errors::RpcError, -}; - -use async_trait::async_trait; -use bp_eth_poa::AuraHeader as SubstrateEthereumHeader; -use codec::{Decode, Encode}; -use headers_relay::sync_types::SubmittedHeaders; -use relay_ethereum_client::types::HeaderId as EthereumHeaderId; -use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams}; -use relay_substrate_client::{ - Client as SubstrateClient, TransactionSignScheme, UnsignedTransaction, -}; -use relay_utils::HeaderId; -use sp_core::{crypto::Pair, Bytes}; -use std::{collections::VecDeque, sync::Arc}; - -const ETH_API_IMPORT_REQUIRES_RECEIPTS: &str = "RialtoPoAHeaderApi_is_import_requires_receipts"; -const ETH_API_IS_KNOWN_BLOCK: &str = "RialtoPoAHeaderApi_is_known_block"; -const ETH_API_BEST_BLOCK: &str = "RialtoPoAHeaderApi_best_block"; -const ETH_API_BEST_FINALIZED_BLOCK: &str = "RialtoPoAHeaderApi_finalized_block"; -const EXCH_API_FILTER_TRANSACTION_PROOF: &str = - "RialtoCurrencyExchangeApi_filter_transaction_proof"; - -type RpcResult = std::result::Result; - -/// A trait which contains methods that work by using multiple low-level RPCs, or more complicated -/// interactions involving, for example, an Ethereum bridge module. -#[async_trait] -pub trait SubstrateHighLevelRpc { - /// Returns the best Ethereum block that Substrate runtime knows of. - async fn best_ethereum_block(&self) -> RpcResult; - /// Returns best finalized Ethereum block that Substrate runtime knows of. - async fn best_ethereum_finalized_block(&self) -> RpcResult; - /// Returns whether transactions receipts are required for Ethereum header submission. - async fn ethereum_receipts_required(&self, header: SubstrateEthereumHeader) -> RpcResult; - /// Returns whether the given Ethereum header is known to the Substrate runtime. - async fn ethereum_header_known(&self, header_id: EthereumHeaderId) -> RpcResult; -} - -#[async_trait] -impl SubstrateHighLevelRpc for SubstrateClient { - async fn best_ethereum_block(&self) -> RpcResult { - let call = ETH_API_BEST_BLOCK.to_string(); - let data = Bytes(Vec::new()); - - let encoded_response = self.state_call(call, data, None).await?; - let decoded_response: (u64, bp_eth_poa::H256) = - Decode::decode(&mut &encoded_response.0[..])?; - - let best_header_id = HeaderId(decoded_response.0, decoded_response.1); - Ok(best_header_id) - } - - async fn best_ethereum_finalized_block(&self) -> RpcResult { - let call = ETH_API_BEST_FINALIZED_BLOCK.to_string(); - let data = Bytes(Vec::new()); - - let encoded_response = self.state_call(call, data, None).await?; - let decoded_response: (u64, bp_eth_poa::H256) = - Decode::decode(&mut &encoded_response.0[..])?; - - let best_header_id = HeaderId(decoded_response.0, decoded_response.1); - Ok(best_header_id) - } - - async fn ethereum_receipts_required(&self, header: SubstrateEthereumHeader) -> RpcResult { - let call = ETH_API_IMPORT_REQUIRES_RECEIPTS.to_string(); - let data = Bytes(header.encode()); - - let encoded_response = self.state_call(call, data, None).await?; - let receipts_required: bool = Decode::decode(&mut &encoded_response.0[..])?; - - Ok(receipts_required) - } - - // The Substrate module could prune old headers. So this function could return false even - // if header is synced. And we'll mark corresponding Ethereum header as Orphan. - // - // But when we read the best header from Substrate next time, we will know that - // there's a better header. 
This Orphan will either be marked as synced, or - // eventually pruned. - async fn ethereum_header_known(&self, header_id: EthereumHeaderId) -> RpcResult<bool> { - let call = ETH_API_IS_KNOWN_BLOCK.to_string(); - let data = Bytes(header_id.1.encode()); - - let encoded_response = self.state_call(call, data, None).await?; - let is_known_block: bool = Decode::decode(&mut &encoded_response.0[..])?; - - Ok(is_known_block) - } -} - -/// A trait for RPC calls which are used to submit Ethereum headers to a Substrate -/// runtime. These are typically calls which use a combination of other low-level RPC -/// calls. -#[async_trait] -pub trait SubmitEthereumHeaders { - /// Submits Ethereum header to Substrate runtime. - async fn submit_ethereum_headers( - &self, - params: RialtoSigningParams, - instance: Arc<dyn BridgeInstance>, - headers: Vec<QueuedEthereumHeader>, - sign_transactions: bool, - ) -> SubmittedHeaders<EthereumHeaderId, RpcError>; - - /// Submits signed Ethereum header to Substrate runtime. - async fn submit_signed_ethereum_headers( - &self, - params: RialtoSigningParams, - instance: Arc<dyn BridgeInstance>, - headers: Vec<QueuedEthereumHeader>, - ) -> SubmittedHeaders<EthereumHeaderId, RpcError>; - - /// Submits unsigned Ethereum header to Substrate runtime. - async fn submit_unsigned_ethereum_headers( - &self, - instance: Arc<dyn BridgeInstance>, - headers: Vec<QueuedEthereumHeader>, - ) -> SubmittedHeaders<EthereumHeaderId, RpcError>; -} - -#[async_trait] -impl SubmitEthereumHeaders for SubstrateClient<Rialto> { - async fn submit_ethereum_headers( - &self, - params: RialtoSigningParams, - instance: Arc<dyn BridgeInstance>, - headers: Vec<QueuedEthereumHeader>, - sign_transactions: bool, - ) -> SubmittedHeaders<EthereumHeaderId, RpcError> { - if sign_transactions { - self.submit_signed_ethereum_headers(params, instance, headers).await - } else { - self.submit_unsigned_ethereum_headers(instance, headers).await - } - } - - async fn submit_signed_ethereum_headers( - &self, - params: RialtoSigningParams, - instance: Arc<dyn BridgeInstance>, - headers: Vec<QueuedEthereumHeader>, - ) -> SubmittedHeaders<EthereumHeaderId, RpcError> { - let ids = headers.iter().map(|header| header.id()).collect(); - let genesis_hash = *self.genesis_hash(); - let submission_result = async { - self.submit_signed_extrinsic( - (*params.public().as_array_ref()).into(), - move |_, transaction_nonce| { - Bytes( - Rialto::sign_transaction( - genesis_hash, - &params, - relay_substrate_client::TransactionEra::immortal(), - UnsignedTransaction::new( - instance.build_signed_header_call(headers), - transaction_nonce, - ), - ) - .encode(), - ) - }, - ) - .await?; - Ok(()) - } - .await; - - match submission_result { - Ok(_) => SubmittedHeaders { - submitted: ids, - incomplete: Vec::new(), - rejected: Vec::new(), - fatal_error: None, - }, - Err(error) => SubmittedHeaders { - submitted: Vec::new(), - incomplete: Vec::new(), - rejected: ids, - fatal_error: Some(error), - }, - } - } - - async fn submit_unsigned_ethereum_headers( - &self, - instance: Arc<dyn BridgeInstance>, - headers: Vec<QueuedEthereumHeader>, - ) -> SubmittedHeaders<EthereumHeaderId, RpcError> { - let mut ids = headers.iter().map(|header| header.id()).collect::<VecDeque<_>>(); - let mut submitted_headers = SubmittedHeaders::default(); - - for header in headers { - let id = ids.pop_front().expect("both collections have same size; qed"); - - let call = instance.build_unsigned_header_call(header); - let transaction = create_unsigned_submit_transaction(call); - - match self.submit_unsigned_extrinsic(Bytes(transaction.encode())).await { - Ok(_) => submitted_headers.submitted.push(id), - Err(error) => { - submitted_headers.rejected.push(id); - submitted_headers.rejected.extend(ids); - submitted_headers.fatal_error = Some(error.into()); - break - }, - } - } - - submitted_headers - } -} - -/// A trait for RPC calls which are used to submit proof of Ethereum exchange transaction to a -/// Substrate runtime.
These are typically calls which use a combination of other low-level RPC -/// calls. -#[async_trait] -pub trait SubmitEthereumExchangeTransactionProof { - /// Pre-verify Ethereum exchange transaction proof. - async fn verify_exchange_transaction_proof( - &self, - proof: rialto_runtime::exchange::EthereumTransactionInclusionProof, - ) -> RpcResult<bool>; - /// Submits Ethereum exchange transaction proof to Substrate runtime. - async fn submit_exchange_transaction_proof( - &self, - params: RialtoSigningParams, - instance: Arc<dyn BridgeInstance>, - proof: rialto_runtime::exchange::EthereumTransactionInclusionProof, - ) -> RpcResult<()>; -} - -#[async_trait] -impl SubmitEthereumExchangeTransactionProof for SubstrateClient<Rialto> { - async fn verify_exchange_transaction_proof( - &self, - proof: rialto_runtime::exchange::EthereumTransactionInclusionProof, - ) -> RpcResult<bool> { - let call = EXCH_API_FILTER_TRANSACTION_PROOF.to_string(); - let data = Bytes(proof.encode()); - - let encoded_response = self.state_call(call, data, None).await?; - let is_allowed: bool = Decode::decode(&mut &encoded_response.0[..])?; - - Ok(is_allowed) - } - - async fn submit_exchange_transaction_proof( - &self, - params: RialtoSigningParams, - instance: Arc<dyn BridgeInstance>, - proof: rialto_runtime::exchange::EthereumTransactionInclusionProof, - ) -> RpcResult<()> { - let genesis_hash = *self.genesis_hash(); - self.submit_signed_extrinsic( - (*params.public().as_array_ref()).into(), - move |_, transaction_nonce| { - Bytes( - Rialto::sign_transaction( - genesis_hash, - &params, - relay_substrate_client::TransactionEra::immortal(), - UnsignedTransaction::new( - instance.build_currency_exchange_call(proof), - transaction_nonce, - ), - ) - .encode(), - ) - }, - ) - .await?; - Ok(()) - } -} - -/// Create unsigned Substrate transaction for submitting Ethereum header. -fn create_unsigned_submit_transaction( - call: rialto_runtime::Call, -) -> rialto_runtime::UncheckedExtrinsic { - rialto_runtime::UncheckedExtrinsic::new_unsigned(call) -} diff --git a/relays/bin-ethereum/src/rpc_errors.rs b/relays/bin-ethereum/src/rpc_errors.rs deleted file mode 100644 index e91bc363839b..000000000000 --- a/relays/bin-ethereum/src/rpc_errors.rs +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>. - -use relay_ethereum_client::Error as EthereumNodeError; -use relay_substrate_client::Error as SubstrateNodeError; -use relay_utils::MaybeConnectionError; -use thiserror::Error; - -/// Contains common errors that can occur when -/// interacting with a Substrate or Ethereum node -/// through RPC. -#[derive(Debug, Error)] -pub enum RpcError { - /// The arguments to the RPC method failed to serialize. - #[error("RPC arguments serialization failed: {0}")] - Serialization(#[from] serde_json::Error), - /// An error occurred when interacting with an Ethereum node.
- #[error("Ethereum node error: {0}")] - Ethereum(#[from] EthereumNodeError), - /// An error occurred when interacting with a Substrate node. - #[error("Substrate node error: {0}")] - Substrate(#[from] SubstrateNodeError), - /// Error running relay loop. - #[error("{0}")] - SyncLoop(String), -} - -impl From for String { - fn from(err: RpcError) -> Self { - format!("{}", err) - } -} - -impl From for RpcError { - fn from(err: ethabi::Error) -> Self { - Self::Ethereum(EthereumNodeError::ResponseParseFailed(format!("{}", err))) - } -} - -impl MaybeConnectionError for RpcError { - fn is_connection_error(&self) -> bool { - match self { - RpcError::Ethereum(ref error) => error.is_connection_error(), - RpcError::Substrate(ref error) => error.is_connection_error(), - _ => false, - } - } -} - -impl From for RpcError { - fn from(err: codec::Error) -> Self { - Self::Substrate(SubstrateNodeError::ResponseParseFailed(err)) - } -} diff --git a/relays/bin-ethereum/src/substrate_sync_loop.rs b/relays/bin-ethereum/src/substrate_sync_loop.rs deleted file mode 100644 index 4b5bd4fa7326..000000000000 --- a/relays/bin-ethereum/src/substrate_sync_loop.rs +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Rialto-Substrate -> Ethereum PoA synchronization. - -use crate::{ethereum_client::EthereumHighLevelRpc, rpc_errors::RpcError}; - -use async_trait::async_trait; -use codec::Encode; -use headers_relay::{ - sync::HeadersSyncParams, - sync_loop::TargetClient, - sync_types::{HeadersSyncPipeline, QueuedHeader, SourceHeader, SubmittedHeaders}, -}; -use relay_ethereum_client::{ - types::Address, Client as EthereumClient, ConnectionParams as EthereumConnectionParams, - SigningParams as EthereumSigningParams, -}; -use relay_rialto_client::{HeaderId as RialtoHeaderId, Rialto, SyncHeader as RialtoSyncHeader}; -use relay_substrate_client::{ - headers_source::HeadersSource, Chain as SubstrateChain, Client as SubstrateClient, - ConnectionParams as SubstrateConnectionParams, -}; -use relay_utils::{metrics::MetricsParams, relay_loop::Client as RelayClient}; -use sp_runtime::EncodedJustification; - -use std::{collections::HashSet, fmt::Debug, time::Duration}; - -pub mod consts { - use super::*; - - /// Interval at which we check new Ethereum blocks. - pub const ETHEREUM_TICK_INTERVAL: Duration = Duration::from_secs(5); - /// Max Ethereum headers we want to have in all 'before-submitted' states. - pub const MAX_FUTURE_HEADERS_TO_DOWNLOAD: usize = 8; - /// Max Ethereum headers count we want to have in 'submitted' state. - pub const MAX_SUBMITTED_HEADERS: usize = 4; - /// Max depth of in-memory headers in all states. Past this depth they will be forgotten - /// (pruned). - pub const PRUNE_DEPTH: u32 = 256; -} - -/// Substrate synchronization parameters. 
-#[derive(Debug)] -pub struct SubstrateSyncParams { - /// Substrate connection params. - pub sub_params: SubstrateConnectionParams, - /// Ethereum connection params. - pub eth_params: EthereumConnectionParams, - /// Ethereum signing params. - pub eth_sign: EthereumSigningParams, - /// Ethereum bridge contract address. - pub eth_contract_address: Address, - /// Synchronization parameters. - pub sync_params: HeadersSyncParams, - /// Metrics parameters. - pub metrics_params: MetricsParams, -} - -/// Substrate synchronization pipeline. -#[derive(Clone, Copy, Debug)] -#[cfg_attr(test, derive(PartialEq))] -pub struct SubstrateHeadersSyncPipeline; - -impl HeadersSyncPipeline for SubstrateHeadersSyncPipeline { - const SOURCE_NAME: &'static str = "Substrate"; - const TARGET_NAME: &'static str = "Ethereum"; - - type Hash = rialto_runtime::Hash; - type Number = rialto_runtime::BlockNumber; - type Header = RialtoSyncHeader; - type Extra = (); - type Completion = EncodedJustification; - - fn estimate_size(source: &QueuedHeader) -> usize { - source.header().encode().len() - } -} - -/// Queued substrate header ID. -pub type QueuedRialtoHeader = QueuedHeader; - -/// Rialto node as headers source. -type SubstrateHeadersSource = HeadersSource; - -/// Ethereum client as Substrate headers target. -#[derive(Clone)] -struct EthereumHeadersTarget { - /// Ethereum node client. - client: EthereumClient, - /// Bridge contract address. - contract: Address, - /// Ethereum signing params. - sign_params: EthereumSigningParams, -} - -impl EthereumHeadersTarget { - fn new(client: EthereumClient, contract: Address, sign_params: EthereumSigningParams) -> Self { - Self { client, contract, sign_params } - } -} - -#[async_trait] -impl RelayClient for EthereumHeadersTarget { - type Error = RpcError; - - async fn reconnect(&mut self) -> Result<(), RpcError> { - self.client.reconnect().await.map_err(Into::into) - } -} - -#[async_trait] -impl TargetClient for EthereumHeadersTarget { - async fn best_header_id(&self) -> Result { - // we can't continue to relay headers if Ethereum node is out of sync, because - // it may have already received (some of) headers that we're going to relay - self.client.ensure_synced().await?; - - self.client.best_substrate_block(self.contract).await - } - - async fn is_known_header( - &self, - id: RialtoHeaderId, - ) -> Result<(RialtoHeaderId, bool), RpcError> { - self.client.substrate_header_known(self.contract, id).await - } - - async fn submit_headers( - &self, - headers: Vec, - ) -> SubmittedHeaders { - self.client - .submit_substrate_headers(self.sign_params.clone(), self.contract, headers) - .await - } - - async fn incomplete_headers_ids(&self) -> Result, RpcError> { - self.client.incomplete_substrate_headers(self.contract).await - } - - async fn complete_header( - &self, - id: RialtoHeaderId, - completion: EncodedJustification, - ) -> Result { - self.client - .complete_substrate_header(self.sign_params.clone(), self.contract, id, completion) - .await - } - - async fn requires_extra( - &self, - header: QueuedRialtoHeader, - ) -> Result<(RialtoHeaderId, bool), RpcError> { - Ok((header.header().id(), false)) - } -} - -/// Run Substrate headers synchronization. 
-pub async fn run(params: SubstrateSyncParams) -> Result<(), RpcError> { - let SubstrateSyncParams { - sub_params, - eth_params, - eth_sign, - eth_contract_address, - sync_params, - metrics_params, - } = params; - - let eth_client = EthereumClient::new(eth_params).await; - let sub_client = SubstrateClient::::new(sub_params).await; - - let target = EthereumHeadersTarget::new(eth_client, eth_contract_address, eth_sign); - let source = SubstrateHeadersSource::new(sub_client); - - headers_relay::sync_loop::run( - source, - Rialto::AVERAGE_BLOCK_INTERVAL, - target, - consts::ETHEREUM_TICK_INTERVAL, - (), - sync_params, - metrics_params, - futures::future::pending(), - ) - .await - .map_err(|e| RpcError::SyncLoop(e.to_string()))?; - - Ok(()) -} diff --git a/relays/bin-ethereum/src/substrate_types.rs b/relays/bin-ethereum/src/substrate_types.rs deleted file mode 100644 index f9e6c29c6a65..000000000000 --- a/relays/bin-ethereum/src/substrate_types.rs +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2020-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Converting between Ethereum headers and bridge module types. - -use bp_eth_poa::{ - AuraHeader as SubstrateEthereumHeader, LogEntry as SubstrateEthereumLogEntry, - Receipt as SubstrateEthereumReceipt, TransactionOutcome as SubstrateEthereumTransactionOutcome, -}; -use relay_ethereum_client::types::{ - Header as EthereumHeader, Receipt as EthereumReceipt, - HEADER_ID_PROOF as ETHEREUM_HEADER_ID_PROOF, -}; - -/// Convert Ethereum header into Ethereum header for Substrate. -pub fn into_substrate_ethereum_header(header: &EthereumHeader) -> SubstrateEthereumHeader { - SubstrateEthereumHeader { - parent_hash: header.parent_hash, - timestamp: header.timestamp.as_u64(), - number: header.number.expect(ETHEREUM_HEADER_ID_PROOF).as_u64(), - author: header.author, - transactions_root: header.transactions_root, - uncles_hash: header.uncles_hash, - extra_data: header.extra_data.0.clone(), - state_root: header.state_root, - receipts_root: header.receipts_root, - log_bloom: header.logs_bloom.unwrap_or_default().data().into(), - gas_used: header.gas_used, - gas_limit: header.gas_limit, - difficulty: header.difficulty, - seal: header.seal_fields.iter().map(|s| s.0.clone()).collect(), - } -} - -/// Convert Ethereum transactions receipts into Ethereum transactions receipts for Substrate. -pub fn into_substrate_ethereum_receipts( - receipts: &Option>, -) -> Option> { - receipts - .as_ref() - .map(|receipts| receipts.iter().map(into_substrate_ethereum_receipt).collect()) -} - -/// Convert Ethereum transactions receipt into Ethereum transactions receipt for Substrate. 
-pub fn into_substrate_ethereum_receipt(receipt: &EthereumReceipt) -> SubstrateEthereumReceipt { - SubstrateEthereumReceipt { - gas_used: receipt.cumulative_gas_used, - log_bloom: receipt.logs_bloom.data().into(), - logs: receipt - .logs - .iter() - .map(|log_entry| SubstrateEthereumLogEntry { - address: log_entry.address, - topics: log_entry.topics.clone(), - data: log_entry.data.0.clone(), - }) - .collect(), - outcome: match (receipt.status, receipt.root) { - (Some(status), None) => - SubstrateEthereumTransactionOutcome::StatusCode(status.as_u64() as u8), - (None, Some(root)) => SubstrateEthereumTransactionOutcome::StateRoot(root), - _ => SubstrateEthereumTransactionOutcome::Unknown, - }, - } -} diff --git a/relays/bin-substrate/src/chains/kusama.rs b/relays/bin-substrate/src/chains/kusama.rs index 9e5351672dad..b12d23f2a56d 100644 --- a/relays/bin-substrate/src/chains/kusama.rs +++ b/relays/bin-substrate/src/chains/kusama.rs @@ -17,6 +17,8 @@ use codec::Decode; use frame_support::weights::{DispatchClass, DispatchInfo, Pays, Weight}; use relay_kusama_client::Kusama; +use sp_core::storage::StorageKey; +use sp_runtime::{FixedPointNumber, FixedU128}; use sp_version::RuntimeVersion; use crate::cli::{ @@ -101,3 +103,14 @@ impl CliChain for Kusama { anyhow::bail!("Sending messages from Kusama is not yet supported.") } } + +/// Storage key and initial value of Polkadot -> Kusama conversion rate. +pub(crate) fn polkadot_to_kusama_conversion_rate_params() -> (StorageKey, FixedU128) { + ( + bp_runtime::storage_parameter_key( + bp_kusama::POLKADOT_TO_KUSAMA_CONVERSION_RATE_PARAMETER_NAME, + ), + // starting the relay before this parameter is set to some value may cause trouble + FixedU128::from_inner(FixedU128::DIV), + ) +} diff --git a/relays/bin-substrate/src/chains/kusama_headers_to_polkadot.rs b/relays/bin-substrate/src/chains/kusama_headers_to_polkadot.rs index ea362bd202b1..ce631ef41e0a 100644 --- a/relays/bin-substrate/src/chains/kusama_headers_to_polkadot.rs +++ b/relays/bin-substrate/src/chains/kusama_headers_to_polkadot.rs @@ -64,10 +64,7 @@ impl SubstrateFinalitySyncPipeline for KusamaFinalityToPolkadot { type TargetChain = Polkadot; fn customize_metrics(params: MetricsParams) -> anyhow::Result { - crate::chains::add_polkadot_kusama_price_metrics::( - Some(finality_relay::metrics_prefix::()), - params, - ) + crate::chains::add_polkadot_kusama_price_metrics::(params) } fn start_relay_guards(&self) { diff --git a/relays/bin-substrate/src/chains/kusama_messages_to_polkadot.rs b/relays/bin-substrate/src/chains/kusama_messages_to_polkadot.rs index 9c9dee150dc4..32133adc3e54 100644 --- a/relays/bin-substrate/src/chains/kusama_messages_to_polkadot.rs +++ b/relays/bin-substrate/src/chains/kusama_messages_to_polkadot.rs @@ -21,7 +21,6 @@ use std::ops::RangeInclusive; use codec::Encode; use frame_support::weights::Weight; use sp_core::{Bytes, Pair}; -use sp_runtime::{FixedPointNumber, FixedU128}; use bp_messages::MessageNonce; use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; @@ -33,7 +32,6 @@ use relay_polkadot_client::{ HeaderId as PolkadotHeaderId, Polkadot, SigningParams as PolkadotSigningParams, }; use relay_substrate_client::{Chain, Client, TransactionSignScheme, UnsignedTransaction}; -use relay_utils::metrics::MetricsParams; use substrate_relay_helper::{ messages_lane::{ select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics, }, @@ -196,12 +194,13 @@ pub async fn run( let lane_id = params.lane_id; let source_client = params.source_client;
+ let target_client = params.target_client; let lane = KusamaMessagesToPolkadot { message_lane: SubstrateMessageLaneToSubstrate { source_client: source_client.clone(), source_sign: params.source_sign, source_transactions_mortality: params.source_transactions_mortality, - target_client: params.target_client.clone(), + target_client: target_client.clone(), target_sign: params.target_sign, target_transactions_mortality: params.target_transactions_mortality, relayer_id_at_source: relayer_id_at_kusama, @@ -240,13 +239,10 @@ pub async fn run( stall_timeout, ); - let (metrics_params, metrics_values) = add_standalone_metrics( - Some(messages_relay::message_lane_loop::metrics_prefix::< - ::MessageLane, - >(&lane_id)), - params.metrics_params, - source_client.clone(), - )?; + let standalone_metrics = params + .standalone_metrics + .map(Ok) + .unwrap_or_else(|| standalone_metrics(source_client.clone(), target_client.clone()))?; messages_relay::message_lane_loop::run( messages_relay::message_lane_loop::Params { lane: lane_id, @@ -272,41 +268,31 @@ pub async fn run( params.target_to_source_headers_relay, ), PolkadotTargetClient::new( - params.target_client, + target_client, lane, lane_id, - metrics_values, + standalone_metrics.clone(), params.source_to_target_headers_relay, ), - metrics_params, + standalone_metrics.register_and_spawn(params.metrics_params)?, futures::future::pending(), ) .await .map_err(Into::into) } -/// Add standalone metrics for the Kusama -> Polkadot messages loop. -pub(crate) fn add_standalone_metrics( - metrics_prefix: Option, - metrics_params: MetricsParams, +/// Create standalone metrics for the Kusama -> Polkadot messages loop. +pub(crate) fn standalone_metrics( source_client: Client, -) -> anyhow::Result<(MetricsParams, StandaloneMessagesMetrics)> { - let polkadot_to_kusama_conversion_rate_key = bp_runtime::storage_parameter_key( - bp_kusama::POLKADOT_TO_KUSAMA_CONVERSION_RATE_PARAMETER_NAME, - ) - .0; - - substrate_relay_helper::messages_lane::add_standalone_metrics::( - metrics_prefix, - metrics_params, + target_client: Client, +) -> anyhow::Result> { + substrate_relay_helper::messages_lane::standalone_metrics( source_client, - Some(crate::chains::polkadot::TOKEN_ID), + target_client, Some(crate::chains::kusama::TOKEN_ID), - Some(( - sp_core::storage::StorageKey(polkadot_to_kusama_conversion_rate_key), - // starting relay before this parameter will be set to some value may cause troubles - FixedU128::from_inner(FixedU128::DIV), - )), + Some(crate::chains::polkadot::TOKEN_ID), + Some(crate::chains::polkadot::kusama_to_polkadot_conversion_rate_params()), + Some(crate::chains::kusama::polkadot_to_kusama_conversion_rate_params()), ) } diff --git a/relays/bin-substrate/src/chains/millau.rs b/relays/bin-substrate/src/chains/millau.rs index 1dbeab9a9049..755d7cc4430a 100644 --- a/relays/bin-substrate/src/chains/millau.rs +++ b/relays/bin-substrate/src/chains/millau.rs @@ -28,8 +28,17 @@ use bp_message_dispatch::{CallOrigin, MessagePayload}; use codec::Decode; use frame_support::weights::{DispatchInfo, GetDispatchInfo, Weight}; use relay_millau_client::Millau; +use sp_core::storage::StorageKey; +use sp_runtime::FixedU128; use sp_version::RuntimeVersion; +// Millau/Rialto tokens have no real value, so the conversion rate we use is always 1:1. But we +// want to test our code that is intended to work with real-value chains. So to keep it close to +// 1:1, we'll be treating Rialto as BTC and Millau as wBTC (only in relayer).
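A note on the 1:1 rate in the comment above (and on the `FixedU128::from_inner(FixedU128::DIV)` initial values used by the `*_conversion_rate_params()` helpers in this patch): `FixedU128` stores its value scaled by `FixedU128::DIV` (10^18), so an inner value of exactly `DIV` represents 1.0. A minimal standalone sketch, not part of the patch, assuming only `sp_runtime`'s re-exported fixed-point types:

use sp_runtime::{FixedPointNumber, FixedU128};

fn main() {
	// `from_inner` interprets its argument as already scaled by `DIV` (10^18),
	// so passing `DIV` itself yields the fixed-point representation of 1.0.
	let one_to_one = FixedU128::from_inner(FixedU128::DIV);
	assert_eq!(one_to_one, FixedU128::saturating_from_integer(1));

	// Converting an amount at a 1:1 rate leaves it unchanged.
	assert_eq!(one_to_one.saturating_mul_int(5u128), 5u128);
}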
+ +/// The identifier of the token whose value is associated with the Millau token value by the relayer. +pub(crate) const ASSOCIATED_TOKEN_ID: &str = crate::chains::kusama::TOKEN_ID; + impl CliEncodeCall for Millau { fn max_extrinsic_size() -> u32 { bp_millau::max_extrinsic_size() @@ -123,3 +132,11 @@ impl CliChain for Millau { } } } + +/// Storage key and initial value of Rialto -> Millau conversion rate. +pub(crate) fn rialto_to_millau_conversion_rate_params() -> (StorageKey, FixedU128) { + ( + StorageKey(millau_runtime::rialto_messages::RialtoToMillauConversionRate::key().to_vec()), + millau_runtime::rialto_messages::INITIAL_RIALTO_TO_MILLAU_CONVERSION_RATE, + ) +} diff --git a/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs b/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs index 3661eb78c19f..c4179eea330f 100644 --- a/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs +++ b/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs @@ -33,7 +33,6 @@ use relay_rialto_client::{ HeaderId as RialtoHeaderId, Rialto, SigningParams as RialtoSigningParams, }; use relay_substrate_client::{Chain, Client, IndexOf, TransactionSignScheme, UnsignedTransaction}; -use relay_utils::metrics::MetricsParams; use substrate_relay_helper::{ messages_lane::{ select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics, }, @@ -193,12 +192,13 @@ pub async fn run( let lane_id = params.lane_id; let source_client = params.source_client; + let target_client = params.target_client; let lane = MillauMessagesToRialto { message_lane: SubstrateMessageLaneToSubstrate { source_client: source_client.clone(), source_sign: params.source_sign, source_transactions_mortality: params.source_transactions_mortality, - target_client: params.target_client.clone(), + target_client: target_client.clone(), target_sign: params.target_sign, target_transactions_mortality: params.target_transactions_mortality, relayer_id_at_source: relayer_id_at_millau, @@ -234,13 +234,10 @@ pub async fn run( stall_timeout, ); - let (metrics_params, metrics_values) = add_standalone_metrics( - Some(messages_relay::message_lane_loop::metrics_prefix::< - ::MessageLane, - >(&lane_id)), - params.metrics_params, - source_client.clone(), - )?; + let standalone_metrics = params + .standalone_metrics + .map(Ok) + .unwrap_or_else(|| standalone_metrics(source_client.clone(), target_client.clone()))?; messages_relay::message_lane_loop::run( messages_relay::message_lane_loop::Params { lane: lane_id, @@ -266,37 +263,31 @@ pub async fn run( params.target_to_source_headers_relay, ), RialtoTargetClient::new( - params.target_client, + target_client, lane, lane_id, - metrics_values, + standalone_metrics.clone(), params.source_to_target_headers_relay, ), - metrics_params, + standalone_metrics.register_and_spawn(params.metrics_params)?, futures::future::pending(), ) .await .map_err(Into::into) } -/// Add standalone metrics for the Millau -> Rialto messages loop. -pub(crate) fn add_standalone_metrics( - metrics_prefix: Option, - metrics_params: MetricsParams, +/// Create standalone metrics for the Millau -> Rialto messages loop.
+pub(crate) fn standalone_metrics( source_client: Client, -) -> anyhow::Result<(MetricsParams, StandaloneMessagesMetrics)> { - substrate_relay_helper::messages_lane::add_standalone_metrics::( - metrics_prefix, - metrics_params, + target_client: Client, +) -> anyhow::Result> { + substrate_relay_helper::messages_lane::standalone_metrics( source_client, - Some(crate::chains::MILLAU_ASSOCIATED_TOKEN_ID), - Some(crate::chains::RIALTO_ASSOCIATED_TOKEN_ID), - Some(( - sp_core::storage::StorageKey( - millau_runtime::rialto_messages::RialtoToMillauConversionRate::key().to_vec(), - ), - millau_runtime::rialto_messages::INITIAL_RIALTO_TO_MILLAU_CONVERSION_RATE, - )), + target_client, + Some(crate::chains::millau::ASSOCIATED_TOKEN_ID), + Some(crate::chains::rialto::ASSOCIATED_TOKEN_ID), + Some(crate::chains::rialto::millau_to_rialto_conversion_rate_params()), + Some(crate::chains::millau::rialto_to_millau_conversion_rate_params()), ) } diff --git a/relays/bin-substrate/src/chains/mod.rs b/relays/bin-substrate/src/chains/mod.rs index a96d46d9ecc8..e9cb2d9b737f 100644 --- a/relays/bin-substrate/src/chains/mod.rs +++ b/relays/bin-substrate/src/chains/mod.rs @@ -39,31 +39,16 @@ mod rococo; mod westend; mod wococo; -// Millau/Rialto tokens have no any real value, so the conversion rate we use is always 1:1. But we -// want to test our code that is intended to work with real-value chains. So to keep it close to -// 1:1, we'll be treating Rialto as BTC and Millau as wBTC (only in relayer). - -/// The identifier of token, which value is associated with Rialto token value by relayer. -pub(crate) const RIALTO_ASSOCIATED_TOKEN_ID: &str = polkadot::TOKEN_ID; -/// The identifier of token, which value is associated with Millau token value by relayer. -pub(crate) const MILLAU_ASSOCIATED_TOKEN_ID: &str = kusama::TOKEN_ID; - -use relay_utils::metrics::MetricsParams; +use relay_utils::metrics::{MetricsParams, StandaloneMetric}; pub(crate) fn add_polkadot_kusama_price_metrics( - prefix: Option, params: MetricsParams, ) -> anyhow::Result { - // Polkadot/Kusama prices are added as metrics here, because atm we don't have Polkadot <-> - // Kusama relays, but we want to test metrics/dashboards in advance - Ok(relay_utils::relay_metrics(prefix, params) - .standalone_metric(|registry, prefix| { - substrate_relay_helper::helpers::token_price_metric(registry, prefix, "polkadot") - })? - .standalone_metric(|registry, prefix| { - substrate_relay_helper::helpers::token_price_metric(registry, prefix, "kusama") - })? - .into_params()) + substrate_relay_helper::helpers::token_price_metric(polkadot::TOKEN_ID)? + .register_and_spawn(¶ms.registry)?; + substrate_relay_helper::helpers::token_price_metric(kusama::TOKEN_ID)? + .register_and_spawn(¶ms.registry)?; + Ok(params) } #[cfg(test)] diff --git a/relays/bin-substrate/src/chains/polkadot.rs b/relays/bin-substrate/src/chains/polkadot.rs index 55d17e46f13b..7b6256d1749f 100644 --- a/relays/bin-substrate/src/chains/polkadot.rs +++ b/relays/bin-substrate/src/chains/polkadot.rs @@ -17,6 +17,8 @@ use codec::Decode; use frame_support::weights::{DispatchClass, DispatchInfo, Pays, Weight}; use relay_polkadot_client::Polkadot; +use sp_core::storage::StorageKey; +use sp_runtime::{FixedPointNumber, FixedU128}; use sp_version::RuntimeVersion; use crate::cli::{ @@ -101,3 +103,14 @@ impl CliChain for Polkadot { anyhow::bail!("Sending messages from Polkadot is not yet supported.") } } + +/// Storage key and initial value of Kusama -> Polkadot conversion rate. 
+pub(crate) fn kusama_to_polkadot_conversion_rate_params() -> (StorageKey, FixedU128) { + ( + bp_runtime::storage_parameter_key( + bp_polkadot::KUSAMA_TO_POLKADOT_CONVERSION_RATE_PARAMETER_NAME, + ), + // starting the relay before this parameter is set to some value may cause trouble + FixedU128::from_inner(FixedU128::DIV), + ) +} diff --git a/relays/bin-substrate/src/chains/polkadot_headers_to_kusama.rs b/relays/bin-substrate/src/chains/polkadot_headers_to_kusama.rs index 603d5ba3aa77..b1948b234cc3 100644 --- a/relays/bin-substrate/src/chains/polkadot_headers_to_kusama.rs +++ b/relays/bin-substrate/src/chains/polkadot_headers_to_kusama.rs @@ -64,10 +64,7 @@ impl SubstrateFinalitySyncPipeline for PolkadotFinalityToKusama { type TargetChain = Kusama; fn customize_metrics(params: MetricsParams) -> anyhow::Result { - crate::chains::add_polkadot_kusama_price_metrics::( - Some(finality_relay::metrics_prefix::()), - params, - ) + crate::chains::add_polkadot_kusama_price_metrics::(params) } fn start_relay_guards(&self) { diff --git a/relays/bin-substrate/src/chains/polkadot_messages_to_kusama.rs b/relays/bin-substrate/src/chains/polkadot_messages_to_kusama.rs index b1595665fb25..bc7f22243092 100644 --- a/relays/bin-substrate/src/chains/polkadot_messages_to_kusama.rs +++ b/relays/bin-substrate/src/chains/polkadot_messages_to_kusama.rs @@ -32,8 +32,6 @@ use relay_polkadot_client::{ HeaderId as PolkadotHeaderId, Polkadot, SigningParams as PolkadotSigningParams, }; use relay_substrate_client::{Chain, Client, TransactionSignScheme, UnsignedTransaction}; -use relay_utils::metrics::MetricsParams; -use sp_runtime::{FixedPointNumber, FixedU128}; use substrate_relay_helper::{ messages_lane::{ select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics, }, @@ -195,12 +193,13 @@ pub async fn run( let lane_id = params.lane_id; let source_client = params.source_client; + let target_client = params.target_client; let lane = PolkadotMessagesToKusama { message_lane: SubstrateMessageLaneToSubstrate { source_client: source_client.clone(), source_sign: params.source_sign, source_transactions_mortality: params.source_transactions_mortality, - target_client: params.target_client.clone(), + target_client: target_client.clone(), target_sign: params.target_sign, target_transactions_mortality: params.target_transactions_mortality, relayer_id_at_source: relayer_id_at_polkadot, @@ -239,13 +238,10 @@ pub async fn run( stall_timeout, ); - let (metrics_params, metrics_values) = add_standalone_metrics( - Some(messages_relay::message_lane_loop::metrics_prefix::< - ::MessageLane, - >(&lane_id)), - params.metrics_params, - source_client.clone(), - )?; + let standalone_metrics = params + .standalone_metrics + .map(Ok) + .unwrap_or_else(|| standalone_metrics(source_client.clone(), target_client.clone()))?; messages_relay::message_lane_loop::run( messages_relay::message_lane_loop::Params { lane: lane_id, @@ -271,41 +267,31 @@ pub async fn run( params.target_to_source_headers_relay, ), KusamaTargetClient::new( - params.target_client, + target_client, lane, lane_id, - metrics_values, + standalone_metrics.clone(), params.source_to_target_headers_relay, ), - metrics_params, + standalone_metrics.register_and_spawn(params.metrics_params)?, futures::future::pending(), ) .await .map_err(Into::into) } -/// Add standalone metrics for the Polkadot -> Kusama messages loop.
-pub(crate) fn add_standalone_metrics( - metrics_prefix: Option, - metrics_params: MetricsParams, +/// Create standalone metrics for the Polkadot -> Kusama messages loop. +pub(crate) fn standalone_metrics( source_client: Client, -) -> anyhow::Result<(MetricsParams, StandaloneMessagesMetrics)> { - let kusama_to_polkadot_conversion_rate_key = bp_runtime::storage_parameter_key( - bp_polkadot::KUSAMA_TO_POLKADOT_CONVERSION_RATE_PARAMETER_NAME, - ) - .0; - - substrate_relay_helper::messages_lane::add_standalone_metrics::( - metrics_prefix, - metrics_params, + target_client: Client, +) -> anyhow::Result> { + substrate_relay_helper::messages_lane::standalone_metrics( source_client, - Some(crate::chains::kusama::TOKEN_ID), + target_client, Some(crate::chains::polkadot::TOKEN_ID), - Some(( - sp_core::storage::StorageKey(kusama_to_polkadot_conversion_rate_key), - // starting relay before this parameter will be set to some value may cause troubles - FixedU128::from_inner(FixedU128::DIV), - )), + Some(crate::chains::kusama::TOKEN_ID), + Some(crate::chains::kusama::polkadot_to_kusama_conversion_rate_params()), + Some(crate::chains::polkadot::kusama_to_polkadot_conversion_rate_params()), ) } diff --git a/relays/bin-substrate/src/chains/rialto.rs b/relays/bin-substrate/src/chains/rialto.rs index 4c1a0166ed3b..2d873a24ba7a 100644 --- a/relays/bin-substrate/src/chains/rialto.rs +++ b/relays/bin-substrate/src/chains/rialto.rs @@ -28,8 +28,17 @@ use bp_message_dispatch::{CallOrigin, MessagePayload}; use codec::Decode; use frame_support::weights::{DispatchInfo, GetDispatchInfo, Weight}; use relay_rialto_client::Rialto; +use sp_core::storage::StorageKey; +use sp_runtime::FixedU128; use sp_version::RuntimeVersion; +// Millau/Rialto tokens have no real value, so the conversion rate we use is always 1:1. But we +// want to test our code that is intended to work with real-value chains. So to keep it close to +// 1:1, we'll be treating Rialto as BTC and Millau as wBTC (only in relayer). + +/// The identifier of the token whose value is associated with the Rialto token value by the relayer. +pub(crate) const ASSOCIATED_TOKEN_ID: &str = crate::chains::polkadot::TOKEN_ID; + impl CliEncodeCall for Rialto { fn max_extrinsic_size() -> u32 { bp_rialto::max_extrinsic_size() @@ -122,3 +131,11 @@ impl CliChain for Rialto { } } } + +/// Storage key and initial value of Millau -> Rialto conversion rate.
+pub(crate) fn millau_to_rialto_conversion_rate_params() -> (StorageKey, FixedU128) { + ( + StorageKey(rialto_runtime::millau_messages::MillauToRialtoConversionRate::key().to_vec()), + rialto_runtime::millau_messages::INITIAL_MILLAU_TO_RIALTO_CONVERSION_RATE, + ) +} diff --git a/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs b/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs index 50ebf264e1a4..774da017df0c 100644 --- a/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs +++ b/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs @@ -33,7 +33,6 @@ use relay_rialto_client::{ HeaderId as RialtoHeaderId, Rialto, SigningParams as RialtoSigningParams, }; use relay_substrate_client::{Chain, Client, IndexOf, TransactionSignScheme, UnsignedTransaction}; -use relay_utils::metrics::MetricsParams; use substrate_relay_helper::{ messages_lane::{ select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics, @@ -193,12 +192,13 @@ pub async fn run( let lane_id = params.lane_id; let source_client = params.source_client; + let target_client = params.target_client; let lane = RialtoMessagesToMillau { message_lane: SubstrateMessageLaneToSubstrate { source_client: source_client.clone(), source_sign: params.source_sign, source_transactions_mortality: params.source_transactions_mortality, - target_client: params.target_client.clone(), + target_client: target_client.clone(), target_sign: params.target_sign, target_transactions_mortality: params.target_transactions_mortality, relayer_id_at_source: relayer_id_at_rialto, @@ -233,13 +233,10 @@ pub async fn run( stall_timeout, ); - let (metrics_params, metrics_values) = add_standalone_metrics( - Some(messages_relay::message_lane_loop::metrics_prefix::< - ::MessageLane, - >(&lane_id)), - params.metrics_params, - source_client.clone(), - )?; + let standalone_metrics = params + .standalone_metrics + .map(Ok) + .unwrap_or_else(|| standalone_metrics(source_client.clone(), target_client.clone()))?; messages_relay::message_lane_loop::run( messages_relay::message_lane_loop::Params { lane: lane_id, @@ -265,37 +262,31 @@ pub async fn run( params.target_to_source_headers_relay, ), MillauTargetClient::new( - params.target_client, + target_client, lane, lane_id, - metrics_values, + standalone_metrics.clone(), params.source_to_target_headers_relay, ), - metrics_params, + standalone_metrics.register_and_spawn(params.metrics_params)?, futures::future::pending(), ) .await .map_err(Into::into) } -/// Add standalone metrics for the Rialto -> Millau messages loop. -pub(crate) fn add_standalone_metrics( - metrics_prefix: Option, - metrics_params: MetricsParams, +/// Create standalone metrics for the Rialto -> Millau messages loop. 
+pub(crate) fn standalone_metrics( source_client: Client, -) -> anyhow::Result<(MetricsParams, StandaloneMessagesMetrics)> { - substrate_relay_helper::messages_lane::add_standalone_metrics::( - metrics_prefix, - metrics_params, + target_client: Client, +) -> anyhow::Result> { + substrate_relay_helper::messages_lane::standalone_metrics( source_client, - Some(crate::chains::RIALTO_ASSOCIATED_TOKEN_ID), - Some(crate::chains::MILLAU_ASSOCIATED_TOKEN_ID), - Some(( - sp_core::storage::StorageKey( - rialto_runtime::millau_messages::MillauToRialtoConversionRate::key().to_vec(), - ), - rialto_runtime::millau_messages::INITIAL_MILLAU_TO_RIALTO_CONVERSION_RATE, - )), + target_client, + Some(crate::chains::rialto::ASSOCIATED_TOKEN_ID), + Some(crate::chains::millau::ASSOCIATED_TOKEN_ID), + Some(crate::chains::millau::rialto_to_millau_conversion_rate_params()), + Some(crate::chains::rialto::millau_to_rialto_conversion_rate_params()), ) } diff --git a/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs b/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs index 25fd97a90bab..ec98cec1ec1e 100644 --- a/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs +++ b/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs @@ -59,10 +59,7 @@ impl SubstrateFinalitySyncPipeline for RococoFinalityToWococo { type TargetChain = Wococo; fn customize_metrics(params: MetricsParams) -> anyhow::Result { - crate::chains::add_polkadot_kusama_price_metrics::( - Some(finality_relay::metrics_prefix::()), - params, - ) + crate::chains::add_polkadot_kusama_price_metrics::(params) } fn start_relay_guards(&self) { diff --git a/relays/bin-substrate/src/chains/rococo_messages_to_wococo.rs b/relays/bin-substrate/src/chains/rococo_messages_to_wococo.rs index 523d8c490859..d6c9040e1277 100644 --- a/relays/bin-substrate/src/chains/rococo_messages_to_wococo.rs +++ b/relays/bin-substrate/src/chains/rococo_messages_to_wococo.rs @@ -29,7 +29,6 @@ use relay_rococo_client::{ HeaderId as RococoHeaderId, Rococo, SigningParams as RococoSigningParams, }; use relay_substrate_client::{Chain, Client, IndexOf, TransactionSignScheme, UnsignedTransaction}; -use relay_utils::metrics::MetricsParams; use relay_wococo_client::{ HeaderId as WococoHeaderId, SigningParams as WococoSigningParams, Wococo, }; @@ -193,12 +192,13 @@ pub async fn run( let lane_id = params.lane_id; let source_client = params.source_client; + let target_client = params.target_client; let lane = RococoMessagesToWococo { message_lane: SubstrateMessageLaneToSubstrate { source_client: source_client.clone(), source_sign: params.source_sign, source_transactions_mortality: params.source_transactions_mortality, - target_client: params.target_client.clone(), + target_client: target_client.clone(), target_sign: params.target_sign, target_transactions_mortality: params.target_transactions_mortality, relayer_id_at_source: relayer_id_at_rococo, @@ -237,13 +237,10 @@ pub async fn run( stall_timeout, ); - let (metrics_params, metrics_values) = add_standalone_metrics( - Some(messages_relay::message_lane_loop::metrics_prefix::< - ::MessageLane, - >(&lane_id)), - params.metrics_params, - source_client.clone(), - )?; + let standalone_metrics = params + .standalone_metrics + .map(Ok) + .unwrap_or_else(|| standalone_metrics(source_client.clone(), target_client.clone()))?; messages_relay::message_lane_loop::run( messages_relay::message_lane_loop::Params { lane: lane_id, @@ -269,29 +266,28 @@ pub async fn run( params.target_to_source_headers_relay, ), WococoTargetClient::new( - 
params.target_client, + target_client, lane, lane_id, - metrics_values, + standalone_metrics.clone(), params.source_to_target_headers_relay, ), - metrics_params, + standalone_metrics.register_and_spawn(params.metrics_params)?, futures::future::pending(), ) .await .map_err(Into::into) } -/// Add standalone metrics for the Rococo -> Wococo messages loop. -pub(crate) fn add_standalone_metrics( - metrics_prefix: Option, - metrics_params: MetricsParams, +/// Create standalone metrics for the Rococo -> Wococo messages loop. +pub(crate) fn standalone_metrics( source_client: Client, -) -> anyhow::Result<(MetricsParams, StandaloneMessagesMetrics)> { - substrate_relay_helper::messages_lane::add_standalone_metrics::( - metrics_prefix, - metrics_params, + target_client: Client, +) -> anyhow::Result> { + substrate_relay_helper::messages_lane::standalone_metrics( source_client, + target_client, + None, None, None, None, diff --git a/relays/bin-substrate/src/chains/westend_headers_to_millau.rs b/relays/bin-substrate/src/chains/westend_headers_to_millau.rs index 64d8ba4d889d..211aa9da9bfe 100644 --- a/relays/bin-substrate/src/chains/westend_headers_to_millau.rs +++ b/relays/bin-substrate/src/chains/westend_headers_to_millau.rs @@ -57,10 +57,7 @@ impl SubstrateFinalitySyncPipeline for WestendFinalityToMillau { type TargetChain = Millau; fn customize_metrics(params: MetricsParams) -> anyhow::Result { - crate::chains::add_polkadot_kusama_price_metrics::( - Some(finality_relay::metrics_prefix::()), - params, - ) + crate::chains::add_polkadot_kusama_price_metrics::(params) } fn transactions_author(&self) -> bp_millau::AccountId { diff --git a/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs b/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs index 8e11698c1bb6..fe17976d06a8 100644 --- a/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs +++ b/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs @@ -64,10 +64,7 @@ impl SubstrateFinalitySyncPipeline for WococoFinalityToRococo { type TargetChain = Rococo; fn customize_metrics(params: MetricsParams) -> anyhow::Result { - crate::chains::add_polkadot_kusama_price_metrics::( - Some(finality_relay::metrics_prefix::()), - params, - ) + crate::chains::add_polkadot_kusama_price_metrics::(params) } fn start_relay_guards(&self) { diff --git a/relays/bin-substrate/src/chains/wococo_messages_to_rococo.rs b/relays/bin-substrate/src/chains/wococo_messages_to_rococo.rs index 893aeb607ab7..dcba89e43f05 100644 --- a/relays/bin-substrate/src/chains/wococo_messages_to_rococo.rs +++ b/relays/bin-substrate/src/chains/wococo_messages_to_rococo.rs @@ -29,7 +29,6 @@ use relay_rococo_client::{ HeaderId as RococoHeaderId, Rococo, SigningParams as RococoSigningParams, }; use relay_substrate_client::{Chain, Client, IndexOf, TransactionSignScheme, UnsignedTransaction}; -use relay_utils::metrics::MetricsParams; use relay_wococo_client::{ HeaderId as WococoHeaderId, SigningParams as WococoSigningParams, Wococo, }; @@ -192,12 +191,13 @@ pub async fn run( let lane_id = params.lane_id; let source_client = params.source_client; + let target_client = params.target_client; let lane = WococoMessagesToRococo { message_lane: SubstrateMessageLaneToSubstrate { source_client: source_client.clone(), source_sign: params.source_sign, source_transactions_mortality: params.source_transactions_mortality, - target_client: params.target_client.clone(), + target_client: target_client.clone(), target_sign: params.target_sign, target_transactions_mortality: 
params.target_transactions_mortality, relayer_id_at_source: relayer_id_at_wococo, @@ -236,13 +236,10 @@ pub async fn run( stall_timeout, ); - let (metrics_params, metrics_values) = add_standalone_metrics( - Some(messages_relay::message_lane_loop::metrics_prefix::< - ::MessageLane, - >(&lane_id)), - params.metrics_params, - source_client.clone(), - )?; + let standalone_metrics = params + .standalone_metrics + .map(Ok) + .unwrap_or_else(|| standalone_metrics(source_client.clone(), target_client.clone()))?; messages_relay::message_lane_loop::run( messages_relay::message_lane_loop::Params { lane: lane_id, @@ -268,29 +265,28 @@ pub async fn run( params.target_to_source_headers_relay, ), RococoTargetClient::new( - params.target_client, + target_client, lane, lane_id, - metrics_values, + standalone_metrics.clone(), params.source_to_target_headers_relay, ), - metrics_params, + standalone_metrics.register_and_spawn(params.metrics_params)?, futures::future::pending(), ) .await .map_err(Into::into) } -/// Add standalone metrics for the Wococo -> Rococo messages loop. -pub(crate) fn add_standalone_metrics( - metrics_prefix: Option, - metrics_params: MetricsParams, +/// Create standalone metrics for the Wococo -> Rococo messages loop. +pub(crate) fn standalone_metrics( source_client: Client, -) -> anyhow::Result<(MetricsParams, StandaloneMessagesMetrics)> { - substrate_relay_helper::messages_lane::add_standalone_metrics::( - metrics_prefix, - metrics_params, + target_client: Client, +) -> anyhow::Result> { + substrate_relay_helper::messages_lane::standalone_metrics( source_client, + target_client, + None, None, None, None, diff --git a/relays/bin-substrate/src/cli/encode_call.rs b/relays/bin-substrate/src/cli/encode_call.rs index f496f78b29d2..e17854662e5c 100644 --- a/relays/bin-substrate/src/cli/encode_call.rs +++ b/relays/bin-substrate/src/cli/encode_call.rs @@ -345,7 +345,7 @@ mod tests { // then assert!(format!("{:?}", call_hex).starts_with( - "0x10030000000001000000381409000000000001d43593c715fdd31c61141abd04a99fd6822c8558854cc\ + "0x0f030000000001000000381409000000000001d43593c715fdd31c61141abd04a99fd6822c8558854cc\ de39a5684e7a56da27d01d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d01" )) } diff --git a/relays/bin-substrate/src/cli/relay_headers.rs b/relays/bin-substrate/src/cli/relay_headers.rs index e90c663bb33a..82c55965a991 100644 --- a/relays/bin-substrate/src/cli/relay_headers.rs +++ b/relays/bin-substrate/src/cli/relay_headers.rs @@ -17,6 +17,7 @@ use structopt::StructOpt; use strum::{EnumString, EnumVariantNames, VariantNames}; +use relay_utils::metrics::{GlobalMetrics, StandaloneMetric}; use substrate_relay_helper::finality_pipeline::SubstrateFinalitySyncPipeline; use crate::cli::{ @@ -121,6 +122,8 @@ impl RelayHeaders { let target_transactions_mortality = self.target_sign.target_transactions_mortality; let target_sign = self.target_sign.to_keypair::()?; let metrics_params = Finality::customize_metrics(self.prometheus_params.into())?; + GlobalMetrics::new()?.register_and_spawn(&metrics_params.registry)?; + let finality = Finality::new(target_client.clone(), target_sign); finality.start_relay_guards(); diff --git a/relays/bin-substrate/src/cli/relay_headers_and_messages.rs b/relays/bin-substrate/src/cli/relay_headers_and_messages.rs index 076331112a21..9d76a0296fb2 100644 --- a/relays/bin-substrate/src/cli/relay_headers_and_messages.rs +++ b/relays/bin-substrate/src/cli/relay_headers_and_messages.rs @@ -34,8 +34,7 @@ use relay_substrate_client::{ use 
relay_utils::metrics::MetricsParams; use sp_core::{Bytes, Pair}; use substrate_relay_helper::{ - messages_lane::{MessagesRelayParams, SubstrateMessageLane}, - on_demand_headers::OnDemandHeadersRelay, + messages_lane::MessagesRelayParams, on_demand_headers::OnDemandHeadersRelay, }; use crate::{ @@ -129,11 +128,6 @@ macro_rules! select_bridge { type RightToLeftFinality = crate::chains::rialto_headers_to_millau::RialtoFinalityToMillau; - type LeftToRightMessages = - crate::chains::millau_messages_to_rialto::MillauMessagesToRialto; - type RightToLeftMessages = - crate::chains::rialto_messages_to_millau::RialtoMessagesToMillau; - type LeftAccountIdConverter = bp_millau::AccountIdConverter; type RightAccountIdConverter = bp_rialto::AccountIdConverter; @@ -144,12 +138,11 @@ macro_rules! select_bridge { use crate::chains::{ millau_messages_to_rialto::{ - add_standalone_metrics as add_left_to_right_standalone_metrics, + standalone_metrics as left_to_right_standalone_metrics, run as left_to_right_messages, update_rialto_to_millau_conversion_rate as update_right_to_left_conversion_rate, }, rialto_messages_to_millau::{ - add_standalone_metrics as add_right_to_left_standalone_metrics, run as right_to_left_messages, update_millau_to_rialto_conversion_rate as update_left_to_right_conversion_rate, }, @@ -184,11 +177,6 @@ macro_rules! select_bridge { type RightToLeftFinality = crate::chains::wococo_headers_to_rococo::WococoFinalityToRococo; - type LeftToRightMessages = - crate::chains::rococo_messages_to_wococo::RococoMessagesToWococo; - type RightToLeftMessages = - crate::chains::wococo_messages_to_rococo::WococoMessagesToRococo; - type LeftAccountIdConverter = bp_rococo::AccountIdConverter; type RightAccountIdConverter = bp_wococo::AccountIdConverter; @@ -199,11 +187,10 @@ macro_rules! select_bridge { use crate::chains::{ rococo_messages_to_wococo::{ - add_standalone_metrics as add_left_to_right_standalone_metrics, + standalone_metrics as left_to_right_standalone_metrics, run as left_to_right_messages, }, wococo_messages_to_rococo::{ - add_standalone_metrics as add_right_to_left_standalone_metrics, run as right_to_left_messages, }, }; @@ -253,11 +240,6 @@ macro_rules! select_bridge { type RightToLeftFinality = crate::chains::polkadot_headers_to_kusama::PolkadotFinalityToKusama; - type LeftToRightMessages = - crate::chains::kusama_messages_to_polkadot::KusamaMessagesToPolkadot; - type RightToLeftMessages = - crate::chains::polkadot_messages_to_kusama::PolkadotMessagesToKusama; - type LeftAccountIdConverter = bp_kusama::AccountIdConverter; type RightAccountIdConverter = bp_polkadot::AccountIdConverter; @@ -268,12 +250,11 @@ macro_rules! 
select_bridge { use crate::chains::{ kusama_messages_to_polkadot::{ - add_standalone_metrics as add_left_to_right_standalone_metrics, + standalone_metrics as left_to_right_standalone_metrics, run as left_to_right_messages, update_polkadot_to_kusama_conversion_rate as update_right_to_left_conversion_rate, }, polkadot_messages_to_kusama::{ - add_standalone_metrics as add_right_to_left_standalone_metrics, run as right_to_left_messages, update_kusama_to_polkadot_conversion_rate as update_left_to_right_conversion_rate, }, @@ -378,31 +359,39 @@ impl RelayHeadersAndMessages { let relayer_mode = params.shared.relayer_mode.into(); let relay_strategy = MixStrategy::new(relayer_mode); - const METRIC_IS_SOME_PROOF: &str = - "it is `None` when metric has been already registered; \ - this is the command entrypoint, so nothing has been registered yet; \ - qed"; - + // create metrics registry and register standalone metrics let metrics_params: MetricsParams = params.shared.prometheus_params.into(); - let metrics_params = relay_utils::relay_metrics(None, metrics_params).into_params(); - let (metrics_params, left_to_right_metrics) = - add_left_to_right_standalone_metrics(None, metrics_params, left_client.clone())?; - let (metrics_params, right_to_left_metrics) = - add_right_to_left_standalone_metrics(None, metrics_params, right_client.clone())?; + let metrics_params = relay_utils::relay_metrics(metrics_params).into_params(); + let left_to_right_metrics = + left_to_right_standalone_metrics(left_client.clone(), right_client.clone())?; + let right_to_left_metrics = left_to_right_metrics.clone().reverse(); + + // start conversion rate update loops for left/right chains if let Some(left_messages_pallet_owner) = left_messages_pallet_owner { let left_client = left_client.clone(); + let format_err = || { + anyhow::format_err!( + "Cannot run conversion rate updater: {} -> {}", + Right::NAME, + Left::NAME + ) + }; substrate_relay_helper::conversion_rate_update::run_conversion_rate_update_loop( left_to_right_metrics .target_to_source_conversion_rate - .expect(METRIC_IS_SOME_PROOF), + .as_ref() + .ok_or_else(format_err)? + .shared_value_ref(), left_to_right_metrics .target_to_base_conversion_rate - .clone() - .expect(METRIC_IS_SOME_PROOF), + .as_ref() + .ok_or_else(format_err)? + .shared_value_ref(), left_to_right_metrics .source_to_base_conversion_rate - .clone() - .expect(METRIC_IS_SOME_PROOF), + .as_ref() + .ok_or_else(format_err)? + .shared_value_ref(), CONVERSION_RATE_ALLOWED_DIFFERENCE_RATIO, move |new_rate| { log::info!( @@ -423,16 +412,29 @@ impl RelayHeadersAndMessages { } if let Some(right_messages_pallet_owner) = right_messages_pallet_owner { let right_client = right_client.clone(); + let format_err = || { + anyhow::format_err!( + "Cannot run conversion rate updater: {} -> {}", + Left::NAME, + Right::NAME + ) + }; substrate_relay_helper::conversion_rate_update::run_conversion_rate_update_loop( right_to_left_metrics .target_to_source_conversion_rate - .expect(METRIC_IS_SOME_PROOF), + .as_ref() + .ok_or_else(format_err)? + .shared_value_ref(), left_to_right_metrics .source_to_base_conversion_rate - .expect(METRIC_IS_SOME_PROOF), + .as_ref() + .ok_or_else(format_err)? + .shared_value_ref(), left_to_right_metrics .target_to_base_conversion_rate - .expect(METRIC_IS_SOME_PROOF), + .as_ref() + .ok_or_else(format_err)?
+ .shared_value_ref(), CONVERSION_RATE_ALLOWED_DIFFERENCE_RATIO, move |new_rate| { log::info!( @@ -452,6 +454,7 @@ impl RelayHeadersAndMessages { ); } + // optionally, create relayers fund account if params.shared.create_relayers_fund_accounts { let relayer_fund_acount_id = pallet_bridge_messages::relayer_fund_account_id::< AccountIdOf, @@ -490,6 +493,7 @@ impl RelayHeadersAndMessages { } } + // start on-demand header relays let left_to_right_on_demand_headers = OnDemandHeadersRelay::new( left_client.clone(), right_client.clone(), @@ -521,11 +525,8 @@ impl RelayHeadersAndMessages { source_to_target_headers_relay: Some(left_to_right_on_demand_headers.clone()), target_to_source_headers_relay: Some(right_to_left_on_demand_headers.clone()), lane_id: lane, - metrics_params: metrics_params.clone().disable().metrics_prefix( - messages_relay::message_lane_loop::metrics_prefix::< - ::MessageLane, - >(&lane), - ), + metrics_params: metrics_params.clone().disable(), + standalone_metrics: Some(left_to_right_metrics.clone()), relay_strategy: relay_strategy.clone(), }) .map_err(|e| anyhow::format_err!("{}", e)) @@ -540,11 +541,8 @@ impl RelayHeadersAndMessages { source_to_target_headers_relay: Some(right_to_left_on_demand_headers.clone()), target_to_source_headers_relay: Some(left_to_right_on_demand_headers.clone()), lane_id: lane, - metrics_params: metrics_params.clone().disable().metrics_prefix( - messages_relay::message_lane_loop::metrics_prefix::< - ::MessageLane, - >(&lane), - ), + metrics_params: metrics_params.clone().disable(), + standalone_metrics: Some(right_to_left_metrics.clone()), relay_strategy: relay_strategy.clone(), }) .map_err(|e| anyhow::format_err!("{}", e)) @@ -554,7 +552,7 @@ impl RelayHeadersAndMessages { message_relays.push(right_to_left_messages); } - relay_utils::relay_metrics(None, metrics_params) + relay_utils::relay_metrics(metrics_params) .expose() .await .map_err(|e| anyhow::format_err!("{}", e))?; diff --git a/relays/bin-substrate/src/cli/relay_messages.rs b/relays/bin-substrate/src/cli/relay_messages.rs index 4b2e0c975602..e47abfc5d94e 100644 --- a/relays/bin-substrate/src/cli/relay_messages.rs +++ b/relays/bin-substrate/src/cli/relay_messages.rs @@ -95,6 +95,7 @@ impl RelayMessages { target_to_source_headers_relay: None, lane_id: self.lane.into(), metrics_params: self.prometheus_params.into(), + standalone_metrics: None, relay_strategy, }) .await diff --git a/relays/bin-substrate/src/cli/swap_tokens.rs b/relays/bin-substrate/src/cli/swap_tokens.rs index aa3996aa4136..dbe46f469070 100644 --- a/relays/bin-substrate/src/cli/swap_tokens.rs +++ b/relays/bin-substrate/src/cli/swap_tokens.rs @@ -401,7 +401,8 @@ impl SwapTokens { .await?; if token_swap_state != None { return Err(anyhow::format_err!( - "Confirmed token swap state has been changed to {:?} unexpectedly" + "Confirmed token swap state has been changed to {:?} unexpectedly", + token_swap_state )) } } else { diff --git a/relays/client-ethereum/Cargo.toml b/relays/client-ethereum/Cargo.toml deleted file mode 100644 index 171988a32533..000000000000 --- a/relays/client-ethereum/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "relay-ethereum-client" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -async-std = "1.6.5" -bp-eth-poa = { path = "../../primitives/ethereum-poa" } -headers-relay = { path = "../headers" } -hex-literal = "0.3" -jsonrpsee-proc-macros = "0.3.1" -jsonrpsee-ws-client = "0.3.1" 
-libsecp256k1 = { version = "0.7", default-features = false, features = ["hmac"] } -log = "0.4.11" -relay-utils = { path = "../utils" } -tokio = "1.8" -web3 = { git = "https://github.com/svyatonik/rust-web3.git", branch = "bump-deps" } -thiserror = "1.0.26" diff --git a/relays/client-ethereum/src/client.rs b/relays/client-ethereum/src/client.rs deleted file mode 100644 index 48b7c9386f35..000000000000 --- a/relays/client-ethereum/src/client.rs +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::{ - rpc::Ethereum, - types::{ - Address, Bytes, CallRequest, Header, HeaderWithTransactions, Receipt, SignedRawTx, - SyncState, Transaction, TransactionHash, H256, U256, - }, - ConnectionParams, Error, Result, -}; - -use jsonrpsee_ws_client::{WsClient as RpcClient, WsClientBuilder as RpcClientBuilder}; -use relay_utils::relay_loop::RECONNECT_DELAY; -use std::{future::Future, sync::Arc}; - -/// Number of headers missing from the Ethereum node for us to consider node not synced. -const MAJOR_SYNC_BLOCKS: u64 = 5; - -/// The client used to interact with an Ethereum node through RPC. -#[derive(Clone)] -pub struct Client { - tokio: Arc, - params: ConnectionParams, - client: Arc, -} - -impl Client { - /// Create a new Ethereum RPC Client. - /// - /// This function will keep connecting to given Ethereum node until connection is established - /// and is functional. If attempt fail, it will wait for `RECONNECT_DELAY` and retry again. - pub async fn new(params: ConnectionParams) -> Self { - loop { - match Self::try_connect(params.clone()).await { - Ok(client) => return client, - Err(error) => log::error!( - target: "bridge", - "Failed to connect to Ethereum node: {:?}. Going to retry in {}s", - error, - RECONNECT_DELAY.as_secs(), - ), - } - - async_std::task::sleep(RECONNECT_DELAY).await; - } - } - - /// Try to connect to Ethereum node. Returns Ethereum RPC client if connection has been - /// established or error otherwise. - pub async fn try_connect(params: ConnectionParams) -> Result { - let (tokio, client) = Self::build_client(¶ms).await?; - Ok(Self { tokio, client, params }) - } - - /// Build client to use in connection. - async fn build_client( - params: &ConnectionParams, - ) -> Result<(Arc, Arc)> { - let tokio = tokio::runtime::Runtime::new()?; - let uri = format!("ws://{}:{}", params.host, params.port); - let client = tokio - .spawn(async move { RpcClientBuilder::default().build(&uri).await }) - .await??; - Ok((Arc::new(tokio), Arc::new(client))) - } - - /// Reopen client connection. - pub async fn reconnect(&mut self) -> Result<()> { - let (tokio, client) = Self::build_client(&self.params).await?; - self.tokio = tokio; - self.client = client; - Ok(()) - } -} - -impl Client { - /// Returns true if client is connected to at least one peer and is in synced state. 
- pub async fn ensure_synced(&self) -> Result<()> { - self.jsonrpsee_execute(move |client| async move { - match Ethereum::syncing(&*client).await? { - SyncState::NotSyncing => Ok(()), - SyncState::Syncing(syncing) => { - let missing_headers = - syncing.highest_block.saturating_sub(syncing.current_block); - if missing_headers > MAJOR_SYNC_BLOCKS.into() { - return Err(Error::ClientNotSynced(missing_headers)) - } - - Ok(()) - }, - } - }) - .await - } - - /// Estimate gas usage for the given call. - pub async fn estimate_gas(&self, call_request: CallRequest) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(Ethereum::estimate_gas(&*client, call_request).await?) - }) - .await - } - - /// Retrieve number of the best known block from the Ethereum node. - pub async fn best_block_number(&self) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(Ethereum::block_number(&*client).await?.as_u64()) - }) - .await - } - - /// Retrieve number of the best known block from the Ethereum node. - pub async fn header_by_number(&self, block_number: u64) -> Result
{ - self.jsonrpsee_execute(move |client| async move { - let get_full_tx_objects = false; - let header = - Ethereum::get_block_by_number(&*client, block_number, get_full_tx_objects).await?; - match header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some() { - true => Ok(header), - false => Err(Error::IncompleteHeader), - } - }) - .await - } - - /// Retrieve block header by its hash from Ethereum node. - pub async fn header_by_hash(&self, hash: H256) -> Result
{ - self.jsonrpsee_execute(move |client| async move { - let get_full_tx_objects = false; - let header = Ethereum::get_block_by_hash(&*client, hash, get_full_tx_objects).await?; - match header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some() { - true => Ok(header), - false => Err(Error::IncompleteHeader), - } - }) - .await - } - - /// Retrieve block header and its transactions by its number from Ethereum node. - pub async fn header_by_number_with_transactions( - &self, - number: u64, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - let get_full_tx_objects = true; - let header = Ethereum::get_block_by_number_with_transactions( - &*client, - number, - get_full_tx_objects, - ) - .await?; - - let is_complete_header = - header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some(); - if !is_complete_header { - return Err(Error::IncompleteHeader) - } - - let is_complete_transactions = header.transactions.iter().all(|tx| tx.raw.is_some()); - if !is_complete_transactions { - return Err(Error::IncompleteTransaction) - } - - Ok(header) - }) - .await - } - - /// Retrieve block header and its transactions by its hash from Ethereum node. - pub async fn header_by_hash_with_transactions( - &self, - hash: H256, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - let get_full_tx_objects = true; - let header = - Ethereum::get_block_by_hash_with_transactions(&*client, hash, get_full_tx_objects) - .await?; - - let is_complete_header = - header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some(); - if !is_complete_header { - return Err(Error::IncompleteHeader) - } - - let is_complete_transactions = header.transactions.iter().all(|tx| tx.raw.is_some()); - if !is_complete_transactions { - return Err(Error::IncompleteTransaction) - } - - Ok(header) - }) - .await - } - - /// Retrieve transaction by its hash from Ethereum node. - pub async fn transaction_by_hash(&self, hash: H256) -> Result> { - self.jsonrpsee_execute(move |client| async move { - Ok(Ethereum::transaction_by_hash(&*client, hash).await?) - }) - .await - } - - /// Retrieve transaction receipt by transaction hash. - pub async fn transaction_receipt(&self, transaction_hash: H256) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(Ethereum::get_transaction_receipt(&*client, transaction_hash).await?) - }) - .await - } - - /// Get the nonce of the given account. - pub async fn account_nonce(&self, address: Address) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(Ethereum::get_transaction_count(&*client, address).await?) - }) - .await - } - - /// Submit an Ethereum transaction. - /// - /// The transaction must already be signed before sending it through this method. - pub async fn submit_transaction(&self, signed_raw_tx: SignedRawTx) -> Result { - self.jsonrpsee_execute(move |client| async move { - let transaction = Bytes(signed_raw_tx); - let tx_hash = Ethereum::submit_transaction(&*client, transaction).await?; - log::trace!(target: "bridge", "Sent transaction to Ethereum node: {:?}", tx_hash); - Ok(tx_hash) - }) - .await - } - - /// Call Ethereum smart contract. - pub async fn eth_call(&self, call_transaction: CallRequest) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(Ethereum::call(&*client, call_transaction).await?) - }) - .await - } - - /// Execute jsonrpsee future in tokio context. 
- async fn jsonrpsee_execute(&self, make_jsonrpsee_future: MF) -> Result - where - MF: FnOnce(Arc) -> F + Send + 'static, - F: Future> + Send, - T: Send + 'static, - { - let client = self.client.clone(); - self.tokio.spawn(async move { make_jsonrpsee_future(client).await }).await? - } -} diff --git a/relays/client-ethereum/src/error.rs b/relays/client-ethereum/src/error.rs deleted file mode 100644 index 6323b708fc02..000000000000 --- a/relays/client-ethereum/src/error.rs +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Ethereum node RPC errors. - -use crate::types::U256; - -use jsonrpsee_ws_client::types::Error as RpcError; -use relay_utils::MaybeConnectionError; -use thiserror::Error; - -/// Result type used by Ethereum client. -pub type Result = std::result::Result; - -/// Errors that can occur only when interacting with -/// an Ethereum node through RPC. -#[derive(Debug, Error)] -pub enum Error { - /// IO error. - #[error("IO error: {0}")] - Io(#[from] std::io::Error), - /// An error that can occur when making an HTTP request to - /// an JSON-RPC client. - #[error("RPC error: {0}")] - RpcError(#[from] RpcError), - /// Failed to parse response. - #[error("Response parse failed: {0}")] - ResponseParseFailed(String), - /// We have received a header with missing fields. - #[error("Incomplete Ethereum Header Received (missing some of required fields - hash, number, logs_bloom).")] - IncompleteHeader, - /// We have received a transaction missing a `raw` field. - #[error("Incomplete Ethereum Transaction (missing required field - raw).")] - IncompleteTransaction, - /// An invalid Substrate block number was received from - /// an Ethereum node. - #[error("Received an invalid Substrate block from Ethereum Node.")] - InvalidSubstrateBlockNumber, - /// An invalid index has been received from an Ethereum node. - #[error("Received an invalid incomplete index from Ethereum Node.")] - InvalidIncompleteIndex, - /// The client we're connected to is not synced, so we can't rely on its state. Contains - /// number of unsynced headers. - #[error("Ethereum client is not synced: syncing {0} headers.")] - ClientNotSynced(U256), - /// Custom logic error. 
- #[error("{0}")] - Custom(String), -} - -impl From for Error { - fn from(error: tokio::task::JoinError) -> Self { - Error::Custom(format!("Failed to wait tokio task: {}", error)) - } -} - -impl MaybeConnectionError for Error { - fn is_connection_error(&self) -> bool { - matches!( - *self, - Error::RpcError(RpcError::Transport(_)) - // right now if connection to the ws server is dropped (after it is already established), - // we're getting this error - | Error::RpcError(RpcError::Internal(_)) - | Error::RpcError(RpcError::RestartNeeded(_)) - | Error::ClientNotSynced(_), - ) - } -} diff --git a/relays/client-ethereum/src/lib.rs b/relays/client-ethereum/src/lib.rs deleted file mode 100644 index fa4877f8e5cf..000000000000 --- a/relays/client-ethereum/src/lib.rs +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Tools to interact with (Open) Ethereum node using RPC methods. - -#![warn(missing_docs)] - -mod client; -mod error; -mod rpc; -mod sign; - -pub use crate::{ - client::Client, - error::{Error, Result}, - sign::{sign_and_submit_transaction, SigningParams}, -}; - -pub mod types; - -/// Ethereum-over-websocket connection params. -#[derive(Debug, Clone)] -pub struct ConnectionParams { - /// Websocket server host name. - pub host: String, - /// Websocket server TCP port. - pub port: u16, -} - -impl Default for ConnectionParams { - fn default() -> Self { - ConnectionParams { host: "localhost".into(), port: 8546 } - } -} diff --git a/relays/client-ethereum/src/rpc.rs b/relays/client-ethereum/src/rpc.rs deleted file mode 100644 index 2479338b1015..000000000000 --- a/relays/client-ethereum/src/rpc.rs +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Ethereum node RPC interface. - -use crate::types::{ - Address, Bytes, CallRequest, Header, HeaderWithTransactions, Receipt, SyncState, Transaction, - TransactionHash, H256, U256, U64, -}; - -jsonrpsee_proc_macros::rpc_client_api! 
diff --git a/relays/client-ethereum/src/sign.rs b/relays/client-ethereum/src/sign.rs deleted file mode 100644 index 86ddcc871c40..000000000000 --- a/relays/client-ethereum/src/sign.rs +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>. - -use crate::{ - types::{Address, CallRequest, U256}, - Client, Result, -}; -use bp_eth_poa::signatures::{secret_to_address, SignTransaction}; -use hex_literal::hex; -use libsecp256k1::SecretKey; - -/// Ethereum signing params. -#[derive(Clone, Debug)] -pub struct SigningParams { - /// Ethereum chain id. - pub chain_id: u64, - /// Ethereum transactions signer. - pub signer: SecretKey, - /// Gas price we agree to pay. - pub gas_price: U256, -} - -impl Default for SigningParams { - fn default() -> Self { - SigningParams { - chain_id: 0x11, // Parity dev chain - // account that has a lot of ether when we run instant seal engine - // address: 0x00a329c0648769a73afac7f9381e08fb43dbea72 - // secret: 0x4d5db4107d237df6a3d58ee5f70ae63d73d7658d4026f2eefd2f204c81682cb7 - signer: SecretKey::parse(&hex!( - "4d5db4107d237df6a3d58ee5f70ae63d73d7658d4026f2eefd2f204c81682cb7" - )) - .expect("secret is hardcoded, thus valid; qed"), - gas_price: 8_000_000_000u64.into(), // 8 Gwei - } - } -}
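The `Default` implementation above is hardwired to Parity's instant-seal dev chain, so every field had to be overridden for a real network. A hypothetical construction (the chain id, secret key and gas price are illustrative values, not taken from this repository):

use hex_literal::hex;
use libsecp256k1::SecretKey;

let params = SigningParams {
    chain_id: 42, // illustrative chain id
    signer: SecretKey::parse(&hex!(
        "1111111111111111111111111111111111111111111111111111111111111111"
    ))
    .expect("static 32-byte value below the curve order; qed"),
    gas_price: 1_000_000_000u64.into(), // 1 Gwei, illustrative
};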
- -/// Sign and submit transaction using given Ethereum client. -pub async fn sign_and_submit_transaction( - client: &Client, - params: &SigningParams, - contract_address: Option<Address>, - nonce: Option<U256>, - double_gas: bool, - encoded_call: Vec<u8>, -) -> Result<()> { - let nonce = if let Some(n) = nonce { - n - } else { - let address: Address = secret_to_address(&params.signer); - client.account_nonce(address).await? - }; - - let call_request = CallRequest { - to: contract_address, - data: Some(encoded_call.clone().into()), - ..Default::default() - }; - let gas = client.estimate_gas(call_request).await?; - - let raw_transaction = bp_eth_poa::UnsignedTransaction { - nonce, - to: contract_address, - value: U256::zero(), - gas: if double_gas { gas.saturating_mul(2.into()) } else { gas }, - gas_price: params.gas_price, - payload: encoded_call, - } - .sign_by(&params.signer, Some(params.chain_id)); - - let _ = client.submit_transaction(raw_transaction).await?; - Ok(()) -}
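A sketch of a typical call into the helper above, assuming an already-connected `client: Client` (the constructor lives in the removed client.rs and is not shown in this hunk); the contract address and payload are illustrative:

async fn submit_contract_call(client: &Client, encoded_call: Vec<u8>) -> Result<()> {
    sign_and_submit_transaction(
        client,
        &SigningParams::default(),
        Some(hex!("00a329c0648769a73afac7f9381e08fb43dbea72").into()), // dev-chain account, illustrative
        None,  // let the helper query the signer nonce
        false, // keep the estimated gas as-is
        encoded_call,
    )
    .await
}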
diff --git a/relays/client-ethereum/src/types.rs b/relays/client-ethereum/src/types.rs deleted file mode 100644 index f589474aff1b..000000000000 --- a/relays/client-ethereum/src/types.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>. - -//! Common types that are used in relay <-> Ethereum node communications. - -use headers_relay::sync_types::SourceHeader; - -pub use web3::types::{Address, Bytes, CallRequest, SyncState, H256, U128, U256, U64}; - -/// When header is just received from the Ethereum node, we check that it has -/// both number and hash fields filled. -pub const HEADER_ID_PROOF: &str = "checked on retrieval; qed"; - -/// Ethereum transaction hash type. -pub type HeaderHash = H256; - -/// Ethereum transaction hash type. -pub type TransactionHash = H256; - -/// Ethereum transaction type. -pub type Transaction = web3::types::Transaction; - -/// Ethereum header type. -pub type Header = web3::types::Block<H256>; - -/// Ethereum header type used in headers sync. -#[derive(Clone, Debug, PartialEq)] -pub struct SyncHeader(Header); - -impl std::ops::Deref for SyncHeader { - type Target = Header; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -/// Ethereum header with transactions type. -pub type HeaderWithTransactions = web3::types::Block<Transaction>; - -/// Ethereum transaction receipt type. -pub type Receipt = web3::types::TransactionReceipt; - -/// Ethereum header ID. -pub type HeaderId = relay_utils::HeaderId<HeaderHash, u64>; - -/// A raw Ethereum transaction that's been signed. -pub type SignedRawTx = Vec<u8>; - -impl From<Header> for SyncHeader { - fn from(header: Header) -> Self { - Self(header) - } -} - -impl SourceHeader<HeaderHash, u64> for SyncHeader { - fn id(&self) -> HeaderId { - relay_utils::HeaderId( - self.number.expect(HEADER_ID_PROOF).as_u64(), - self.hash.expect(HEADER_ID_PROOF), - ) - } - - fn parent_id(&self) -> HeaderId { - relay_utils::HeaderId(self.number.expect(HEADER_ID_PROOF).as_u64() - 1, self.parent_hash) - } -}
diff --git a/relays/client-substrate/Cargo.toml b/relays/client-substrate/Cargo.toml index 6a1173581e7a..2eb07fdcde46 100644 --- a/relays/client-substrate/Cargo.toml +++ b/relays/client-substrate/Cargo.toml @@ -22,7 +22,6 @@ thiserror = "1.0.26" bp-header-chain = { path = "../../primitives/header-chain" } bp-runtime = { path = "../../primitives/runtime" } finality-relay = { path = "../finality" } -headers-relay = { path = "../headers" } relay-utils = { path = "../utils" } # Substrate Dependencies
diff --git a/relays/client-substrate/src/headers_source.rs b/relays/client-substrate/src/headers_source.rs deleted file mode 100644 index e3839bf2c8ba..000000000000 --- a/relays/client-substrate/src/headers_source.rs +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>. - -//! Default generic implementation of headers source for basic Substrate client. - -use crate::{ - chain::{BlockWithJustification, Chain}, - client::Client, - error::Error, -}; - -use async_trait::async_trait; -use headers_relay::{ - sync_loop::SourceClient, - sync_types::{HeaderIdOf, HeadersSyncPipeline, QueuedHeader, SourceHeader}, -}; -use relay_utils::relay_loop::Client as RelayClient; -use sp_runtime::{traits::Header as HeaderT, EncodedJustification}; -use std::marker::PhantomData; - -/// Substrate node as headers source. -pub struct HeadersSource<C: Chain, P> { - client: Client<C>, - _phantom: PhantomData<P>, -} - -impl<C: Chain, P> HeadersSource<C, P> { - /// Create new headers source using given client. - pub fn new(client: Client<C>) -> Self { - HeadersSource { client, _phantom: Default::default() } - } -} - -impl<C: Chain, P> Clone for HeadersSource<C, P> { - fn clone(&self) -> Self { - HeadersSource { client: self.client.clone(), _phantom: Default::default() } - } -} - -#[async_trait] -impl<C: Chain, P: HeadersSyncPipeline> RelayClient for HeadersSource<C, P> { - type Error = Error; - - async fn reconnect(&mut self) -> Result<(), Error> { - self.client.reconnect().await - } -} - -#[async_trait] -impl<C, P> SourceClient<P> for HeadersSource<C, P> -where - C: Chain, - C::BlockNumber: relay_utils::BlockNumberBase, - C::Header: Into<P::Header>, - P: HeadersSyncPipeline< - Extra = (), - Completion = EncodedJustification, - Hash = C::Hash, - Number = C::BlockNumber, - >, - P::Header: SourceHeader<C::Hash, C::BlockNumber>, -{ - async fn best_block_number(&self) -> Result<P::Number, Error> { - // we **CAN** continue to relay headers if source node is out of sync, because - // target node may be missing headers that are already available at the source - Ok(*self.client.best_header().await?.number()) - } - - async fn header_by_hash(&self, hash: P::Hash) -> Result<P::Header, Error> { - self.client.header_by_hash(hash).await.map(Into::into).map_err(Into::into) - } - - async fn header_by_number(&self, number: P::Number) -> Result<P::Header, Error> { - self.client.header_by_number(number).await.map(Into::into).map_err(Into::into) - } - - async fn header_completion( - &self, - id: HeaderIdOf<P>, - ) -> Result<(HeaderIdOf<P>, Option<EncodedJustification>), Error> { - let hash = id.1; - let signed_block = self.client.get_block(Some(hash)).await?; - let grandpa_justification = signed_block.justification().cloned(); - - Ok((id, grandpa_justification)) - } - - async fn header_extra( - &self, - id: HeaderIdOf<P>, - _header: QueuedHeader<P>, - ) -> Result<(HeaderIdOf<P>, ()), Error> { - Ok((id, ())) - } -}
diff --git a/relays/client-substrate/src/lib.rs b/relays/client-substrate/src/lib.rs index 1f6606ea287c..51ddf852b9b6 100644 --- a/relays/client-substrate/src/lib.rs +++ b/relays/client-substrate/src/lib.rs @@ -26,7 +26,6 @@ mod sync_header; pub mod finality_source; pub mod guard; -pub mod headers_source; pub mod metrics; use std::time::Duration;
diff --git a/relays/client-substrate/src/metrics/float_storage_value.rs b/relays/client-substrate/src/metrics/float_storage_value.rs index f591a7a98105..7dccf82b6f8e 100644 --- a/relays/client-substrate/src/metrics/float_storage_value.rs +++ b/relays/client-substrate/src/metrics/float_storage_value.rs @@ -20,7 +20,8 @@ use async_std::sync::{Arc, RwLock}; use async_trait::async_trait; use codec::Decode; use relay_utils::metrics::{ - metric_name, register, F64SharedRef, Gauge, PrometheusError, Registry, StandaloneMetrics, F64, + metric_name, register, F64SharedRef, Gauge, Metric, PrometheusError, Registry, + StandaloneMetric, F64, }; use sp_core::storage::StorageKey; use sp_runtime::{traits::UniqueSaturatedInto, FixedPointNumber}; @@ -42,8 +43,6 @@ impl<C: Chain, T> FloatStorageValueMetric<C, T> { /// Create new metric. pub fn new( - registry: &Registry, - prefix: Option<&str>, client: Client<C>, storage_key: StorageKey, maybe_default_value: Option<T>, @@ -55,7 +54,7 @@ impl<C: Chain, T> FloatStorageValueMetric<C, T> { client, storage_key, maybe_default_value, - metric: register(Gauge::new(metric_name(prefix, &name), help)?, registry)?, + metric: Gauge::new(metric_name(None, &name), help)?, shared_value_ref, }) } @@ -66,8 +65,17 @@ impl<C: Chain, T> FloatStorageValueMetric<C, T> { } } +impl<C: Chain, T> Metric for FloatStorageValueMetric<C, T> +where + T: 'static + Decode + Send + Sync + FixedPointNumber, +{ + fn register(&self, registry: &Registry) -> Result<(), PrometheusError> { + register(self.metric.clone(), registry).map(drop) + } +} + #[async_trait] -impl<C: Chain, T> StandaloneMetrics for FloatStorageValueMetric<C, T> +impl<C: Chain, T> StandaloneMetric for FloatStorageValueMetric<C, T> where T: 'static + Decode + Send + Sync + FixedPointNumber, {
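The hunk above (and the storage-proof hunk that follows) splits the old `StandaloneMetrics` trait in two: `Metric` owns registration, while `StandaloneMetric` keeps the periodic self-update. Since constructors no longer take a `Registry`, registration becomes an explicit second step; a rough sketch using only APIs visible in this diff (`client` and `registry` assumed to be in scope):

let metric = StorageProofOverheadMetric::new(
    client.clone(),
    "storage_proof_overhead".into(),
    "Extra bytes added by storage proofs".into(),
)?;
// From the new `Metric` impl; the periodic `update()` loop is driven elsewhere.
metric.register(&registry)?;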
diff --git a/relays/client-substrate/src/metrics/storage_proof_overhead.rs b/relays/client-substrate/src/metrics/storage_proof_overhead.rs index c3b69c32f572..f1c770ed228e 100644 --- a/relays/client-substrate/src/metrics/storage_proof_overhead.rs +++ b/relays/client-substrate/src/metrics/storage_proof_overhead.rs @@ -18,7 +18,7 @@ use crate::{chain::Chain, client::Client, error::Error}; use async_trait::async_trait; use relay_utils::metrics::{ - metric_name, register, Gauge, PrometheusError, Registry, StandaloneMetrics, U64, + metric_name, register, Gauge, Metric, PrometheusError, Registry, StandaloneMetric, U64, }; use sp_core::storage::StorageKey; use sp_runtime::traits::Header as HeaderT; @@ -46,16 +46,10 @@ impl<C: Chain> Clone for StorageProofOverheadMetric<C> { impl<C: Chain> StorageProofOverheadMetric<C> { /// Create new metric instance with given name and help. - pub fn new( - registry: &Registry, - prefix: Option<&str>, - client: Client<C>, - name: String, - help: String, - ) -> Result<Self, PrometheusError> { + pub fn new(client: Client<C>, name: String, help: String) -> Result<Self, PrometheusError> { Ok(StorageProofOverheadMetric { client, - metric: register(Gauge::new(metric_name(prefix, &name), help)?, registry)?, + metric: Gauge::new(metric_name(None, &name), help)?, }) } @@ -84,8 +78,14 @@ impl<C: Chain> StorageProofOverheadMetric<C> { } } +impl<C: Chain> Metric for StorageProofOverheadMetric<C> { + fn register(&self, registry: &Registry) -> Result<(), PrometheusError> { + register(self.metric.clone(), registry).map(drop) + } +} + #[async_trait] -impl<C: Chain> StandaloneMetrics for StorageProofOverheadMetric<C> { +impl<C: Chain> StandaloneMetric for StorageProofOverheadMetric<C> { fn update_interval(&self) -> Duration { C::AVERAGE_BLOCK_INTERVAL * UPDATE_INTERVAL_IN_BLOCKS }
diff --git a/relays/client-substrate/src/sync_header.rs b/relays/client-substrate/src/sync_header.rs index 0b74dee690f2..ed3de6289ce0 100644 --- a/relays/client-substrate/src/sync_header.rs +++ b/relays/client-substrate/src/sync_header.rs @@ -16,13 +16,10 @@ use bp_header_chain::find_grandpa_authorities_scheduled_change; use finality_relay::SourceHeader as FinalitySourceHeader; -use headers_relay::sync_types::SourceHeader; -use num_traits::{CheckedSub, One}; -use relay_utils::HeaderId; use sp_runtime::traits::Header as HeaderT; /// Generic wrapper for `sp_runtime::traits::Header` based headers, that -/// implements `headers_relay::sync_types::SourceHeader` and may be used in headers sync directly. +/// implements `finality_relay::SourceHeader` and may be used in headers sync directly. #[derive(Clone, Debug, PartialEq)] pub struct SyncHeader<Header>(Header); @@ -47,21 +44,6 @@ impl<Header: HeaderT> From<Header> for SyncHeader<Header> { } } -impl<Header: HeaderT> SourceHeader<Header::Hash, Header::Number> for SyncHeader<Header> { - fn id(&self) -> HeaderId<Header::Hash, Header::Number> { - relay_utils::HeaderId(*self.0.number(), self.hash()) - } - - fn parent_id(&self) -> HeaderId<Header::Hash, Header::Number> { - relay_utils::HeaderId( - self.number() - .checked_sub(&One::one()) - .expect("should never be called for genesis header"), - *self.parent_hash(), - ) - } -} - impl<Header: HeaderT> FinalitySourceHeader<Header::Number> for SyncHeader<Header> { fn number(&self) -> Header::Number { *self.0.number()
diff --git a/relays/exchange/Cargo.toml b/relays/exchange/Cargo.toml deleted file mode 100644 index f08c40325ec7..000000000000 --- a/relays/exchange/Cargo.toml +++ /dev/null @@ -1,18 +0,0 @@ -[package] -name = "exchange-relay" -version = "0.1.0" -authors = ["Parity Technologies <admin@parity.io>"] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -anyhow = "1.0" -async-std = "1.6.5" -async-trait = "0.1.40" -backoff = "0.2" -futures = "0.3.5" -log = "0.4.11" -num-traits = "0.2" -parking_lot = "0.11.0" -relay-utils = { path = "../utils" } -thiserror = "1.0.26"
diff --git a/relays/exchange/src/error.rs b/relays/exchange/src/error.rs deleted file mode 100644 index aa5c427a9efb..000000000000 --- a/relays/exchange/src/error.rs +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>. - -//! Exchange-relay errors. - -use crate::exchange::{BlockHashOf, BlockNumberOf, TransactionHashOf}; - -use relay_utils::MaybeConnectionError; -use std::fmt::{Debug, Display}; -use thiserror::Error; - -/// Error type given pipeline. -pub type ErrorOf<P> = Error<BlockHashOf<P>, BlockNumberOf<P>, TransactionHashOf<P>>; - -/// Exchange-relay error type. -#[derive(Error, Debug)] -pub enum Error<Hash: Debug + Display, HeaderNumber: Debug + Display, SourceTxHash: Debug + Display> { - /// Failed to check finality of the requested header on the target node. - #[error("Failed to check finality of header {0}/{1} on {2} node: {3:?}")] - Finality(HeaderNumber, Hash, &'static str, anyhow::Error), - /// Error retrieving block from the source node. - #[error("Error retrieving block {0} from {1} node: {2:?}")] - RetrievingBlock(Hash, &'static str, anyhow::Error), - /// Error retrieving transaction from the source node. - #[error("Error retrieving transaction {0} from {1} node: {2:?}")] - RetrievingTransaction(SourceTxHash, &'static str, anyhow::Error), - /// Failed to check existence of header from the target node. - #[error("Failed to check existence of header {0}/{1} on {2} node: {3:?}")] - CheckHeaderExistence(HeaderNumber, Hash, &'static str, anyhow::Error), - /// Failed to prepare proof for the transaction from the source node. - #[error("Error building transaction {0} proof on {1} node: {2:?}")] - BuildTransactionProof(String, &'static str, anyhow::Error, bool), - /// Failed to submit the transaction proof to the target node. - #[error("Error submitting transaction {0} proof to {1} node: {2:?}")] - SubmitTransactionProof(String, &'static str, anyhow::Error, bool), - /// Transaction filtering failed. - #[error("Transaction filtering has failed with {0:?}")] - TransactionFiltering(anyhow::Error, bool), - /// Utilities/metrics error. - #[error("{0}")] - Utils(#[from] relay_utils::Error), -} - -impl<Hash: Debug + Display, HeaderNumber: Debug + Display, SourceTxHash: Debug + Display> MaybeConnectionError for Error<Hash, HeaderNumber, SourceTxHash> { - fn is_connection_error(&self) -> bool { - match *self { - Self::BuildTransactionProof(_, _, _, b) => b, - Self::SubmitTransactionProof(_, _, _, b) => b, - Self::TransactionFiltering(_, b) => b, - _ => false, - } - } -}
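The `bool` carried by `BuildTransactionProof`, `SubmitTransactionProof` and `TransactionFiltering` records whether the underlying client error was a connection error, and `is_connection_error` simply forwards it. An illustrative check, with the type parameters instantiated arbitrarily:

use relay_utils::MaybeConnectionError;

let err: Error<u64, u64, u64> = Error::TransactionFiltering(anyhow::anyhow!("ws dropped"), true);
assert!(err.is_connection_error()); // true => the caller reconnects instead of skipping the proof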
diff --git a/relays/exchange/src/exchange.rs b/relays/exchange/src/exchange.rs deleted file mode 100644 index b4538d2636ce..000000000000 --- a/relays/exchange/src/exchange.rs +++ /dev/null @@ -1,904 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>. - -//! Relaying proofs of exchange transaction. - -use crate::error::{Error, ErrorOf}; - -use anyhow::anyhow; -use async_trait::async_trait; -use relay_utils::{relay_loop::Client as RelayClient, FailedClient, MaybeConnectionError}; -use std::{ - fmt::{Debug, Display}, - string::ToString, -}; - -/// Transaction proof pipeline. -pub trait TransactionProofPipeline: 'static { - /// Name of the transaction proof source. - const SOURCE_NAME: &'static str; - /// Name of the transaction proof target. - const TARGET_NAME: &'static str; - - /// Block type. - type Block: SourceBlock; - /// Transaction inclusion proof type. - type TransactionProof: 'static + Send + Sync; -} - -/// Block that is participating in exchange. -pub trait SourceBlock: 'static + Send + Sync { - /// Block hash type. - type Hash: 'static + Clone + Send + Sync + Debug + Display; - /// Block number type. - type Number: 'static - + Debug - + Display - + Clone - + Copy - + Send - + Sync - + Into<u64> - + std::cmp::Ord - + std::ops::Add<Output = Self::Number> - + num_traits::One; - /// Block transaction. - type Transaction: SourceTransaction; - - /// Return hash of the block. - fn id(&self) -> relay_utils::HeaderId<Self::Hash, Self::Number>; - /// Return block transactions iterator. - fn transactions(&self) -> Vec<Self::Transaction>; -} - -/// Transaction that is participating in exchange. -pub trait SourceTransaction: 'static + Send { - /// Transaction hash type. - type Hash: Debug + Display + Clone; - - /// Return transaction hash. - fn hash(&self) -> Self::Hash; -} - -/// Block hash for given pipeline. -pub type BlockHashOf<P> = <<P as TransactionProofPipeline>::Block as SourceBlock>::Hash; - -/// Block number for given pipeline. -pub type BlockNumberOf<P> = <<P as TransactionProofPipeline>::Block as SourceBlock>::Number; - -/// Transaction hash for given pipeline. -pub type TransactionOf<P> = <<P as TransactionProofPipeline>::Block as SourceBlock>::Transaction; - -/// Transaction hash for given pipeline. -pub type TransactionHashOf<P> = <TransactionOf<P> as SourceTransaction>::Hash; - -/// Header id. -pub type HeaderId<P> = relay_utils::HeaderId<BlockHashOf<P>, BlockNumberOf<P>>; - -/// Source client API. -#[async_trait] -pub trait SourceClient<P: TransactionProofPipeline>: RelayClient { - /// Sleep until exchange-related data is (probably) updated. - async fn tick(&self); - /// Get block by hash. - async fn block_by_hash(&self, hash: BlockHashOf<P>) -> Result<P::Block, Self::Error>; - /// Get canonical block by number. - async fn block_by_number(&self, number: BlockNumberOf<P>) -> Result<P::Block, Self::Error>; - /// Return block + index where transaction has been **mined**. May return `Ok(None)` if - /// transaction is unknown to the source node. - async fn transaction_block( - &self, - hash: &TransactionHashOf<P>, - ) -> Result<Option<(HeaderId<P>, usize)>, Self::Error>; - /// Prepare transaction proof. - async fn transaction_proof( - &self, - block: &P::Block, - tx_index: usize, - ) -> Result<P::TransactionProof, Self::Error>; -} - -/// Target client API. -#[async_trait] -pub trait TargetClient<P: TransactionProofPipeline>: RelayClient { - /// Sleep until exchange-related data is (probably) updated. - async fn tick(&self); - /// Returns `Ok(true)` if header is known to the target node. - async fn is_header_known(&self, id: &HeaderId<P>) -> std::result::Result<bool, Self::Error>; - /// Returns `Ok(true)` if header is finalized by the target node. - async fn is_header_finalized(&self, id: &HeaderId<P>) -> Result<bool, Self::Error>; - /// Returns best finalized header id. - async fn best_finalized_header_id(&self) -> Result<HeaderId<P>, Self::Error>; - /// Returns `Ok(true)` if transaction proof needs to be relayed. - async fn filter_transaction_proof( - &self, - proof: &P::TransactionProof, - ) -> Result<bool, Self::Error>; - /// Submits transaction proof to the target node. - async fn submit_transaction_proof(&self, proof: P::TransactionProof) - -> Result<(), Self::Error>; -} - -/// Block transaction statistics. -#[derive(Debug, Default)] -#[cfg_attr(test, derive(PartialEq))] -pub struct RelayedBlockTransactions { - /// Total number of transactions processed (either relayed or ignored) so far. - pub processed: usize, - /// Total number of transactions successfully relayed so far. - pub relayed: usize, - /// Total number of transactions that we have failed to relay so far. - pub failed: usize, -} - -/// Relay all suitable transactions from single block. -/// -/// If connection error occurs, returns Err with number of successfully processed transactions. -/// If some other error occurs, it is ignored and other transactions are processed. -/// -/// All transaction-level traces are written by this function. This function is not tracing -/// any information about block. -pub async fn relay_block_transactions<P: TransactionProofPipeline>( - source_client: &impl SourceClient<P>, - target_client: &impl TargetClient<P>, - source_block: &P::Block, - mut relayed_transactions: RelayedBlockTransactions, -) -> Result<RelayedBlockTransactions, (FailedClient, RelayedBlockTransactions)> { - let transactions_to_process = source_block - .transactions() - .into_iter() - .enumerate() - .skip(relayed_transactions.processed); - for (source_tx_index, source_tx) in transactions_to_process { - let result = async { - let source_tx_id = format!("{}/{}", source_block.id().1, source_tx_index); - let source_tx_proof = prepare_transaction_proof( - source_client, - &source_tx_id, - source_block, - source_tx_index, - ) - .await - .map_err(|e| (FailedClient::Source, e))?; - - let needs_to_be_relayed = - target_client.filter_transaction_proof(&source_tx_proof).await.map_err(|err| { - ( - FailedClient::Target, - Error::TransactionFiltering( - anyhow!("{:?}", err), - err.is_connection_error(), - ), - ) - })?; - - if !needs_to_be_relayed { - return Ok(false) - } - - relay_ready_transaction_proof(target_client, &source_tx_id, source_tx_proof) - .await - .map(|_| true) - .map_err(|e| (FailedClient::Target, e)) - } - .await; - - // We have two options here: - // 1) retry with the same transaction later; - // 2) report error and proceed with next transaction. - // - // Option#1 may seems better, but: - // 1) we do not track if transaction is mined (without an error) by the target node; - // 2) error could be irrecoverable (e.g. when block is already pruned by bridge module or tx - // has invalid format) && we'll end up in infinite loop of retrying the same transaction - // proof. - // - // So we're going with option#2 here (the only exception are connection errors). - match result { - Ok(false) => { - relayed_transactions.processed += 1; - }, - Ok(true) => { - log::info!( - target: "bridge", - "{} transaction {} proof has been successfully submitted to {} node", - P::SOURCE_NAME, - source_tx.hash(), - P::TARGET_NAME, - ); - - relayed_transactions.processed += 1; - relayed_transactions.relayed += 1; - }, - Err((failed_client, err)) => { - log::error!( - target: "bridge", - "Error relaying {} transaction {} proof to {} node: {}. {}", - P::SOURCE_NAME, - source_tx.hash(), - P::TARGET_NAME, - err.to_string(), - if err.is_connection_error() { - "Going to retry after delay..." - } else { - "You may need to submit proof of this transaction manually" - }, - ); - - if err.is_connection_error() { - return Err((failed_client, relayed_transactions)) - } - - relayed_transactions.processed += 1; - relayed_transactions.failed += 1; - }, - } - } - - Ok(relayed_transactions) -} - -/// Relay single transaction proof. -pub async fn relay_single_transaction_proof<P: TransactionProofPipeline>( - source_client: &impl SourceClient<P>, - target_client: &impl TargetClient<P>, - source_tx_hash: TransactionHashOf<P>, -) -> Result<(), ErrorOf<P>> { - // wait for transaction and header on source node - let (source_header_id, source_tx_index) = - wait_transaction_mined(source_client, &source_tx_hash).await?; - let source_block = source_client.block_by_hash(source_header_id.1.clone()).await; - let source_block = source_block.map_err(|err| { - Error::RetrievingBlock(source_header_id.1.clone(), P::SOURCE_NAME, anyhow!("{:?}", err)) - })?; - // wait for transaction and header on target node - wait_header_imported(target_client, &source_header_id).await?; - wait_header_finalized(target_client, &source_header_id).await?; - - // and finally - prepare and submit transaction proof to target node - let source_tx_id = format!("{}", source_tx_hash); - relay_ready_transaction_proof( - target_client, - &source_tx_id, - prepare_transaction_proof(source_client, &source_tx_id, &source_block, source_tx_index) - .await?, - ) - .await - .map_err(Into::into) -} - -/// Prepare transaction proof. -async fn prepare_transaction_proof<P: TransactionProofPipeline>( - source_client: &impl SourceClient<P>, - source_tx_id: &str, - source_block: &P::Block, - source_tx_index: usize, -) -> Result<P::TransactionProof, ErrorOf<P>> { - source_client - .transaction_proof(source_block, source_tx_index) - .await - .map_err(|err| { - Error::BuildTransactionProof( - source_tx_id.to_owned(), - P::SOURCE_NAME, - anyhow!("{:?}", err), - err.is_connection_error(), - ) - }) -} - -/// Relay prepared proof of transaction. -async fn relay_ready_transaction_proof<P: TransactionProofPipeline>( - target_client: &impl TargetClient<P>, - source_tx_id: &str, - source_tx_proof: P::TransactionProof, -) -> Result<(), ErrorOf<P>> { - target_client.submit_transaction_proof(source_tx_proof).await.map_err(|err| { - Error::SubmitTransactionProof( - source_tx_id.to_owned(), - P::TARGET_NAME, - anyhow!("{:?}", err), - err.is_connection_error(), - ) - }) -} - -/// Wait until transaction is mined by source node. -async fn wait_transaction_mined<P: TransactionProofPipeline>( - source_client: &impl SourceClient<P>, - source_tx_hash: &TransactionHashOf<P>, -) -> Result<(HeaderId<P>, usize), ErrorOf<P>> { - loop { - let source_header_and_tx = - source_client.transaction_block(source_tx_hash).await.map_err(|err| { - Error::RetrievingTransaction( - source_tx_hash.clone(), - P::SOURCE_NAME, - anyhow!("{:?}", err), - ) - })?; - match source_header_and_tx { - Some((source_header_id, source_tx)) => { - log::info!( - target: "bridge", - "Transaction {} is retrieved from {} node. Continuing...", - source_tx_hash, - P::SOURCE_NAME, - ); - - return Ok((source_header_id, source_tx)) - }, - None => { - log::info!( - target: "bridge", - "Waiting for transaction {} to be mined by {} node...", - source_tx_hash, - P::SOURCE_NAME, - ); - - source_client.tick().await; - }, - } - } -} - -/// Wait until target node imports required header. -async fn wait_header_imported<P: TransactionProofPipeline>( - target_client: &impl TargetClient<P>, - source_header_id: &HeaderId<P>, -) -> Result<(), ErrorOf<P>> { - loop { - let is_header_known = - target_client.is_header_known(source_header_id).await.map_err(|err| { - Error::CheckHeaderExistence( - source_header_id.0, - source_header_id.1.clone(), - P::TARGET_NAME, - anyhow!("{:?}", err), - ) - })?; - match is_header_known { - true => { - log::info!( - target: "bridge", - "Header {}/{} is known to {} node. Continuing.", - source_header_id.0, - source_header_id.1, - P::TARGET_NAME, - ); - - return Ok(()) - }, - false => { - log::info!( - target: "bridge", - "Waiting for header {}/{} to be imported by {} node...", - source_header_id.0, - source_header_id.1, - P::TARGET_NAME, - ); - - target_client.tick().await; - }, - } - } -} - -/// Wait until target node finalizes required header. -async fn wait_header_finalized<P: TransactionProofPipeline>( - target_client: &impl TargetClient<P>, - source_header_id: &HeaderId<P>, -) -> Result<(), ErrorOf<P>> { - loop { - let is_header_finalized = - target_client.is_header_finalized(source_header_id).await.map_err(|err| { - Error::Finality( - source_header_id.0, - source_header_id.1.clone(), - P::TARGET_NAME, - anyhow!("{:?}", err), - ) - })?; - match is_header_finalized { - true => { - log::info!( - target: "bridge", - "Header {}/{} is finalized by {} node. Continuing.", - source_header_id.0, - source_header_id.1, - P::TARGET_NAME, - ); - - return Ok(()) - }, - false => { - log::info!( - target: "bridge", - "Waiting for header {}/{} to be finalized by {} node...", - source_header_id.0, - source_header_id.1, - P::TARGET_NAME, - ); - - target_client.tick().await; - }, - } - } -} - -#[cfg(test)] -pub(crate) mod tests { - use super::*; - - use parking_lot::Mutex; - use relay_utils::HeaderId; - use std::{ - collections::{HashMap, HashSet}, - sync::Arc, - }; - - pub fn test_block_id() -> TestHeaderId { - HeaderId(1, 1) - } - - pub fn test_next_block_id() -> TestHeaderId { - HeaderId(2, 2) - } - - pub fn test_transaction_hash(tx_index: u64) -> TestTransactionHash { - 200 + tx_index - } - - pub fn test_transaction(tx_index: u64) -> TestTransaction { - TestTransaction(test_transaction_hash(tx_index)) - } - - pub fn test_block() -> TestBlock { - TestBlock(test_block_id(), vec![test_transaction(0)]) - } - - pub fn test_next_block() -> TestBlock { - TestBlock(test_next_block_id(), vec![test_transaction(1)]) - } - - pub type TestBlockNumber = u64; - pub type TestBlockHash = u64; - pub type TestTransactionHash = u64; - pub type TestHeaderId = HeaderId<TestBlockHash, TestBlockNumber>; - - #[derive(Debug, Clone, PartialEq)] - pub struct TestError(pub bool); - - impl MaybeConnectionError for TestError { - fn is_connection_error(&self) -> bool { - self.0 - } - } - - pub struct TestTransactionProofPipeline; - - impl TransactionProofPipeline for TestTransactionProofPipeline { - const SOURCE_NAME: &'static str = "TestSource"; - const TARGET_NAME: &'static str = "TestTarget"; - - type Block = TestBlock; - type TransactionProof = TestTransactionProof; - } - - #[derive(Debug, Clone)] - pub struct TestBlock(pub TestHeaderId, pub Vec<TestTransaction>); - - impl SourceBlock for TestBlock { - type Hash = TestBlockHash; - type Number = TestBlockNumber; - type Transaction = TestTransaction; - - fn id(&self) -> TestHeaderId { - self.0 - } - - fn transactions(&self) -> Vec<TestTransaction> { - self.1.clone() - } - } - - #[derive(Debug, Clone)] - pub struct TestTransaction(pub TestTransactionHash); - - impl SourceTransaction for TestTransaction { - type Hash = TestTransactionHash; - - fn hash(&self) -> Self::Hash { - self.0 - } - } - - #[derive(Debug, Clone, PartialEq)] - pub struct TestTransactionProof(pub TestTransactionHash); - - #[derive(Clone)] - pub struct TestTransactionsSource { - pub on_tick: Arc<Box<dyn Fn(&mut TestTransactionsSourceData) + Send + Sync>>, - pub data: Arc<Mutex<TestTransactionsSourceData>>, - } - - pub struct TestTransactionsSourceData { - pub block: Result<TestBlock, TestError>, - pub transaction_block: Result<Option<(TestHeaderId, usize)>, TestError>, - pub proofs_to_fail: HashMap<TestTransactionHash, TestError>, - } - - impl TestTransactionsSource { - pub fn new(on_tick: Box<dyn Fn(&mut TestTransactionsSourceData) + Send + Sync>) -> Self { - Self { - on_tick: Arc::new(on_tick), - data: Arc::new(Mutex::new(TestTransactionsSourceData { - block: Ok(test_block()), - transaction_block: Ok(Some((test_block_id(), 0))), - proofs_to_fail: HashMap::new(), - })), - } - } - } - - #[async_trait] - impl RelayClient for TestTransactionsSource { - type Error = TestError; - - async fn reconnect(&mut self) -> Result<(), TestError> { - Ok(()) - } - } - - #[async_trait] - impl SourceClient<TestTransactionProofPipeline> for TestTransactionsSource { - async fn tick(&self) { - (self.on_tick)(&mut *self.data.lock()) - } - - async fn block_by_hash(&self, _: TestBlockHash) -> Result<TestBlock, TestError> { - self.data.lock().block.clone() - } - - async fn block_by_number(&self, _: TestBlockNumber) -> Result<TestBlock, TestError> { - self.data.lock().block.clone() - } - - async fn transaction_block( - &self, - _: &TestTransactionHash, - ) -> Result<Option<(TestHeaderId, usize)>, TestError> { - self.data.lock().transaction_block.clone() - } - - async fn transaction_proof( - &self, - block: &TestBlock, - index: usize, - ) -> Result<TestTransactionProof, TestError> { - let tx_hash = block.1[index].hash(); - let proof_error = self.data.lock().proofs_to_fail.get(&tx_hash).cloned(); - if let Some(err) = proof_error { - return Err(err) - } - - Ok(TestTransactionProof(tx_hash)) - } - } - - #[derive(Clone)] - pub struct TestTransactionsTarget { - pub on_tick: Arc<Box<dyn Fn(&mut TestTransactionsTargetData) + Send + Sync>>, - pub data: Arc<Mutex<TestTransactionsTargetData>>, - } - - pub struct TestTransactionsTargetData { - pub is_header_known: Result<bool, TestError>, - pub is_header_finalized: Result<bool, TestError>, - pub best_finalized_header_id: Result<TestHeaderId, TestError>, - pub transactions_to_accept: HashSet<TestTransactionHash>, - pub submitted_proofs: Vec<TestTransactionProof>, - } - - impl TestTransactionsTarget { - pub fn new(on_tick: Box<dyn Fn(&mut TestTransactionsTargetData) + Send + Sync>) -> Self { - Self { - on_tick: Arc::new(on_tick), - data: Arc::new(Mutex::new(TestTransactionsTargetData { - is_header_known: Ok(true), - is_header_finalized: Ok(true), - best_finalized_header_id: Ok(test_block_id()), - transactions_to_accept: vec![test_transaction_hash(0)].into_iter().collect(), - submitted_proofs: Vec::new(), - })), - } - } - } - - #[async_trait] - impl RelayClient for TestTransactionsTarget { - type Error = TestError; - - async fn reconnect(&mut self) -> Result<(), TestError> { - Ok(()) - } - } - - #[async_trait] - impl TargetClient<TestTransactionProofPipeline> for TestTransactionsTarget { - async fn tick(&self) { - (self.on_tick)(&mut *self.data.lock()) - } - - async fn is_header_known(&self, _: &TestHeaderId) -> Result<bool, TestError> { - self.data.lock().is_header_known.clone() - } - - async fn is_header_finalized(&self, _: &TestHeaderId) -> Result<bool, TestError> { - self.data.lock().is_header_finalized.clone() - } - - async fn best_finalized_header_id(&self) -> Result<TestHeaderId, TestError> { - self.data.lock().best_finalized_header_id.clone() - } - - async fn filter_transaction_proof( - &self, - proof: &TestTransactionProof, - ) -> Result<bool, TestError> { - Ok(self.data.lock().transactions_to_accept.contains(&proof.0)) - } - - async fn submit_transaction_proof( - &self, - proof: TestTransactionProof, - ) -> Result<(), TestError> { - self.data.lock().submitted_proofs.push(proof); - Ok(()) - } - } - - fn ensure_relay_single_success( - source: &TestTransactionsSource, - target: &TestTransactionsTarget, - ) { - assert!(async_std::task::block_on(relay_single_transaction_proof( - source, - target, - test_transaction_hash(0) - )) - .is_ok()); - assert_eq!( - target.data.lock().submitted_proofs, - vec![TestTransactionProof(test_transaction_hash(0))], - ); - } - - fn ensure_relay_single_failure(source: TestTransactionsSource, target: TestTransactionsTarget) { - assert!(async_std::task::block_on(relay_single_transaction_proof( - &source, - &target, - test_transaction_hash(0), - )) - .is_err()); - assert!(target.data.lock().submitted_proofs.is_empty()); - } - - #[test] - fn ready_transaction_proof_relayed_immediately() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - ensure_relay_single_success(&source, &target) - } - - #[test] - fn relay_transaction_proof_waits_for_transaction_to_be_mined() { - let source = TestTransactionsSource::new(Box::new(|source_data| { - assert_eq!(source_data.transaction_block, Ok(None)); - source_data.transaction_block = Ok(Some((test_block_id(), 0))); - })); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - - // transaction is not yet mined, but will be available after first wait (tick) - source.data.lock().transaction_block = Ok(None); - - ensure_relay_single_success(&source, &target) - } - - #[test] - fn relay_transaction_fails_when_transaction_retrieval_fails() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - - source.data.lock().transaction_block = Err(TestError(false)); - - ensure_relay_single_failure(source, target) - } - - #[test] - fn relay_transaction_fails_when_proof_retrieval_fails() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - - source - .data - .lock() - .proofs_to_fail - .insert(test_transaction_hash(0), TestError(false)); - - ensure_relay_single_failure(source, target) - } - - #[test] - fn relay_transaction_proof_waits_for_header_to_be_imported() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|target_data| { - assert_eq!(target_data.is_header_known, Ok(false)); - target_data.is_header_known = Ok(true); - })); - - // header is not yet imported, but will be available after first wait (tick) - target.data.lock().is_header_known = Ok(false); - - ensure_relay_single_success(&source, &target) - } - - #[test] - fn relay_transaction_proof_fails_when_is_header_known_fails() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - - target.data.lock().is_header_known = Err(TestError(false)); - - ensure_relay_single_failure(source, target) - } - - #[test] - fn relay_transaction_proof_waits_for_header_to_be_finalized() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|target_data| { - assert_eq!(target_data.is_header_finalized, Ok(false)); - target_data.is_header_finalized = Ok(true); - })); - - // header is not yet finalized, but will be available after first wait (tick) - target.data.lock().is_header_finalized = Ok(false); - - ensure_relay_single_success(&source, &target) - } - - #[test] - fn relay_transaction_proof_fails_when_is_header_finalized_fails() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - - target.data.lock().is_header_finalized = Err(TestError(false)); - - ensure_relay_single_failure(source, target) - } - - #[test] - fn relay_transaction_proof_fails_when_target_node_rejects_proof() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - - target.data.lock().transactions_to_accept.remove(&test_transaction_hash(0)); - - ensure_relay_single_success(&source, &target) - } - - fn test_relay_block_transactions( - source: &TestTransactionsSource, - target: &TestTransactionsTarget, - pre_relayed: RelayedBlockTransactions, - ) -> Result<RelayedBlockTransactions, RelayedBlockTransactions> { -
async_std::task::block_on(relay_block_transactions( - source, - target, - &TestBlock( - test_block_id(), - vec![test_transaction(0), test_transaction(1), test_transaction(2)], - ), - pre_relayed, - )) - .map_err(|(_, transactions)| transactions) - } - - #[test] - fn relay_block_transactions_process_all_transactions() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - - // let's only accept tx#1 - target.data.lock().transactions_to_accept.remove(&test_transaction_hash(0)); - target.data.lock().transactions_to_accept.insert(test_transaction_hash(1)); - - let relayed_transactions = - test_relay_block_transactions(&source, &target, Default::default()); - assert_eq!( - relayed_transactions, - Ok(RelayedBlockTransactions { processed: 3, relayed: 1, failed: 0 }), - ); - assert_eq!( - target.data.lock().submitted_proofs, - vec![TestTransactionProof(test_transaction_hash(1))], - ); - } - - #[test] - fn relay_block_transactions_ignores_transaction_failure() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - - // let's reject proof for tx#0 - source - .data - .lock() - .proofs_to_fail - .insert(test_transaction_hash(0), TestError(false)); - - let relayed_transactions = - test_relay_block_transactions(&source, &target, Default::default()); - assert_eq!( - relayed_transactions, - Ok(RelayedBlockTransactions { processed: 3, relayed: 0, failed: 1 }), - ); - assert_eq!(target.data.lock().submitted_proofs, vec![],); - } - - #[test] - fn relay_block_transactions_fails_on_connection_error() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - - // fail with connection error when preparing proof for tx#1 - source - .data - .lock() - .proofs_to_fail - .insert(test_transaction_hash(1), TestError(true)); - - let relayed_transactions = - test_relay_block_transactions(&source, &target, Default::default()); - assert_eq!( - relayed_transactions, - Err(RelayedBlockTransactions { processed: 1, relayed: 1, failed: 0 }), - ); - assert_eq!( - target.data.lock().submitted_proofs, - vec![TestTransactionProof(test_transaction_hash(0))], - ); - - // now do not fail on tx#2 - source.data.lock().proofs_to_fail.clear(); - // and also relay tx#3 - target.data.lock().transactions_to_accept.insert(test_transaction_hash(2)); - - let relayed_transactions = - test_relay_block_transactions(&source, &target, relayed_transactions.unwrap_err()); - assert_eq!( - relayed_transactions, - Ok(RelayedBlockTransactions { processed: 3, relayed: 2, failed: 0 }), - ); - assert_eq!( - target.data.lock().submitted_proofs, - vec![ - TestTransactionProof(test_transaction_hash(0)), - TestTransactionProof(test_transaction_hash(2)) - ], - ); - } -} diff --git a/relays/exchange/src/exchange_loop.rs b/relays/exchange/src/exchange_loop.rs deleted file mode 100644 index 84d216f43968..000000000000 --- a/relays/exchange/src/exchange_loop.rs +++ /dev/null @@ -1,322 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>. - -//! Relaying proofs of exchange transactions. - -use crate::{ - error::Error, - exchange::{ - relay_block_transactions, BlockNumberOf, RelayedBlockTransactions, SourceClient, - TargetClient, TransactionProofPipeline, - }, - exchange_loop_metrics::ExchangeLoopMetrics, -}; - -use crate::error::ErrorOf; -use backoff::backoff::Backoff; -use futures::{future::FutureExt, select}; -use num_traits::One; -use relay_utils::{ - metrics::{GlobalMetrics, MetricsParams}, - retry_backoff, FailedClient, MaybeConnectionError, -}; -use std::future::Future; - -/// Transactions proofs relay state. -#[derive(Debug)] -pub struct TransactionProofsRelayState<BlockNumber> { - /// Number of last header we have processed so far. - pub best_processed_header_number: BlockNumber, -} - -/// Transactions proofs relay storage. -pub trait TransactionProofsRelayStorage: 'static + Clone + Send + Sync { - /// Associated block number. - type BlockNumber: 'static + Send + Sync; - - /// Get relay state. - fn state(&self) -> TransactionProofsRelayState<Self::BlockNumber>; - /// Update relay state. - fn set_state(&mut self, state: &TransactionProofsRelayState<Self::BlockNumber>); -} - -/// In-memory storage for auto-relay loop. -#[derive(Debug, Clone)] -pub struct InMemoryStorage<BlockNumber> { - best_processed_header_number: BlockNumber, -} - -impl<BlockNumber> InMemoryStorage<BlockNumber> { - /// Created new in-memory storage with given best processed block number. - pub fn new(best_processed_header_number: BlockNumber) -> Self { - InMemoryStorage { best_processed_header_number } - } -} - -impl<BlockNumber: 'static + Clone + Copy + Send + Sync> TransactionProofsRelayStorage - for InMemoryStorage<BlockNumber> -{ - type BlockNumber = BlockNumber; - - fn state(&self) -> TransactionProofsRelayState<BlockNumber> { - TransactionProofsRelayState { - best_processed_header_number: self.best_processed_header_number, - } - } - - fn set_state(&mut self, state: &TransactionProofsRelayState<BlockNumber>) { - self.best_processed_header_number = state.best_processed_header_number; - } -} - -/// Return prefix that will be used by default to expose Prometheus metrics of the exchange loop. -pub fn metrics_prefix<P: TransactionProofPipeline>() -> String { - format!("{}_to_{}_Exchange", P::SOURCE_NAME, P::TARGET_NAME) -} - -/// Run proofs synchronization. -pub async fn run<P: TransactionProofPipeline>( - storage: impl TransactionProofsRelayStorage<BlockNumber = BlockNumberOf<P>>, - source_client: impl SourceClient<P>, - target_client: impl TargetClient<P>, - metrics_params: MetricsParams, - exit_signal: impl Future<Output = ()> + 'static + Send, -) -> Result<(), ErrorOf<P>> { - let exit_signal = exit_signal.shared(); - - relay_utils::relay_loop(source_client, target_client) - .with_metrics(Some(metrics_prefix::<P>()), metrics_params) - .loop_metric(ExchangeLoopMetrics::new)? - .standalone_metric(GlobalMetrics::new)? - .expose() - .await? - .run(metrics_prefix::<P>(), move |source_client, target_client, metrics| { - run_until_connection_lost( - storage.clone(), - source_client, - target_client, - metrics, - exit_signal.clone(), - ) - }) - .await - .map_err(Error::Utils) -} - -/// Run proofs synchronization. -async fn run_until_connection_lost<P: TransactionProofPipeline>( - mut storage: impl TransactionProofsRelayStorage<BlockNumber = BlockNumberOf<P>>, - source_client: impl SourceClient<P>, - target_client: impl TargetClient<P>, - metrics_exch: Option<ExchangeLoopMetrics>, - exit_signal: impl Future<Output = ()> + Send, -) -> Result<(), FailedClient> { - let mut retry_backoff = retry_backoff(); - let mut state = storage.state(); - let mut current_finalized_block = None; - - let exit_signal = exit_signal.fuse(); - - futures::pin_mut!(exit_signal); - - loop { - let iteration_result = run_loop_iteration( - &mut storage, - &source_client, - &target_client, - &mut state, - &mut current_finalized_block, - metrics_exch.as_ref(), - ) - .await; - - if let Err((is_connection_error, failed_client)) = iteration_result { - if is_connection_error { - return Err(failed_client) - } - - let retry_timeout = - retry_backoff.next_backoff().unwrap_or(relay_utils::relay_loop::RECONNECT_DELAY); - select! { - _ = async_std::task::sleep(retry_timeout).fuse() => {}, - _ = exit_signal => return Ok(()), - } - } else { - retry_backoff.reset(); - - select! { - _ = source_client.tick().fuse() => {}, - _ = exit_signal => return Ok(()), - } - } - } -} - -/// Run exchange loop until we need to break. -async fn run_loop_iteration<P: TransactionProofPipeline>( - storage: &mut impl TransactionProofsRelayStorage<BlockNumber = BlockNumberOf<P>>, - source_client: &impl SourceClient<P>, - target_client: &impl TargetClient<P>, - state: &mut TransactionProofsRelayState<BlockNumberOf<P>>, - current_finalized_block: &mut Option<(P::Block, RelayedBlockTransactions)>, - exchange_loop_metrics: Option<&ExchangeLoopMetrics>, -) -> Result<(), (bool, FailedClient)> { - let best_finalized_header_id = match target_client.best_finalized_header_id().await { - Ok(best_finalized_header_id) => { - log::debug!( - target: "bridge", - "Got best finalized {} block from {} node: {:?}", - P::SOURCE_NAME, - P::TARGET_NAME, - best_finalized_header_id, - ); - - best_finalized_header_id - }, - Err(err) => { - log::error!( - target: "bridge", - "Failed to retrieve best {} header id from {} node: {:?}. Going to retry...", - P::SOURCE_NAME, - P::TARGET_NAME, - err, - ); - - return Err((err.is_connection_error(), FailedClient::Target)) - }, - }; - - loop { - // if we already have some finalized block body, try to relay its transactions - if let Some((block, relayed_transactions)) = current_finalized_block.take() { - let result = relay_block_transactions( - source_client, - target_client, - &block, - relayed_transactions, - ) - .await; - - match result { - Ok(relayed_transactions) => { - log::info!( - target: "bridge", - "Relay has processed {} block #{}. Total/Relayed/Failed transactions: {}/{}/{}", - P::SOURCE_NAME, - state.best_processed_header_number, - relayed_transactions.processed, - relayed_transactions.relayed, - relayed_transactions.failed, - ); - - state.best_processed_header_number = - state.best_processed_header_number + One::one(); - storage.set_state(state); - - if let Some(exchange_loop_metrics) = exchange_loop_metrics { - exchange_loop_metrics.update::<P>( - state.best_processed_header_number, - best_finalized_header_id.0, - relayed_transactions, - ); - } - - // we have just updated state => proceed to next block retrieval - }, - Err((failed_client, relayed_transactions)) => { - *current_finalized_block = Some((block, relayed_transactions)); - return Err((true, failed_client)) - }, - } - } - - // we may need to retrieve finalized block body from source node - if best_finalized_header_id.0 > state.best_processed_header_number { - let next_block_number = state.best_processed_header_number + One::one(); - let result = source_client.block_by_number(next_block_number).await; - - match result { - Ok(block) => { - *current_finalized_block = Some((block, RelayedBlockTransactions::default())); - - // we have received new finalized block => go back to relay its transactions - continue - }, - Err(err) => { - log::error!( - target: "bridge", - "Failed to retrieve canonical block #{} from {} node: {:?}. Going to retry...", - next_block_number, - P::SOURCE_NAME, - err, - ); - - return Err((err.is_connection_error(), FailedClient::Source)) - }, - } - } - - // there are no transactions we need to relay => wait for new data - return Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::exchange::tests::{ - test_next_block, test_next_block_id, test_transaction_hash, TestTransactionProof, - TestTransactionsSource, TestTransactionsTarget, - }; - use futures::{future::FutureExt, stream::StreamExt}; - - #[test] - fn exchange_loop_is_able_to_relay_proofs() { - let storage = InMemoryStorage { best_processed_header_number: 0 }; - let target = - TestTransactionsTarget::new(Box::new(|_| unreachable!("no target ticks allowed"))); - let target_data = target.data.clone(); - let (exit_sender, exit_receiver) = futures::channel::mpsc::unbounded(); - - let source = TestTransactionsSource::new(Box::new(move |data| { - let transaction1_relayed = target_data - .lock() - .submitted_proofs - .contains(&TestTransactionProof(test_transaction_hash(0))); - let transaction2_relayed = target_data - .lock() - .submitted_proofs - .contains(&TestTransactionProof(test_transaction_hash(1))); - match (transaction1_relayed, transaction2_relayed) { - (true, true) => exit_sender.unbounded_send(()).unwrap(), - (true, false) => { - data.block = Ok(test_next_block()); - target_data.lock().best_finalized_header_id = Ok(test_next_block_id()); - target_data.lock().transactions_to_accept.insert(test_transaction_hash(1)); - }, - _ => (), - } - })); - - let _ = async_std::task::block_on(run( - storage, - source, - target, - MetricsParams::disabled(), - exit_receiver.into_future().map(|(_, _)| ()), - )); - } -}
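A sketch of driving the loop above for some pipeline `P`; `source_client` and `target_client` stand in for real `SourceClient<P>`/`TargetClient<P>` implementations, and `MetricsParams::disabled()` mirrors the test above:

// In-memory state: a restart re-relays from the block given here; a persistent
// `TransactionProofsRelayStorage` implementation would survive restarts.
let storage = InMemoryStorage::new(100u64);
let _ = run(
    storage,
    source_client,
    target_client,
    MetricsParams::disabled(),
    futures::future::pending::<()>(), // never signal exit
)
.await;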
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Metrics for currency-exchange relay loop. - -use crate::exchange::{BlockNumberOf, RelayedBlockTransactions, TransactionProofPipeline}; -use relay_utils::metrics::{ - metric_name, register, Counter, CounterVec, GaugeVec, Opts, PrometheusError, Registry, U64, -}; - -/// Exchange transactions relay metrics. -#[derive(Clone)] -pub struct ExchangeLoopMetrics { - /// Best finalized block numbers - "processed" and "known". - best_block_numbers: GaugeVec, - /// Number of processed blocks ("total"). - processed_blocks: Counter, - /// Number of processed transactions ("total", "relayed" and "failed"). - processed_transactions: CounterVec, -} - -impl ExchangeLoopMetrics { - /// Create and register exchange loop metrics. - pub fn new(registry: &Registry, prefix: Option<&str>) -> Result { - Ok(ExchangeLoopMetrics { - best_block_numbers: register( - GaugeVec::new( - Opts::new( - metric_name(prefix, "best_block_numbers"), - "Best finalized block numbers", - ), - &["type"], - )?, - registry, - )?, - processed_blocks: register( - Counter::new( - metric_name(prefix, "processed_blocks"), - "Total number of processed blocks", - )?, - registry, - )?, - processed_transactions: register( - CounterVec::new( - Opts::new( - metric_name(prefix, "processed_transactions"), - "Total number of processed transactions", - ), - &["type"], - )?, - registry, - )?, - }) - } -} - -impl ExchangeLoopMetrics { - /// Update metrics when single block is relayed. - pub fn update( - &self, - best_processed_block_number: BlockNumberOf
<P>
, - best_known_block_number: BlockNumberOf
<P>
, - relayed_transactions: RelayedBlockTransactions, - ) { - self.best_block_numbers - .with_label_values(&["processed"]) - .set(best_processed_block_number.into()); - self.best_block_numbers - .with_label_values(&["known"]) - .set(best_known_block_number.into()); - - self.processed_blocks.inc(); - - self.processed_transactions - .with_label_values(&["total"]) - .inc_by(relayed_transactions.processed as _); - self.processed_transactions - .with_label_values(&["relayed"]) - .inc_by(relayed_transactions.relayed as _); - self.processed_transactions - .with_label_values(&["failed"]) - .inc_by(relayed_transactions.failed as _); - } -} diff --git a/relays/exchange/src/lib.rs b/relays/exchange/src/lib.rs deleted file mode 100644 index d167e5aa398e..000000000000 --- a/relays/exchange/src/lib.rs +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Relaying [`currency-exchange`](../pallet_bridge_currency_exchange/index.html) application -//! specific data. Currency exchange application allows exchanging tokens between bridged chains. -//! This module provides entrypoints for crafting and submitting (single and multiple) -//! proof-of-exchange-at-source-chain transaction(s) to target chain. - -#![warn(missing_docs)] - -pub mod error; -pub mod exchange; -pub mod exchange_loop; -pub mod exchange_loop_metrics; diff --git a/relays/finality/Cargo.toml b/relays/finality/Cargo.toml index 944da9837ffc..645ac10775ba 100644 --- a/relays/finality/Cargo.toml +++ b/relays/finality/Cargo.toml @@ -12,7 +12,6 @@ async-trait = "0.1.40" backoff = "0.2" bp-header-chain = { path = "../../primitives/header-chain" } futures = "0.3.5" -headers-relay = { path = "../headers" } log = "0.4.11" num-traits = "0.2" relay-utils = { path = "../utils" } diff --git a/relays/finality/src/finality_loop.rs b/relays/finality/src/finality_loop.rs index 191d18383793..320b44d310f0 100644 --- a/relays/finality/src/finality_loop.rs +++ b/relays/finality/src/finality_loop.rs @@ -19,17 +19,17 @@ //! is the mandatory headers, which we always submit to the target node. For such headers, we //! assume that the persistent proof either exists, or will eventually become available. 
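[Editor's note] The `finality_loop.rs` hunk below replaces the old metric hooks (a `SyncLoopMetrics::new` constructor callback plus a standalone `GlobalMetrics`) with a `SyncLoopMetrics` value that is built up front and registered through the `Metric` trait introduced in `relays/finality/src/sync_loop_metrics.rs` further down in this diff. A minimal sketch of that registration flow, assuming the `SyncLoopMetrics` API shown in the new file (the helper function and the "demo" prefix are illustrative, not part of the change):

```rust
use crate::sync_loop_metrics::SyncLoopMetrics;
use relay_utils::metrics::{Metric, PrometheusError, Registry};

// Hypothetical helper: build the metric (the optional prefix is prepended to the
// `best_block_numbers` gauge name), register it, then feed it block numbers.
fn build_and_register(registry: &Registry) -> Result<SyncLoopMetrics, PrometheusError> {
    let metrics = SyncLoopMetrics::new(Some("demo"))?;
    metrics.register(registry)?; // `Metric::register` adds the gauge vector to the registry
    metrics.update_best_block_at_source(100u32); // any `Into<u64>` number type works
    metrics.update_best_block_at_target(98u32);
    Ok(metrics)
}
```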
-use crate::{FinalityProof, FinalitySyncPipeline, SourceHeader}; +use crate::{ + sync_loop_metrics::SyncLoopMetrics, FinalityProof, FinalitySyncPipeline, SourceHeader, +}; use async_trait::async_trait; use backoff::backoff::Backoff; use futures::{select, Future, FutureExt, Stream, StreamExt}; -use headers_relay::sync_loop_metrics::SyncLoopMetrics; use num_traits::{One, Saturating}; use relay_utils::{ - metrics::{GlobalMetrics, MetricsParams}, - relay_loop::Client as RelayClient, - retry_backoff, FailedClient, MaybeConnectionError, + metrics::MetricsParams, relay_loop::Client as RelayClient, retry_backoff, FailedClient, + MaybeConnectionError, }; use std::{ pin::Pin, @@ -113,9 +113,8 @@ pub async fn run<P: FinalitySyncPipeline>( ) -> Result<(), relay_utils::Error> { let exit_signal = exit_signal.shared(); relay_utils::relay_loop(source_client, target_client) - .with_metrics(Some(metrics_prefix::
<P>
()), metrics_params) - .loop_metric(SyncLoopMetrics::new)? - .standalone_metric(GlobalMetrics::new)? + .with_metrics(metrics_params) + .loop_metric(SyncLoopMetrics::new(Some(&metrics_prefix::
<P>
()))?)? .expose() .await? .run(metrics_prefix::
<P>
(), move |source_client, target_client, metrics| { diff --git a/relays/finality/src/lib.rs index 78ef33f1b376..6421d13b787c 100644 --- a/relays/finality/src/lib.rs +++ b/relays/finality/src/lib.rs @@ -28,6 +28,7 @@ use std::fmt::Debug; mod finality_loop; mod finality_loop_tests; +mod sync_loop_metrics; /// Finality proofs synchronization pipeline. pub trait FinalitySyncPipeline: 'static + Clone + Debug + Send + Sync { diff --git a/relays/finality/src/sync_loop_metrics.rs new file mode 100644 index 000000000000..1f65dac17c05 --- /dev/null +++ b/relays/finality/src/sync_loop_metrics.rs @@ -0,0 +1,64 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>. + +//! Metrics for headers synchronization relay loop. + +use relay_utils::metrics::{ + metric_name, register, GaugeVec, Metric, Opts, PrometheusError, Registry, U64, +}; + +/// Headers sync metrics. +#[derive(Clone)] +pub struct SyncLoopMetrics { + /// Best syncing headers at "source" and "target" nodes. + best_block_numbers: GaugeVec<U64>, +} + +impl SyncLoopMetrics { + /// Create and register headers loop metrics. + pub fn new(prefix: Option<&str>) -> Result<Self, PrometheusError> { + Ok(SyncLoopMetrics { + best_block_numbers: GaugeVec::new( + Opts::new( + metric_name(prefix, "best_block_numbers"), + "Best block numbers on source and target nodes", + ), + &["node"], + )?, + }) + } + + /// Update best block number at source. + pub fn update_best_block_at_source<Number: Into<u64>>(&self, source_best_number: Number) { + self.best_block_numbers + .with_label_values(&["source"]) + .set(source_best_number.into()); + } + + /// Update best block number at target. + pub fn update_best_block_at_target<Number: Into<u64>>(&self, target_best_number: Number) { + self.best_block_numbers + .with_label_values(&["target"]) + .set(target_best_number.into()); + } +} + +impl Metric for SyncLoopMetrics { + fn register(&self, registry: &Registry) -> Result<(), PrometheusError> { + register(self.best_block_numbers.clone(), registry)?; + Ok(()) + } +} diff --git a/relays/headers/Cargo.toml deleted file mode 100644 index 31d3166a9978..000000000000 --- a/relays/headers/Cargo.toml +++ /dev/null @@ -1,17 +0,0 @@ -[package] -name = "headers-relay" -version = "0.1.0" -authors = ["Parity Technologies <admin@parity.io>"] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -async-std = "1.6.5" -async-trait = "0.1.40" -backoff = "0.2" -futures = "0.3.5" -linked-hash-map = "0.5.3" -log = "0.4.11" -num-traits = "0.2" -parking_lot = "0.11.0" -relay-utils = { path = "../utils" } diff --git a/relays/headers/src/headers.rs deleted file mode 100644 index 8d67c1cf4857..000000000000 --- a/relays/headers/src/headers.rs +++ /dev/null @@ -1,1703 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd.
-// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>. - -//! Headers queue - the intermediate buffer that is filled when headers are read -//! from the source chain. Headers are removed from the queue once they become -//! known to the target chain. Inside, there are several sub-queues, where headers -//! may stay until source/target chain state isn't updated. When a header reaches the -//! `ready` sub-queue, it may be submitted to the target chain. - -use crate::sync_types::{ - HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader, SourceHeader, -}; - -use linked_hash_map::LinkedHashMap; -use num_traits::{One, Zero}; -use relay_utils::HeaderId; -use std::{ - collections::{ - btree_map::Entry as BTreeMapEntry, hash_map::Entry as HashMapEntry, BTreeMap, HashMap, - HashSet, - }, - time::{Duration, Instant}, -}; - -type HeadersQueue
<P>
= BTreeMap< -
<P as HeadersSyncPipeline>
::Number, - HashMap<
<P as HeadersSyncPipeline>
::Hash, QueuedHeader
<P>
>, ->; -type SyncedChildren
<P>
= BTreeMap< -
<P as HeadersSyncPipeline>
::Number, - HashMap<
<P as HeadersSyncPipeline>
::Hash, HashSet<HeaderIdOf<P>>>, ->; -type KnownHeaders
<P>
= BTreeMap< -
<P as HeadersSyncPipeline>
::Number, - HashMap<
<P as HeadersSyncPipeline>
::Hash, HeaderStatus>, ->; - -/// We're trying to fetch completion data for single header at this interval. -const RETRY_FETCH_COMPLETION_INTERVAL: Duration = Duration::from_secs(20); - -/// Headers queue. -#[derive(Debug)] -pub struct QueuedHeaders<P: HeadersSyncPipeline> { - /// Headers that are received from source node, but we (native sync code) have - /// never seen their parents. So we need to check if we can/should submit this header. - maybe_orphan: HeadersQueue
<P>
, - /// Headers that are received from source node, and we (native sync code) have - /// checked that Substrate runtime doesn't know their parents. So we need to submit parents - /// first. - orphan: HeadersQueue
<P>
, - /// Headers that are ready to be submitted to target node, but we need to check - /// whether submission requires extra data to be provided. - maybe_extra: HeadersQueue
<P>
, - /// Headers that are ready to be submitted to target node, but we need to retrieve - /// extra data first. - extra: HeadersQueue
<P>
, - /// Headers that are ready to be submitted to target node. - ready: HeadersQueue
<P>
, - /// Headers that are ready to be submitted to target node, but their ancestor is incomplete. - /// Thus we're waiting for these ancestors to be completed first. - /// Note that the incomplete header itself is synced and it isn't in this queue. - incomplete: HeadersQueue
<P>
, - /// Headers that are (we believe) currently submitted to target node by our, - /// not-yet mined transactions. - submitted: HeadersQueue
<P>
, - /// Synced headers children. We need it to support case when header is synced, but some of - /// its parents are incomplete. - synced_children: SyncedChildren
<P>
, - /// Pointers to all headers that we ever seen and we believe we can touch in the future. - known_headers: KnownHeaders
<P>
, - /// Headers that are waiting for completion data from source node. Mapped (and auto-sorted - /// by) to the last fetch time. - incomplete_headers: LinkedHashMap<HeaderIdOf<P>, Option<Instant>>, - /// Headers that are waiting to be completed at target node. Auto-sorted by insertion time. - completion_data: LinkedHashMap<HeaderIdOf<P>, P::Completion>, - /// Best synced block number. - best_synced_number: P::Number, - /// Pruned blocks border. We do not store or accept any blocks with number less than - /// this number. - prune_border: P::Number, -} - -impl<P: HeadersSyncPipeline> Default for QueuedHeaders
<P>
{ - fn default() -> Self { - QueuedHeaders { - maybe_orphan: HeadersQueue::new(), - orphan: HeadersQueue::new(), - maybe_extra: HeadersQueue::new(), - extra: HeadersQueue::new(), - ready: HeadersQueue::new(), - incomplete: HeadersQueue::new(), - submitted: HeadersQueue::new(), - synced_children: SyncedChildren::
<P>
::new(), - known_headers: KnownHeaders::
<P>
::new(), - incomplete_headers: LinkedHashMap::new(), - completion_data: LinkedHashMap::new(), - best_synced_number: Zero::zero(), - prune_border: Zero::zero(), - } - } -} - -impl<P: HeadersSyncPipeline> QueuedHeaders
<P>
{ - /// Returns prune border. - #[cfg(test)] - pub fn prune_border(&self) -> P::Number { - self.prune_border - } - - /// Returns number of headers that are currently in given queue. - pub fn headers_in_status(&self, status: HeaderStatus) -> usize { - match status { - HeaderStatus::Unknown | HeaderStatus::Synced => 0, - HeaderStatus::MaybeOrphan => - self.maybe_orphan.values().fold(0, |total, headers| total + headers.len()), - HeaderStatus::Orphan => - self.orphan.values().fold(0, |total, headers| total + headers.len()), - HeaderStatus::MaybeExtra => - self.maybe_extra.values().fold(0, |total, headers| total + headers.len()), - HeaderStatus::Extra => - self.extra.values().fold(0, |total, headers| total + headers.len()), - HeaderStatus::Ready => - self.ready.values().fold(0, |total, headers| total + headers.len()), - HeaderStatus::Incomplete => - self.incomplete.values().fold(0, |total, headers| total + headers.len()), - HeaderStatus::Submitted => - self.submitted.values().fold(0, |total, headers| total + headers.len()), - } - } - - /// Returns number of headers that are currently in the queue. - pub fn total_headers(&self) -> usize { - self.maybe_orphan.values().fold(0, |total, headers| total + headers.len()) + - self.orphan.values().fold(0, |total, headers| total + headers.len()) + - self.maybe_extra.values().fold(0, |total, headers| total + headers.len()) + - self.extra.values().fold(0, |total, headers| total + headers.len()) + - self.ready.values().fold(0, |total, headers| total + headers.len()) + - self.incomplete.values().fold(0, |total, headers| total + headers.len()) - } - - /// Returns number of best block in the queue. - pub fn best_queued_number(&self) -> P::Number { - std::cmp::max( - self.maybe_orphan.keys().next_back().cloned().unwrap_or_else(Zero::zero), - std::cmp::max( - self.orphan.keys().next_back().cloned().unwrap_or_else(Zero::zero), - std::cmp::max( - self.maybe_extra.keys().next_back().cloned().unwrap_or_else(Zero::zero), - std::cmp::max( - self.extra.keys().next_back().cloned().unwrap_or_else(Zero::zero), - std::cmp::max( - self.ready.keys().next_back().cloned().unwrap_or_else(Zero::zero), - std::cmp::max( - self.incomplete - .keys() - .next_back() - .cloned() - .unwrap_or_else(Zero::zero), - self.submitted - .keys() - .next_back() - .cloned() - .unwrap_or_else(Zero::zero), - ), - ), - ), - ), - ), - ) - } - - /// Returns number of best synced block we have ever seen. It is either less - /// than `best_queued_number()`, or points to last synced block if queue is empty. - pub fn best_synced_number(&self) -> P::Number { - self.best_synced_number - } - - /// Returns synchronization status of the header. - pub fn status(&self, id: &HeaderIdOf
<P>
) -> HeaderStatus { - self.known_headers - .get(&id.0) - .and_then(|x| x.get(&id.1)) - .cloned() - .unwrap_or(HeaderStatus::Unknown) - } - - /// Get the oldest header from given queue. - pub fn header(&self, status: HeaderStatus) -> Option<&QueuedHeader
<P>
> { - match status { - HeaderStatus::Unknown | HeaderStatus::Synced => None, - HeaderStatus::MaybeOrphan => oldest_header(&self.maybe_orphan), - HeaderStatus::Orphan => oldest_header(&self.orphan), - HeaderStatus::MaybeExtra => oldest_header(&self.maybe_extra), - HeaderStatus::Extra => oldest_header(&self.extra), - HeaderStatus::Ready => oldest_header(&self.ready), - HeaderStatus::Incomplete => oldest_header(&self.incomplete), - HeaderStatus::Submitted => oldest_header(&self.submitted), - } - } - - /// Get the oldest headers from given queue until functor will return false. - pub fn headers( - &self, - status: HeaderStatus, - f: impl FnMut(&QueuedHeader
<P>
) -> bool, - ) -> Option<Vec<&QueuedHeader<P>>> { - match status { - HeaderStatus::Unknown | HeaderStatus::Synced => None, - HeaderStatus::MaybeOrphan => oldest_headers(&self.maybe_orphan, f), - HeaderStatus::Orphan => oldest_headers(&self.orphan, f), - HeaderStatus::MaybeExtra => oldest_headers(&self.maybe_extra, f), - HeaderStatus::Extra => oldest_headers(&self.extra, f), - HeaderStatus::Ready => oldest_headers(&self.ready, f), - HeaderStatus::Incomplete => oldest_headers(&self.incomplete, f), - HeaderStatus::Submitted => oldest_headers(&self.submitted, f), - } - } - - /// Appends new header, received from the source node, to the queue. - pub fn header_response(&mut self, header: P::Header) { - let id = header.id(); - let status = self.status(&id); - if status != HeaderStatus::Unknown { - log::debug!( - target: "bridge", - "Ignoring new {} header: {:?}. Status is {:?}.", - P::SOURCE_NAME, - id, - status, - ); - return - } - - if id.0 < self.prune_border { - log::debug!( - target: "bridge", - "Ignoring ancient new {} header: {:?}.", - P::SOURCE_NAME, - id, - ); - return - } - - let parent_id = header.parent_id(); - let parent_status = self.status(&parent_id); - let header = QueuedHeader::new(header); - - let status = match parent_status { - HeaderStatus::Unknown | HeaderStatus::MaybeOrphan => { - insert_header(&mut self.maybe_orphan, id, header); - HeaderStatus::MaybeOrphan - }, - HeaderStatus::Orphan => { - insert_header(&mut self.orphan, id, header); - HeaderStatus::Orphan - }, - HeaderStatus::MaybeExtra | - HeaderStatus::Extra | - HeaderStatus::Ready | - HeaderStatus::Incomplete | - HeaderStatus::Submitted | - HeaderStatus::Synced => { - insert_header(&mut self.maybe_extra, id, header); - HeaderStatus::MaybeExtra - }, - }; - - self.known_headers.entry(id.0).or_default().insert(id.1, status); - log::debug!( - target: "bridge", - "Queueing new {} header: {:?}. Queue: {:?}.", - P::SOURCE_NAME, - id, - status, - ); - } - - /// Receive the best header from the target node. - pub fn target_best_header_response(&mut self, id: &HeaderIdOf
<P>
) { - self.header_synced(id) - } - - /// Receive target node response for MaybeOrphan request. - pub fn maybe_orphan_response(&mut self, id: &HeaderIdOf
<P>
, response: bool) { - if !response { - move_header_descendants::
<P>
( - &mut [&mut self.maybe_orphan], - &mut self.orphan, - &mut self.known_headers, - HeaderStatus::Orphan, - id, - ); - return - } - - move_header_descendants::
<P>
( - &mut [&mut self.maybe_orphan, &mut self.orphan], - &mut self.maybe_extra, - &mut self.known_headers, - HeaderStatus::MaybeExtra, - id, - ); - } - - /// Receive target node response for MaybeExtra request. - pub fn maybe_extra_response(&mut self, id: &HeaderIdOf
<P>
, response: bool) { - let (destination_status, destination_queue) = if response { - (HeaderStatus::Extra, &mut self.extra) - } else if self.is_parent_incomplete(id) { - (HeaderStatus::Incomplete, &mut self.incomplete) - } else { - (HeaderStatus::Ready, &mut self.ready) - }; - - move_header( - &mut self.maybe_extra, - destination_queue, - &mut self.known_headers, - destination_status, - id, - |header| header, - ); - } - - /// Receive extra from source node. - pub fn extra_response(&mut self, id: &HeaderIdOf
<P>
, extra: P::Extra) { - let (destination_status, destination_queue) = if self.is_parent_incomplete(id) { - (HeaderStatus::Incomplete, &mut self.incomplete) - } else { - (HeaderStatus::Ready, &mut self.ready) - }; - - // move header itself from extra to ready queue - move_header( - &mut self.extra, - destination_queue, - &mut self.known_headers, - destination_status, - id, - |header| header.set_extra(extra), - ); - } - - /// Receive completion response from source node. - pub fn completion_response(&mut self, id: &HeaderIdOf
<P>
, completion: Option<P::Completion>) { - let completion = match completion { - Some(completion) => completion, - None => { - log::debug!( - target: "bridge", - "{} Node is still missing completion data for header: {:?}. Will retry later.", - P::SOURCE_NAME, - id, - ); - - return - }, - }; - - // do not remove from `incomplete_headers` here, because otherwise we'll miss - // completion 'notification' - // this could lead to duplicate completion retrieval (if completion transaction isn't mined - // for too long) - // - // instead, we're moving entry to the end of the queue, so that completion data won't be - // refetched instantly - if self.incomplete_headers.remove(id).is_some() { - log::debug!( - target: "bridge", - "Received completion data from {} for header: {:?}", - P::SOURCE_NAME, - id, - ); - - self.completion_data.insert(*id, completion); - self.incomplete_headers.insert(*id, Some(Instant::now())); - } - } - - /// When header is submitted to target node. - pub fn headers_submitted(&mut self, ids: Vec<HeaderIdOf<P>>) { - for id in ids { - move_header( - &mut self.ready, - &mut self.submitted, - &mut self.known_headers, - HeaderStatus::Submitted, - &id, - |header| header, - ); - } - } - - /// When header completion data is sent to target node. - pub fn header_completed(&mut self, id: &HeaderIdOf
<P>
) { - if self.completion_data.remove(id).is_some() { - log::debug!( - target: "bridge", - "Sent completion data to {} for header: {:?}", - P::TARGET_NAME, - id, - ); - - // transaction can be dropped by target chain nodes => it would never be mined - // - // in current implementation the sync loop would wait for some time && if best - // **source** header won't change on **target** node, then the sync will be restarted - // => we'll resubmit the same completion data again (the same is true for submitted - // headers) - // - // the other option would be to track emitted transactions at least on target node, - // but it won't give us 100% guarantee anyway - // - // => we're just dropping completion data just after it has been submitted - } - } - - /// Marks given headers incomplete. - pub fn add_incomplete_headers( - &mut self, - make_header_incomplete: bool, - new_incomplete_headers: Vec<HeaderIdOf<P>>, - ) { - for new_incomplete_header in new_incomplete_headers { - if make_header_incomplete { - self.header_synced(&new_incomplete_header); - } - - let move_origins = - select_synced_children::
<P>
(&self.synced_children, &new_incomplete_header); - let move_origins = - move_origins.into_iter().chain(std::iter::once(new_incomplete_header)); - for move_origin in move_origins { - move_header_descendants::
<P>
( - &mut [&mut self.ready, &mut self.submitted], - &mut self.incomplete, - &mut self.known_headers, - HeaderStatus::Incomplete, - &move_origin, - ); - } - - if make_header_incomplete { - log::debug!( - target: "bridge", - "Scheduling completion data retrieval for header: {:?}", - new_incomplete_header, - ); - - self.incomplete_headers.insert(new_incomplete_header, None); - } - } - } - - /// When incomplete headers ids are received from target node. - pub fn incomplete_headers_response(&mut self, ids: HashSet<HeaderIdOf<P>>) { - // all new incomplete headers are marked Synced and all their descendants - // are moved from Ready/Submitted to Incomplete queue - let new_incomplete_headers = ids - .iter() - .filter(|id| { - !self.incomplete_headers.contains_key(id) && !self.completion_data.contains_key(id) - }) - .cloned() - .collect::<Vec<_>>(); - self.add_incomplete_headers(true, new_incomplete_headers); - - // for all headers that were incompleted previously, but now are completed, we move - // all descendants from incomplete to ready - let just_completed_headers = self - .incomplete_headers - .keys() - .chain(self.completion_data.keys()) - .filter(|id| !ids.contains(id)) - .cloned() - .collect::<Vec<_>>(); - for just_completed_header in just_completed_headers { - // sub2eth rejects H if H.Parent is incomplete - // sub2sub allows 'syncing' headers like that - // => let's check if there are some synced children of just completed header - let move_origins = - select_synced_children::
<P>
(&self.synced_children, &just_completed_header); - let move_origins = - move_origins.into_iter().chain(std::iter::once(just_completed_header)); - for move_origin in move_origins { - move_header_descendants::
<P>
( - &mut [&mut self.incomplete], - &mut self.ready, - &mut self.known_headers, - HeaderStatus::Ready, - &move_origin, - ); - } - - log::debug!( - target: "bridge", - "Completion data is no longer required for header: {:?}", - just_completed_header, - ); - - self.incomplete_headers.remove(&just_completed_header); - self.completion_data.remove(&just_completed_header); - } - } - - /// Returns true if given header requires completion data. - pub fn requires_completion_data(&self, id: &HeaderIdOf
<P>
) -> bool { - self.incomplete_headers.contains_key(id) - } - - /// Returns id of the header for which we want to fetch completion data. - pub fn incomplete_header(&mut self) -> Option<HeaderIdOf<P>> { - queued_incomplete_header(&mut self.incomplete_headers, |last_fetch_time| { - let retry = match *last_fetch_time { - Some(last_fetch_time) => - last_fetch_time.elapsed() > RETRY_FETCH_COMPLETION_INTERVAL, - None => true, - }; - - if retry { - *last_fetch_time = Some(Instant::now()); - } - - retry - }) - .map(|(id, _)| id) - } - - /// Returns header completion data to upload to target node. - pub fn header_to_complete(&mut self) -> Option<(HeaderIdOf
<P>
, &P::Completion)> { - queued_incomplete_header(&mut self.completion_data, |_| true) - } - - /// Prune and never accept headers before this block. - pub fn prune(&mut self, prune_border: P::Number) { - if prune_border <= self.prune_border { - return - } - - prune_queue(&mut self.maybe_orphan, prune_border); - prune_queue(&mut self.orphan, prune_border); - prune_queue(&mut self.maybe_extra, prune_border); - prune_queue(&mut self.extra, prune_border); - prune_queue(&mut self.ready, prune_border); - prune_queue(&mut self.submitted, prune_border); - prune_queue(&mut self.incomplete, prune_border); - self.synced_children = self.synced_children.split_off(&prune_border); - prune_known_headers::
<P>
(&mut self.known_headers, prune_border); - self.prune_border = prune_border; - } - - /// Forgets all ever known headers. - pub fn clear(&mut self) { - self.maybe_orphan.clear(); - self.orphan.clear(); - self.maybe_extra.clear(); - self.extra.clear(); - self.ready.clear(); - self.incomplete.clear(); - self.submitted.clear(); - self.synced_children.clear(); - self.known_headers.clear(); - self.best_synced_number = Zero::zero(); - self.prune_border = Zero::zero(); - } - - /// Returns true if parent of this header is either incomplete or waiting for - /// its own incomplete ancestor to be completed. - fn is_parent_incomplete(&self, id: &HeaderIdOf
<P>
) -> bool { - let status = self.status(id); - let header = match status { - HeaderStatus::MaybeOrphan => header(&self.maybe_orphan, id), - HeaderStatus::Orphan => header(&self.orphan, id), - HeaderStatus::MaybeExtra => header(&self.maybe_extra, id), - HeaderStatus::Extra => header(&self.extra, id), - HeaderStatus::Ready => header(&self.ready, id), - HeaderStatus::Incomplete => header(&self.incomplete, id), - HeaderStatus::Submitted => header(&self.submitted, id), - HeaderStatus::Unknown => return false, - HeaderStatus::Synced => return false, - }; - - match header { - Some(header) => { - let parent_id = header.header().parent_id(); - self.incomplete_headers.contains_key(&parent_id) || - self.completion_data.contains_key(&parent_id) || - self.status(&parent_id) == HeaderStatus::Incomplete - }, - None => false, - } - } - - /// When we receive new Synced header from target node. - fn header_synced(&mut self, id: &HeaderIdOf
<P>
) { - // update best synced block number - self.best_synced_number = std::cmp::max(self.best_synced_number, id.0); - - // all ancestors of this header are now synced => let's remove them from - // queues - let mut current = *id; - let mut id_processed = false; - let mut previous_current = None; - loop { - let header = match self.status(&current) { - HeaderStatus::Unknown => break, - HeaderStatus::MaybeOrphan => remove_header(&mut self.maybe_orphan, &current), - HeaderStatus::Orphan => remove_header(&mut self.orphan, &current), - HeaderStatus::MaybeExtra => remove_header(&mut self.maybe_extra, &current), - HeaderStatus::Extra => remove_header(&mut self.extra, &current), - HeaderStatus::Ready => remove_header(&mut self.ready, &current), - HeaderStatus::Incomplete => remove_header(&mut self.incomplete, &current), - HeaderStatus::Submitted => remove_header(&mut self.submitted, &current), - HeaderStatus::Synced => break, - } - .expect("header has a given status; given queue has the header; qed"); - - // remember ids of all the children of the current header - let synced_children_entry = - self.synced_children.entry(current.0).or_default().entry(current.1).or_default(); - let all_queues = [ - &self.maybe_orphan, - &self.orphan, - &self.maybe_extra, - &self.extra, - &self.ready, - &self.incomplete, - &self.submitted, - ]; - for queue in &all_queues { - let children_from_queue = queue - .get(&(current.0 + One::one())) - .map(|potential_children| { - potential_children - .values() - .filter(|potential_child| { - potential_child.header().parent_id() == current - }) - .map(|child| child.id()) - .collect::<Vec<_>>() - }) - .unwrap_or_default(); - synced_children_entry.extend(children_from_queue); - } - if let Some(previous_current) = previous_current { - synced_children_entry.insert(previous_current); - } - - set_header_status::
<P>
(&mut self.known_headers, &current, HeaderStatus::Synced); - - previous_current = Some(current); - current = header.parent_id(); - id_processed = true; - } - - // remember that the header itself is synced - // (condition is here to avoid duplicate log messages) - if !id_processed { - set_header_status::
<P>
(&mut self.known_headers, id, HeaderStatus::Synced); - } - - // now let's move all descendants from maybe_orphan && orphan queues to - // maybe_extra queue - move_header_descendants::
<P>
( - &mut [&mut self.maybe_orphan, &mut self.orphan], - &mut self.maybe_extra, - &mut self.known_headers, - HeaderStatus::MaybeExtra, - id, - ); - } -} - -/// Insert header to the queue. -fn insert_header<P: HeadersSyncPipeline>( - queue: &mut HeadersQueue
<P>
, - id: HeaderIdOf
<P>
, - header: QueuedHeader
<P>
, -) { - queue.entry(id.0).or_default().insert(id.1, header); -} - -/// Remove header from the queue. -fn remove_header<P: HeadersSyncPipeline>( - queue: &mut HeadersQueue
<P>
, - id: &HeaderIdOf
<P>
, -) -> Option<QueuedHeader<P>> { - let mut headers_at = match queue.entry(id.0) { - BTreeMapEntry::Occupied(headers_at) => headers_at, - BTreeMapEntry::Vacant(_) => return None, - }; - - let header = headers_at.get_mut().remove(&id.1); - if headers_at.get().is_empty() { - headers_at.remove(); - } - header -} - -/// Get header from the queue. -fn header<'a, P: HeadersSyncPipeline>( - queue: &'a HeadersQueue
<P>
, - id: &HeaderIdOf
<P>
, -) -> Option<&'a QueuedHeader
<P>
> { - queue.get(&id.0).and_then(|by_hash| by_hash.get(&id.1)) -} - -/// Move header from source to destination queue. -/// -/// Returns ID of parent header, if header has been moved, or None otherwise. -fn move_header<P: HeadersSyncPipeline>( - source_queue: &mut HeadersQueue
<P>
, - destination_queue: &mut HeadersQueue
<P>
, - known_headers: &mut KnownHeaders
<P>
, - destination_status: HeaderStatus, - id: &HeaderIdOf
<P>
, - prepare: impl FnOnce(QueuedHeader
<P>
) -> QueuedHeader
<P>
, -) -> Option<HeaderIdOf<P>> { - let header = match remove_header(source_queue, id) { - Some(header) => prepare(header), - None => return None, - }; - - let parent_id = header.header().parent_id(); - destination_queue.entry(id.0).or_default().insert(id.1, header); - set_header_status::
<P>
(known_headers, id, destination_status); - - Some(parent_id) -} - -/// Move all descendant headers from the source to destination queue. -fn move_header_descendants<P: HeadersSyncPipeline>( - source_queues: &mut [&mut HeadersQueue
<P>
], - destination_queue: &mut HeadersQueue
<P>
, - known_headers: &mut KnownHeaders
<P>
, - destination_status: HeaderStatus, - id: &HeaderIdOf
<P>
, -) { - let mut current_number = id.0 + One::one(); - let mut current_parents = HashSet::new(); - current_parents.insert(id.1); - - while !current_parents.is_empty() { - let mut next_parents = HashSet::new(); - for source_queue in source_queues.iter_mut() { - let mut source_entry = match source_queue.entry(current_number) { - BTreeMapEntry::Occupied(source_entry) => source_entry, - BTreeMapEntry::Vacant(_) => continue, - }; - - let mut headers_to_move = Vec::new(); - let children_at_number = source_entry.get().keys().cloned().collect::<Vec<_>>(); - for key in children_at_number { - let entry = match source_entry.get_mut().entry(key) { - HashMapEntry::Occupied(entry) => entry, - HashMapEntry::Vacant(_) => unreachable!("iterating existing keys; qed"), - }; - - if current_parents.contains(&entry.get().header().parent_id().1) { - let header_to_move = entry.remove(); - let header_to_move_id = header_to_move.id(); - headers_to_move.push((header_to_move_id, header_to_move)); - set_header_status::
<P>
(known_headers, &header_to_move_id, destination_status); - } - } - - if source_entry.get().is_empty() { - source_entry.remove(); - } - - next_parents.extend(headers_to_move.iter().map(|(id, _)| id.1)); - - destination_queue - .entry(current_number) - .or_default() - .extend(headers_to_move.into_iter().map(|(id, h)| (id.1, h))) - } - - current_number = current_number + One::one(); - std::mem::swap(&mut current_parents, &mut next_parents); - } -} - -/// Selects (recursive) all synced children of given header. -fn select_synced_children<P: HeadersSyncPipeline>( - synced_children: &SyncedChildren
<P>
, - id: &HeaderIdOf
<P>
) -> Vec<HeaderIdOf<P>> { - let mut result = Vec::new(); - let mut current_parents = HashSet::new(); - current_parents.insert(*id); - - while !current_parents.is_empty() { - let mut next_parents = HashSet::new(); - for current_parent in &current_parents { - let current_parent_synced_children = synced_children - .get(&current_parent.0) - .and_then(|by_number_entry| by_number_entry.get(&current_parent.1)); - if let Some(current_parent_synced_children) = current_parent_synced_children { - for current_parent_synced_child in current_parent_synced_children { - result.push(*current_parent_synced_child); - next_parents.insert(*current_parent_synced_child); - } - } - } - - let _ = std::mem::replace(&mut current_parents, next_parents); - } - - result -} - -/// Return oldest header from the queue. -fn oldest_header<P: HeadersSyncPipeline>(queue: &HeadersQueue
<P>
) -> Option<&QueuedHeader
<P>
> { - queue.values().flat_map(|h| h.values()).next() -} - -/// Return oldest headers from the queue until functor will return false. -fn oldest_headers<P: HeadersSyncPipeline>( - queue: &HeadersQueue
<P>
, - mut f: impl FnMut(&QueuedHeader
<P>
) -> bool, -) -> Option<Vec<&QueuedHeader<P>>> { - let result = queue.values().flat_map(|h| h.values()).take_while(|h| f(h)).collect::<Vec<_>>(); - if result.is_empty() { - None - } else { - Some(result) - } -} - -/// Forget all headers with number less than given. -fn prune_queue<P: HeadersSyncPipeline>(queue: &mut HeadersQueue
<P>
, prune_border: P::Number) { - *queue = queue.split_off(&prune_border); -} - -/// Forget all known headers with number less than given. -fn prune_known_headers<P: HeadersSyncPipeline>( - known_headers: &mut KnownHeaders
<P>
, - prune_border: P::Number, -) { - let new_known_headers = known_headers.split_off(&prune_border); - for (pruned_number, pruned_headers) in &*known_headers { - for pruned_hash in pruned_headers.keys() { - log::debug!(target: "bridge", "Pruning header {:?}.", HeaderId(*pruned_number, *pruned_hash)); - } - } - *known_headers = new_known_headers; -} - -/// Change header status. -fn set_header_status<P: HeadersSyncPipeline>( - known_headers: &mut KnownHeaders
<P>
, - id: &HeaderIdOf
<P>
, - status: HeaderStatus, -) { - log::debug!( - target: "bridge", - "{} header {:?} is now {:?}", - P::SOURCE_NAME, - id, - status, - ); - *known_headers.entry(id.0).or_default().entry(id.1).or_insert(status) = status; -} - -/// Returns queued incomplete header with maximal elapsed time since last update. -fn queued_incomplete_header( - map: &mut LinkedHashMap, - filter: impl FnMut(&mut T) -> bool, -) -> Option<(Id, &T)> { - // TODO (#84): headers that have been just appended to the end of the queue would have to wait - // until all previous headers will be retried - - let retry_old_header = map - .front() - .map(|(key, _)| key.clone()) - .and_then(|key| map.get_mut(&key).map(filter)) - .unwrap_or(false); - if retry_old_header { - let (header_key, header) = - map.pop_front().expect("we have checked that front() exists; qed"); - map.insert(header_key, header); - return map.back().map(|(id, data)| (id.clone(), data)) - } - - None -} - -#[cfg(test)] -pub(crate) mod tests { - use super::*; - use crate::{ - sync_loop_tests::{ - TestHash, TestHeader, TestHeaderId, TestHeadersSyncPipeline, TestNumber, - }, - sync_types::QueuedHeader, - }; - - pub(crate) fn header(number: TestNumber) -> QueuedHeader { - QueuedHeader::new(TestHeader { number, hash: hash(number), parent_hash: hash(number - 1) }) - } - - pub(crate) fn hash(number: TestNumber) -> TestHash { - number - } - - pub(crate) fn id(number: TestNumber) -> TestHeaderId { - HeaderId(number, hash(number)) - } - - #[test] - fn total_headers_works() { - // total headers just sums up number of headers in every queue - let mut queue = QueuedHeaders::::default(); - queue - .maybe_orphan - .entry(1) - .or_default() - .insert(hash(1), QueuedHeader::::new(Default::default())); - queue - .maybe_orphan - .entry(1) - .or_default() - .insert(hash(2), QueuedHeader::::new(Default::default())); - queue - .maybe_orphan - .entry(2) - .or_default() - .insert(hash(3), QueuedHeader::::new(Default::default())); - queue - .orphan - .entry(3) - .or_default() - .insert(hash(4), QueuedHeader::::new(Default::default())); - queue - .maybe_extra - .entry(4) - .or_default() - .insert(hash(5), QueuedHeader::::new(Default::default())); - queue - .ready - .entry(5) - .or_default() - .insert(hash(6), QueuedHeader::::new(Default::default())); - queue - .incomplete - .entry(6) - .or_default() - .insert(hash(7), QueuedHeader::::new(Default::default())); - assert_eq!(queue.total_headers(), 7); - } - - #[test] - fn best_queued_number_works() { - // initially there are headers in MaybeOrphan queue only - let mut queue = QueuedHeaders::::default(); - queue - .maybe_orphan - .entry(1) - .or_default() - .insert(hash(1), QueuedHeader::::new(Default::default())); - queue - .maybe_orphan - .entry(1) - .or_default() - .insert(hash(2), QueuedHeader::::new(Default::default())); - queue - .maybe_orphan - .entry(3) - .or_default() - .insert(hash(3), QueuedHeader::::new(Default::default())); - assert_eq!(queue.best_queued_number(), 3); - // and then there's better header in Orphan - queue - .orphan - .entry(10) - .or_default() - .insert(hash(10), QueuedHeader::::new(Default::default())); - assert_eq!(queue.best_queued_number(), 10); - // and then there's better header in MaybeExtra - queue - .maybe_extra - .entry(20) - .or_default() - .insert(hash(20), QueuedHeader::::new(Default::default())); - assert_eq!(queue.best_queued_number(), 20); - // and then there's better header in Ready - queue - .ready - .entry(30) - .or_default() - .insert(hash(30), QueuedHeader::::new(Default::default())); - 
assert_eq!(queue.best_queued_number(), 30); - // and then there's better header in MaybeOrphan again - queue - .maybe_orphan - .entry(40) - .or_default() - .insert(hash(40), QueuedHeader::::new(Default::default())); - assert_eq!(queue.best_queued_number(), 40); - // and then there's some header in Incomplete - queue - .incomplete - .entry(50) - .or_default() - .insert(hash(50), QueuedHeader::::new(Default::default())); - assert_eq!(queue.best_queued_number(), 50); - } - - #[test] - fn status_works() { - // all headers are unknown initially - let mut queue = QueuedHeaders::::default(); - assert_eq!(queue.status(&id(10)), HeaderStatus::Unknown); - // and status is read from the KnownHeaders - queue.known_headers.entry(10).or_default().insert(hash(10), HeaderStatus::Ready); - assert_eq!(queue.status(&id(10)), HeaderStatus::Ready); - } - - #[test] - fn header_works() { - // initially we have oldest header #10 - let mut queue = QueuedHeaders::::default(); - queue.maybe_orphan.entry(10).or_default().insert(hash(1), header(100)); - assert_eq!(queue.header(HeaderStatus::MaybeOrphan).unwrap().header().hash, hash(100)); - // inserting #20 changes nothing - queue.maybe_orphan.entry(20).or_default().insert(hash(1), header(101)); - assert_eq!(queue.header(HeaderStatus::MaybeOrphan).unwrap().header().hash, hash(100)); - // inserting #5 makes it oldest - queue.maybe_orphan.entry(5).or_default().insert(hash(1), header(102)); - assert_eq!(queue.header(HeaderStatus::MaybeOrphan).unwrap().header().hash, hash(102)); - } - - #[test] - fn header_response_works() { - // when parent is Synced, we insert to MaybeExtra - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Synced); - queue.header_response(header(101).header().clone()); - assert_eq!(queue.status(&id(101)), HeaderStatus::MaybeExtra); - - // when parent is Ready, we insert to MaybeExtra - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Ready); - queue.header_response(header(101).header().clone()); - assert_eq!(queue.status(&id(101)), HeaderStatus::MaybeExtra); - - // when parent is Receipts, we insert to MaybeExtra - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Extra); - queue.header_response(header(101).header().clone()); - assert_eq!(queue.status(&id(101)), HeaderStatus::MaybeExtra); - - // when parent is MaybeExtra, we insert to MaybeExtra - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::MaybeExtra); - queue.header_response(header(101).header().clone()); - assert_eq!(queue.status(&id(101)), HeaderStatus::MaybeExtra); - - // when parent is Orphan, we insert to Orphan - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Orphan); - queue.header_response(header(101).header().clone()); - assert_eq!(queue.status(&id(101)), HeaderStatus::Orphan); - - // when parent is MaybeOrphan, we insert to MaybeOrphan - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::MaybeOrphan); - queue.header_response(header(101).header().clone()); - assert_eq!(queue.status(&id(101)), HeaderStatus::MaybeOrphan); - - // when parent is unknown, we insert to MaybeOrphan - let mut 
queue = QueuedHeaders::::default(); - queue.header_response(header(101).header().clone()); - assert_eq!(queue.status(&id(101)), HeaderStatus::MaybeOrphan); - } - - #[test] - fn ancestors_are_synced_on_substrate_best_header_response() { - // let's say someone else has submitted transaction to bridge that changes - // its best block to #100. At this time we have: - // #100 in MaybeOrphan - // #99 in Orphan - // #98 in MaybeExtra - // #97 in Receipts - // #96 in Ready - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::MaybeOrphan); - queue.maybe_orphan.entry(100).or_default().insert(hash(100), header(100)); - queue - .known_headers - .entry(99) - .or_default() - .insert(hash(99), HeaderStatus::Orphan); - queue.orphan.entry(99).or_default().insert(hash(99), header(99)); - queue - .known_headers - .entry(98) - .or_default() - .insert(hash(98), HeaderStatus::MaybeExtra); - queue.maybe_extra.entry(98).or_default().insert(hash(98), header(98)); - queue.known_headers.entry(97).or_default().insert(hash(97), HeaderStatus::Extra); - queue.extra.entry(97).or_default().insert(hash(97), header(97)); - queue.known_headers.entry(96).or_default().insert(hash(96), HeaderStatus::Ready); - queue.ready.entry(96).or_default().insert(hash(96), header(96)); - queue.target_best_header_response(&id(100)); - - // then the #100 and all ancestors of #100 (#96..#99) are treated as synced - assert!(queue.maybe_orphan.is_empty()); - assert!(queue.orphan.is_empty()); - assert!(queue.maybe_extra.is_empty()); - assert!(queue.extra.is_empty()); - assert!(queue.ready.is_empty()); - assert_eq!(queue.known_headers.len(), 5); - assert!(queue - .known_headers - .values() - .all(|s| s.values().all(|s| *s == HeaderStatus::Synced))); - - // children of synced headers are stored - assert_eq!( - vec![id(97)], - queue.synced_children[&96][&hash(96)].iter().cloned().collect::>() - ); - assert_eq!( - vec![id(98)], - queue.synced_children[&97][&hash(97)].iter().cloned().collect::>() - ); - assert_eq!( - vec![id(99)], - queue.synced_children[&98][&hash(98)].iter().cloned().collect::>() - ); - assert_eq!( - vec![id(100)], - queue.synced_children[&99][&hash(99)].iter().cloned().collect::>() - ); - assert_eq!(0, queue.synced_children[&100][&hash(100)].len()); - } - - #[test] - fn descendants_are_moved_on_substrate_best_header_response() { - // let's say someone else has submitted transaction to bridge that changes - // its best block to #100. 
At this time we have: - // #101 in Orphan - // #102 in MaybeOrphan - // #103 in Orphan - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(101) - .or_default() - .insert(hash(101), HeaderStatus::Orphan); - queue.orphan.entry(101).or_default().insert(hash(101), header(101)); - queue - .known_headers - .entry(102) - .or_default() - .insert(hash(102), HeaderStatus::MaybeOrphan); - queue.maybe_orphan.entry(102).or_default().insert(hash(102), header(102)); - queue - .known_headers - .entry(103) - .or_default() - .insert(hash(103), HeaderStatus::Orphan); - queue.orphan.entry(103).or_default().insert(hash(103), header(103)); - queue.target_best_header_response(&id(100)); - - // all descendants are moved to MaybeExtra - assert!(queue.maybe_orphan.is_empty()); - assert!(queue.orphan.is_empty()); - assert_eq!(queue.maybe_extra.len(), 3); - assert_eq!(queue.known_headers[&101][&hash(101)], HeaderStatus::MaybeExtra); - assert_eq!(queue.known_headers[&102][&hash(102)], HeaderStatus::MaybeExtra); - assert_eq!(queue.known_headers[&103][&hash(103)], HeaderStatus::MaybeExtra); - } - - #[test] - fn positive_maybe_orphan_response_works() { - // let's say we have: - // #100 in MaybeOrphan - // #101 in Orphan - // #102 in MaybeOrphan - // and we have asked for MaybeOrphan status of #100.parent (i.e. #99) - // and the response is: YES, #99 is known to the Substrate runtime - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::MaybeOrphan); - queue.maybe_orphan.entry(100).or_default().insert(hash(100), header(100)); - queue - .known_headers - .entry(101) - .or_default() - .insert(hash(101), HeaderStatus::Orphan); - queue.orphan.entry(101).or_default().insert(hash(101), header(101)); - queue - .known_headers - .entry(102) - .or_default() - .insert(hash(102), HeaderStatus::MaybeOrphan); - queue.maybe_orphan.entry(102).or_default().insert(hash(102), header(102)); - queue.maybe_orphan_response(&id(99), true); - - // then all headers (#100..#103) are moved to the MaybeExtra queue - assert!(queue.orphan.is_empty()); - assert!(queue.maybe_orphan.is_empty()); - assert_eq!(queue.maybe_extra.len(), 3); - assert_eq!(queue.known_headers[&100][&hash(100)], HeaderStatus::MaybeExtra); - assert_eq!(queue.known_headers[&101][&hash(101)], HeaderStatus::MaybeExtra); - assert_eq!(queue.known_headers[&102][&hash(102)], HeaderStatus::MaybeExtra); - } - - #[test] - fn negative_maybe_orphan_response_works() { - // let's say we have: - // #100 in MaybeOrphan - // #101 in MaybeOrphan - // and we have asked for MaybeOrphan status of #100.parent (i.e. 
#99) - // and the response is: NO, #99 is NOT known to the Substrate runtime - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::MaybeOrphan); - queue.maybe_orphan.entry(100).or_default().insert(hash(100), header(100)); - queue - .known_headers - .entry(101) - .or_default() - .insert(hash(101), HeaderStatus::MaybeOrphan); - queue.maybe_orphan.entry(101).or_default().insert(hash(101), header(101)); - queue.maybe_orphan_response(&id(99), false); - - // then all headers (#100..#101) are moved to the Orphan queue - assert!(queue.maybe_orphan.is_empty()); - assert_eq!(queue.orphan.len(), 2); - assert_eq!(queue.known_headers[&100][&hash(100)], HeaderStatus::Orphan); - assert_eq!(queue.known_headers[&101][&hash(101)], HeaderStatus::Orphan); - } - - #[test] - fn positive_maybe_extra_response_works() { - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::MaybeExtra); - queue.maybe_extra.entry(100).or_default().insert(hash(100), header(100)); - queue.maybe_extra_response(&id(100), true); - assert!(queue.maybe_extra.is_empty()); - assert_eq!(queue.extra.len(), 1); - assert_eq!(queue.known_headers[&100][&hash(100)], HeaderStatus::Extra); - } - - #[test] - fn negative_maybe_extra_response_works() { - // when parent header is complete - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::MaybeExtra); - queue.maybe_extra.entry(100).or_default().insert(hash(100), header(100)); - queue.maybe_extra_response(&id(100), false); - assert!(queue.maybe_extra.is_empty()); - assert_eq!(queue.ready.len(), 1); - assert_eq!(queue.known_headers[&100][&hash(100)], HeaderStatus::Ready); - - // when parent header is incomplete - queue.incomplete_headers.insert(id(200), None); - queue - .known_headers - .entry(201) - .or_default() - .insert(hash(201), HeaderStatus::MaybeExtra); - queue.maybe_extra.entry(201).or_default().insert(hash(201), header(201)); - queue.maybe_extra_response(&id(201), false); - assert!(queue.maybe_extra.is_empty()); - assert_eq!(queue.incomplete.len(), 1); - assert_eq!(queue.known_headers[&201][&hash(201)], HeaderStatus::Incomplete); - } - - #[test] - fn receipts_response_works() { - // when parent header is complete - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Extra); - queue.extra.entry(100).or_default().insert(hash(100), header(100)); - queue.extra_response(&id(100), 100_100); - assert!(queue.extra.is_empty()); - assert_eq!(queue.ready.len(), 1); - assert_eq!(queue.known_headers[&100][&hash(100)], HeaderStatus::Ready); - - // when parent header is incomplete - queue.incomplete_headers.insert(id(200), None); - queue - .known_headers - .entry(201) - .or_default() - .insert(hash(201), HeaderStatus::Extra); - queue.extra.entry(201).or_default().insert(hash(201), header(201)); - queue.extra_response(&id(201), 201_201); - assert!(queue.extra.is_empty()); - assert_eq!(queue.incomplete.len(), 1); - assert_eq!(queue.known_headers[&201][&hash(201)], HeaderStatus::Incomplete); - } - - #[test] - fn header_submitted_works() { - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Ready); - queue.ready.entry(100).or_default().insert(hash(100), header(100)); - queue.headers_submitted(vec![id(100)]); 
- assert!(queue.ready.is_empty()); - assert_eq!(queue.known_headers[&100][&hash(100)], HeaderStatus::Submitted); - } - - #[test] - fn incomplete_header_works() { - let mut queue = QueuedHeaders::::default(); - - // nothing to complete if queue is empty - assert_eq!(queue.incomplete_header(), None); - - // when there's new header to complete => ask for completion data - queue.incomplete_headers.insert(id(100), None); - assert_eq!(queue.incomplete_header(), Some(id(100))); - - // we have just asked for completion data => nothing to request - assert_eq!(queue.incomplete_header(), None); - - // enough time have passed => ask again - queue.incomplete_headers.clear(); - queue.incomplete_headers.insert( - id(100), - Some( - Instant::now() - RETRY_FETCH_COMPLETION_INTERVAL - RETRY_FETCH_COMPLETION_INTERVAL, - ), - ); - assert_eq!(queue.incomplete_header(), Some(id(100))); - } - - #[test] - fn completion_response_works() { - let mut queue = QueuedHeaders::::default(); - queue.incomplete_headers.insert(id(100), None); - queue.incomplete_headers.insert(id(200), Some(Instant::now())); - queue.incomplete_headers.insert(id(300), Some(Instant::now())); - - // when header isn't incompete, nothing changes - queue.completion_response(&id(400), None); - assert_eq!(queue.incomplete_headers.len(), 3); - assert_eq!(queue.completion_data.len(), 0); - assert_eq!(queue.header_to_complete(), None); - - // when response is None, nothing changes - queue.completion_response(&id(100), None); - assert_eq!(queue.incomplete_headers.len(), 3); - assert_eq!(queue.completion_data.len(), 0); - assert_eq!(queue.header_to_complete(), None); - - // when response is Some, we're scheduling completion - queue.completion_response(&id(200), Some(200_200)); - assert_eq!(queue.completion_data.len(), 1); - assert!(queue.completion_data.contains_key(&id(200))); - assert_eq!(queue.header_to_complete(), Some((id(200), &200_200))); - assert_eq!( - queue.incomplete_headers.keys().collect::>(), - vec![&id(100), &id(300), &id(200)], - ); - } - - #[test] - fn header_completed_works() { - let mut queue = QueuedHeaders::::default(); - queue.completion_data.insert(id(100), 100_100); - - // when unknown header is completed - queue.header_completed(&id(200)); - assert_eq!(queue.completion_data.len(), 1); - - // when known header is completed - queue.header_completed(&id(100)); - assert_eq!(queue.completion_data.len(), 0); - } - - #[test] - fn incomplete_headers_response_works() { - let mut queue = QueuedHeaders::::default(); - - // when we have already submitted #101 and #102 is ready - queue - .known_headers - .entry(101) - .or_default() - .insert(hash(101), HeaderStatus::Submitted); - queue.submitted.entry(101).or_default().insert(hash(101), header(101)); - queue - .known_headers - .entry(102) - .or_default() - .insert(hash(102), HeaderStatus::Ready); - queue.submitted.entry(102).or_default().insert(hash(102), header(102)); - - // AND now we know that the #100 is incomplete - queue.incomplete_headers_response(vec![id(100)].into_iter().collect()); - - // => #101 and #102 are moved to the Incomplete and #100 is now synced - assert_eq!(queue.status(&id(100)), HeaderStatus::Synced); - assert_eq!(queue.status(&id(101)), HeaderStatus::Incomplete); - assert_eq!(queue.status(&id(102)), HeaderStatus::Incomplete); - assert_eq!(queue.submitted.len(), 0); - assert_eq!(queue.ready.len(), 0); - assert!(queue.incomplete.entry(101).or_default().contains_key(&hash(101))); - assert!(queue.incomplete.entry(102).or_default().contains_key(&hash(102))); - 
assert!(queue.incomplete_headers.contains_key(&id(100))); - assert!(queue.completion_data.is_empty()); - - // and then header #100 is no longer incomplete - queue.incomplete_headers_response(vec![].into_iter().collect()); - - // => #101 and #102 are moved to the Ready queue and #100 if now forgotten - assert_eq!(queue.status(&id(100)), HeaderStatus::Synced); - assert_eq!(queue.status(&id(101)), HeaderStatus::Ready); - assert_eq!(queue.status(&id(102)), HeaderStatus::Ready); - assert_eq!(queue.incomplete.len(), 0); - assert_eq!(queue.submitted.len(), 0); - assert!(queue.ready.entry(101).or_default().contains_key(&hash(101))); - assert!(queue.ready.entry(102).or_default().contains_key(&hash(102))); - assert!(queue.incomplete_headers.is_empty()); - assert!(queue.completion_data.is_empty()); - } - - #[test] - fn is_parent_incomplete_works() { - let mut queue = QueuedHeaders::::default(); - - // when we do not know header itself - assert!(!queue.is_parent_incomplete(&id(50))); - - // when we do not know parent - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Incomplete); - queue.incomplete.entry(100).or_default().insert(hash(100), header(100)); - assert!(!queue.is_parent_incomplete(&id(100))); - - // when parent is inside incomplete queue (i.e. some other ancestor is actually incomplete) - queue - .known_headers - .entry(101) - .or_default() - .insert(hash(101), HeaderStatus::Submitted); - queue.submitted.entry(101).or_default().insert(hash(101), header(101)); - assert!(queue.is_parent_incomplete(&id(101))); - - // when parent is the incomplete header and we do not have completion data - queue.incomplete_headers.insert(id(199), None); - queue - .known_headers - .entry(200) - .or_default() - .insert(hash(200), HeaderStatus::Submitted); - queue.submitted.entry(200).or_default().insert(hash(200), header(200)); - assert!(queue.is_parent_incomplete(&id(200))); - - // when parent is the incomplete header and we have completion data - queue.completion_data.insert(id(299), 299_299); - queue - .known_headers - .entry(300) - .or_default() - .insert(hash(300), HeaderStatus::Submitted); - queue.submitted.entry(300).or_default().insert(hash(300), header(300)); - assert!(queue.is_parent_incomplete(&id(300))); - } - - #[test] - fn prune_works() { - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(105) - .or_default() - .insert(hash(105), HeaderStatus::Incomplete); - queue.incomplete.entry(105).or_default().insert(hash(105), header(105)); - queue - .known_headers - .entry(104) - .or_default() - .insert(hash(104), HeaderStatus::MaybeOrphan); - queue.maybe_orphan.entry(104).or_default().insert(hash(104), header(104)); - queue - .known_headers - .entry(103) - .or_default() - .insert(hash(103), HeaderStatus::Orphan); - queue.orphan.entry(103).or_default().insert(hash(103), header(103)); - queue - .known_headers - .entry(102) - .or_default() - .insert(hash(102), HeaderStatus::MaybeExtra); - queue.maybe_extra.entry(102).or_default().insert(hash(102), header(102)); - queue - .known_headers - .entry(101) - .or_default() - .insert(hash(101), HeaderStatus::Extra); - queue.extra.entry(101).or_default().insert(hash(101), header(101)); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Ready); - queue.ready.entry(100).or_default().insert(hash(100), header(100)); - queue - .synced_children - .entry(100) - .or_default() - .insert(hash(100), vec![id(101)].into_iter().collect()); - queue - .synced_children - .entry(102) 
- .or_default() - .insert(hash(102), vec![id(102)].into_iter().collect()); - - queue.prune(102); - - assert_eq!(queue.ready.len(), 0); - assert_eq!(queue.extra.len(), 0); - assert_eq!(queue.maybe_extra.len(), 1); - assert_eq!(queue.orphan.len(), 1); - assert_eq!(queue.maybe_orphan.len(), 1); - assert_eq!(queue.incomplete.len(), 1); - assert_eq!(queue.synced_children.len(), 1); - assert_eq!(queue.known_headers.len(), 4); - - queue.prune(110); - - assert_eq!(queue.ready.len(), 0); - assert_eq!(queue.extra.len(), 0); - assert_eq!(queue.maybe_extra.len(), 0); - assert_eq!(queue.orphan.len(), 0); - assert_eq!(queue.maybe_orphan.len(), 0); - assert_eq!(queue.incomplete.len(), 0); - assert_eq!(queue.synced_children.len(), 0); - assert_eq!(queue.known_headers.len(), 0); - - queue.header_response(header(109).header().clone()); - assert_eq!(queue.known_headers.len(), 0); - - queue.header_response(header(110).header().clone()); - assert_eq!(queue.known_headers.len(), 1); - } - - #[test] - fn incomplete_headers_are_still_incomplete_after_advance() { - let mut queue = QueuedHeaders::::default(); - - // relay#1 knows that header#100 is incomplete && it has headers 101..104 in incomplete - // queue - queue.incomplete_headers.insert(id(100), None); - queue.incomplete.entry(101).or_default().insert(hash(101), header(101)); - queue.incomplete.entry(102).or_default().insert(hash(102), header(102)); - queue.incomplete.entry(103).or_default().insert(hash(103), header(103)); - queue.incomplete.entry(104).or_default().insert(hash(104), header(104)); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Synced); - queue - .known_headers - .entry(101) - .or_default() - .insert(hash(101), HeaderStatus::Incomplete); - queue - .known_headers - .entry(102) - .or_default() - .insert(hash(102), HeaderStatus::Incomplete); - queue - .known_headers - .entry(103) - .or_default() - .insert(hash(103), HeaderStatus::Incomplete); - queue - .known_headers - .entry(104) - .or_default() - .insert(hash(104), HeaderStatus::Incomplete); - - // let's say relay#2 completes header#100 and then submits header#101+header#102 and it - // turns out that header#102 is also incomplete - queue.incomplete_headers_response(vec![id(102)].into_iter().collect()); - - // then the header#103 and the header#104 must have Incomplete status - assert_eq!(queue.status(&id(101)), HeaderStatus::Synced); - assert_eq!(queue.status(&id(102)), HeaderStatus::Synced); - assert_eq!(queue.status(&id(103)), HeaderStatus::Incomplete); - assert_eq!(queue.status(&id(104)), HeaderStatus::Incomplete); - } - - #[test] - fn incomplete_headers_response_moves_synced_headers() { - let mut queue = QueuedHeaders::::default(); - - // we have submitted two headers - 100 and 101. 
102 is ready - queue.submitted.entry(100).or_default().insert(hash(100), header(100)); - queue.submitted.entry(101).or_default().insert(hash(101), header(101)); - queue.ready.entry(102).or_default().insert(hash(102), header(102)); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Submitted); - queue - .known_headers - .entry(101) - .or_default() - .insert(hash(101), HeaderStatus::Submitted); - queue - .known_headers - .entry(102) - .or_default() - .insert(hash(102), HeaderStatus::Ready); - - // both headers are accepted - queue.target_best_header_response(&id(101)); - - // but header 100 is incomplete - queue.incomplete_headers_response(vec![id(100)].into_iter().collect()); - assert_eq!(queue.status(&id(100)), HeaderStatus::Synced); - assert_eq!(queue.status(&id(101)), HeaderStatus::Synced); - assert_eq!(queue.status(&id(102)), HeaderStatus::Incomplete); - assert!(queue.incomplete_headers.contains_key(&id(100))); - assert!(queue.incomplete[&102].contains_key(&hash(102))); - - // when header 100 is completed, 101 is synced and 102 is ready - queue.incomplete_headers_response(HashSet::new()); - assert_eq!(queue.status(&id(100)), HeaderStatus::Synced); - assert_eq!(queue.status(&id(101)), HeaderStatus::Synced); - assert_eq!(queue.status(&id(102)), HeaderStatus::Ready); - assert!(queue.ready[&102].contains_key(&hash(102))); - } -} diff --git a/relays/headers/src/lib.rs b/relays/headers/src/lib.rs deleted file mode 100644 index 8946355921f0..000000000000 --- a/relays/headers/src/lib.rs +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Relaying source chain headers to target chain. This module provides entrypoint -//! that starts reading new headers from source chain and submit these headers as -//! module/contract transactions to the target chain. Pallet/contract on the target -//! chain is a light-client of the source chain. All other trustless bridge -//! applications are built using this light-client, so running headers-relay is -//! essential for running all other bridge applications. - -// required for futures::select! -#![recursion_limit = "1024"] -#![warn(missing_docs)] - -pub mod headers; -pub mod sync; -pub mod sync_loop; -pub mod sync_loop_metrics; -pub mod sync_loop_tests; -pub mod sync_types; diff --git a/relays/headers/src/sync.rs b/relays/headers/src/sync.rs deleted file mode 100644 index 012b63f0dc59..000000000000 --- a/relays/headers/src/sync.rs +++ /dev/null @@ -1,529 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
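The queue tests above walk headers through the full set of sync states. For orientation, here is a condensed, illustrative sketch of that lifecycle; the variant names mirror the deleted `HeaderStatus` enum, but the single-step transition function is a simplification of the real queue logic (which also moves headers backwards, e.g. from `Submitted` to `Incomplete`, and moves whole subtrees at once):

```rust
/// Illustrative sketch only: the happy-path state machine that the deleted
/// `QueuedHeaders` tests above exercise. Not the actual implementation.
#[derive(Clone, Copy, Debug, PartialEq)]
enum HeaderStatus {
    Unknown,
    MaybeOrphan,
    Orphan,
    MaybeExtra,
    Extra,
    Ready,
    Incomplete,
    Submitted,
    Synced,
}

/// One successful probe/response moves a header one step closer to `Synced`.
fn advance_on_success(status: HeaderStatus) -> HeaderStatus {
    use HeaderStatus::*;
    match status {
        Unknown => MaybeOrphan,    // downloaded, parent status not yet checked
        MaybeOrphan => MaybeExtra, // parent turned out to be known to the target
        Orphan => MaybeExtra,      // a known ancestor has finally been found
        MaybeExtra => Ready,       // target does not require extra data
        Extra => Ready,            // required extra data has been downloaded
        Ready => Submitted,        // included in a submit transaction
        Submitted => Synced,       // accepted by the target chain
        Incomplete => Synced,      // completion data has been accepted
        Synced => Synced,
    }
}

fn main() {
    let mut status = HeaderStatus::Unknown;
    while status != HeaderStatus::Synced {
        status = advance_on_success(status);
    }
}
```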
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Headers synchronization context. This structure wraps headers queue and is -//! able to choose: which headers to read from the source chain? Which headers -//! to submit to the target chain? The context makes decisions basing on parameters -//! passed using `HeadersSyncParams` structure. - -use crate::{ - headers::QueuedHeaders, - sync_types::{HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader}, -}; -use num_traits::{One, Saturating, Zero}; - -/// Common sync params. -#[derive(Debug, Clone)] -pub struct HeadersSyncParams { - /// Maximal number of ethereum headers to pre-download. - pub max_future_headers_to_download: usize, - /// Maximal number of active (we believe) submit header transactions. - pub max_headers_in_submitted_status: usize, - /// Maximal number of headers in single submit request. - pub max_headers_in_single_submit: usize, - /// Maximal total headers size in single submit request. - pub max_headers_size_in_single_submit: usize, - /// We only may store and accept (from Ethereum node) headers that have - /// number >= than "best_substrate_header.number" - "prune_depth". - pub prune_depth: u32, - /// Target transactions mode. - pub target_tx_mode: TargetTransactionMode, -} - -/// Target transaction mode. -#[derive(Debug, PartialEq, Clone)] -pub enum TargetTransactionMode { - /// Submit new headers using signed transactions. - Signed, - /// Submit new headers using unsigned transactions. - Unsigned, - /// Submit new headers using signed transactions, but only when we - /// believe that sync has stalled. - Backup, -} - -/// Headers synchronization context. -#[derive(Debug)] -pub struct HeadersSync { - /// Synchronization parameters. - params: HeadersSyncParams, - /// The best header number known to source node. - source_best_number: Option, - /// The best header known to target node. - target_best_header: Option>, - /// Headers queue. - headers: QueuedHeaders
<P>
, - /// Pause headers submission. - pause_submit: bool, -} - -impl<P: HeadersSyncPipeline> HeadersSync<P>
{ - /// Creates new headers synchronizer. - pub fn new(params: HeadersSyncParams) -> Self { - HeadersSync { - headers: QueuedHeaders::default(), - params, - source_best_number: None, - target_best_header: None, - pause_submit: false, - } - } - - /// Return best header number known to source node. - pub fn source_best_number(&self) -> Option<P::Number> { - self.source_best_number - } - - /// The best header known to target node. - pub fn target_best_header(&self) -> Option<HeaderIdOf<P>> { - self.target_best_header - } - - /// Returns true if we have synced almost all known headers. - pub fn is_almost_synced(&self) -> bool { - match self.source_best_number { - Some(source_best_number) => self - .target_best_header - .map(|best| source_best_number.saturating_sub(best.0) < 4.into()) - .unwrap_or(false), - None => true, - } - } - - /// Returns synchronization status. - pub fn status(&self) -> (&Option<HeaderIdOf<P>>, &Option<P::Number>) { - (&self.target_best_header, &self.source_best_number) - } - - /// Returns reference to the headers queue. - pub fn headers(&self) -> &QueuedHeaders
<P>
{ - &self.headers - } - - /// Returns mutable reference to the headers queue. - pub fn headers_mut(&mut self) -> &mut QueuedHeaders
<P>
{ - &mut self.headers - } - - /// Select header that needs to be downloaded from the source node. - pub fn select_new_header_to_download(&self) -> Option<P::Number> { - // if we haven't received best header from source node yet, there's nothing we can download - let source_best_number = self.source_best_number?; - - // if we haven't received known best header from target node yet, there's nothing we can - // download - let target_best_header = self.target_best_header.as_ref()?; - - // if there are too many headers in the queue, stop downloading - let in_memory_headers = self.headers.total_headers(); - if in_memory_headers >= self.params.max_future_headers_to_download { - return None - } - - // if the queue is empty and the best header on target is ahead of the best header on source, - // then a reorganization has probably happened and we should restart from the source best header - let best_queued_number = self.headers.best_queued_number(); - if best_queued_number.is_zero() && source_best_number < target_best_header.0 { - return Some(source_best_number) - } - - // we assume that there were no reorganizations if we have already downloaded best header - let best_downloaded_number = std::cmp::max( - std::cmp::max(best_queued_number, self.headers.best_synced_number()), - target_best_header.0, - ); - if best_downloaded_number >= source_best_number { - return None - } - - // download new header - Some(best_downloaded_number + One::one()) - } - - /// Select orphan header to download. - pub fn select_orphan_header_to_download(&self) -> Option<&QueuedHeader
<P>
> { - let orphan_header = self.headers.header(HeaderStatus::Orphan)?; - - // we consider header orphan until we'll find it ancestor that is known to the target node - // => we may get orphan header while we ask target node whether it knows its parent - // => let's avoid fetching duplicate headers - let parent_id = orphan_header.parent_id(); - if self.headers.status(&parent_id) != HeaderStatus::Unknown { - return None - } - - Some(orphan_header) - } - - /// Select headers that need to be submitted to the target node. - pub fn select_headers_to_submit(&self, stalled: bool) -> Option>> { - // maybe we have paused new headers submit? - if self.pause_submit { - return None - } - - // if we operate in backup mode, we only submit headers when sync has stalled - if self.params.target_tx_mode == TargetTransactionMode::Backup && !stalled { - return None - } - - let headers_in_submit_status = self.headers.headers_in_status(HeaderStatus::Submitted); - let headers_to_submit_count = self - .params - .max_headers_in_submitted_status - .checked_sub(headers_in_submit_status)?; - - let mut total_size = 0; - let mut total_headers = 0; - self.headers.headers(HeaderStatus::Ready, |header| { - if total_headers == headers_to_submit_count { - return false - } - if total_headers == self.params.max_headers_in_single_submit { - return false - } - - let encoded_size = P::estimate_size(header); - if total_headers != 0 && - total_size + encoded_size > self.params.max_headers_size_in_single_submit - { - return false - } - - total_size += encoded_size; - total_headers += 1; - - true - }) - } - - /// Receive new target header number from the source node. - pub fn source_best_header_number_response(&mut self, best_header_number: P::Number) { - log::debug!( - target: "bridge", - "Received best header number from {} node: {}", - P::SOURCE_NAME, - best_header_number, - ); - self.source_best_number = Some(best_header_number); - } - - /// Receive new best header from the target node. - /// Returns true if it is different from the previous block known to us. - pub fn target_best_header_response(&mut self, best_header: HeaderIdOf
<P>
) -> bool { - log::debug!( - target: "bridge", - "Received best known header from {}: {:?}", - P::TARGET_NAME, - best_header, - ); - - // early return if it is still the same - if self.target_best_header == Some(best_header) { - return false - } - - // remember that this header is now known to the Substrate runtime - self.headers.target_best_header_response(&best_header); - - // prune ancient headers - self.headers.prune(best_header.0.saturating_sub(self.params.prune_depth.into())); - - // finally remember the best header itself - self.target_best_header = Some(best_header); - - // we are ready to submit headers again - if self.pause_submit { - log::debug!( - target: "bridge", - "Ready to submit {} headers to {} node again!", - P::SOURCE_NAME, - P::TARGET_NAME, - ); - - self.pause_submit = false; - } - - true - } - - /// Pause headers submit until best header will be updated on target node. - pub fn pause_submit(&mut self) { - log::debug!( - target: "bridge", - "Stopping submitting {} headers to {} node. Waiting for {} submitted headers to be accepted", - P::SOURCE_NAME, - P::TARGET_NAME, - self.headers.headers_in_status(HeaderStatus::Submitted), - ); - - self.pause_submit = true; - } - - /// Restart synchronization. - pub fn restart(&mut self) { - self.source_best_number = None; - self.target_best_header = None; - self.headers.clear(); - self.pause_submit = false; - } -} - -#[cfg(test)] -pub mod tests { - use super::*; - use crate::{ - headers::tests::{header, id}, - sync_loop_tests::{TestHash, TestHeadersSyncPipeline, TestNumber}, - sync_types::HeaderStatus, - }; - use relay_utils::HeaderId; - - fn side_hash(number: TestNumber) -> TestHash { - 1000 + number - } - - pub fn default_sync_params() -> HeadersSyncParams { - HeadersSyncParams { - max_future_headers_to_download: 128, - max_headers_in_submitted_status: 128, - max_headers_in_single_submit: 32, - max_headers_size_in_single_submit: 131_072, - prune_depth: 4096, - target_tx_mode: TargetTransactionMode::Signed, - } - } - - #[test] - fn select_new_header_to_download_works() { - let mut eth_sync = HeadersSync::::new(default_sync_params()); - - // both best && target headers are unknown - assert_eq!(eth_sync.select_new_header_to_download(), None); - - // best header is known, target header is unknown - eth_sync.target_best_header = Some(HeaderId(0, Default::default())); - assert_eq!(eth_sync.select_new_header_to_download(), None); - - // target header is known, best header is unknown - eth_sync.target_best_header = None; - eth_sync.source_best_number = Some(100); - assert_eq!(eth_sync.select_new_header_to_download(), None); - - // when our best block has the same number as the target - eth_sync.target_best_header = Some(HeaderId(100, Default::default())); - assert_eq!(eth_sync.select_new_header_to_download(), None); - - // when we actually need a new header - eth_sync.source_best_number = Some(101); - assert_eq!(eth_sync.select_new_header_to_download(), Some(101)); - - // when we have to reorganize to longer fork - eth_sync.source_best_number = Some(100); - eth_sync.target_best_header = Some(HeaderId(200, Default::default())); - assert_eq!(eth_sync.select_new_header_to_download(), Some(100)); - - // when there are too many headers scheduled for submitting - for i in 1..1000 { - eth_sync.headers.header_response(header(i).header().clone()); - } - assert_eq!(eth_sync.select_new_header_to_download(), None); - } - - #[test] - fn select_new_header_to_download_works_with_empty_queue() { - let mut eth_sync = 
HeadersSync::::new(default_sync_params()); - eth_sync.source_best_header_number_response(100); - - // when queue is not empty => everything goes as usually - eth_sync.target_best_header_response(header(10).id()); - eth_sync.headers_mut().header_response(header(11).header().clone()); - eth_sync.headers_mut().maybe_extra_response(&header(11).id(), false); - assert_eq!(eth_sync.select_new_header_to_download(), Some(12)); - - // but then queue is drained - eth_sync.headers_mut().target_best_header_response(&header(11).id()); - - // even though it's empty, we know that header#11 is synced - assert_eq!(eth_sync.headers().best_queued_number(), 0); - assert_eq!(eth_sync.headers().best_synced_number(), 11); - assert_eq!(eth_sync.select_new_header_to_download(), Some(12)); - } - - #[test] - fn sync_without_reorgs_works() { - let mut eth_sync = HeadersSync::new(default_sync_params()); - eth_sync.params.max_headers_in_submitted_status = 1; - - // ethereum reports best header #102 - eth_sync.source_best_header_number_response(102); - - // substrate reports that it is at block #100 - eth_sync.target_best_header_response(id(100)); - - // block #101 is downloaded first - assert_eq!(eth_sync.select_new_header_to_download(), Some(101)); - eth_sync.headers.header_response(header(101).header().clone()); - - // now header #101 is ready to be submitted - assert_eq!(eth_sync.headers.header(HeaderStatus::MaybeExtra), Some(&header(101))); - eth_sync.headers.maybe_extra_response(&id(101), false); - assert_eq!(eth_sync.headers.header(HeaderStatus::Ready), Some(&header(101))); - assert_eq!(eth_sync.select_headers_to_submit(false), Some(vec![&header(101)])); - - // and header #102 is ready to be downloaded - assert_eq!(eth_sync.select_new_header_to_download(), Some(102)); - eth_sync.headers.header_response(header(102).header().clone()); - - // receive submission confirmation - eth_sync.headers.headers_submitted(vec![id(101)]); - - // we have nothing to submit because previous header hasn't been confirmed yet - // (and we allow max 1 submit transaction in the wild) - assert_eq!(eth_sync.headers.header(HeaderStatus::MaybeExtra), Some(&header(102))); - eth_sync.headers.maybe_extra_response(&id(102), false); - assert_eq!(eth_sync.headers.header(HeaderStatus::Ready), Some(&header(102))); - assert_eq!(eth_sync.select_headers_to_submit(false), None); - - // substrate reports that it has imported block #101 - eth_sync.target_best_header_response(id(101)); - - // and we are ready to submit #102 - assert_eq!(eth_sync.select_headers_to_submit(false), Some(vec![&header(102)])); - eth_sync.headers.headers_submitted(vec![id(102)]); - - // substrate reports that it has imported block #102 - eth_sync.target_best_header_response(id(102)); - - // and we have nothing to download - assert_eq!(eth_sync.select_new_header_to_download(), None); - } - - #[test] - fn sync_with_orphan_headers_work() { - let mut eth_sync = HeadersSync::new(default_sync_params()); - - // ethereum reports best header #102 - eth_sync.source_best_header_number_response(102); - - // substrate reports that it is at block #100, but it isn't part of best chain - eth_sync.target_best_header_response(HeaderId(100, side_hash(100))); - - // block #101 is downloaded first - assert_eq!(eth_sync.select_new_header_to_download(), Some(101)); - eth_sync.headers.header_response(header(101).header().clone()); - - // we can't submit header #101, because its parent status is unknown - assert_eq!(eth_sync.select_headers_to_submit(false), None); - - // instead we are trying to 
determine status of its parent (#100) - assert_eq!(eth_sync.headers.header(HeaderStatus::MaybeOrphan), Some(&header(101))); - - // and the status is still unknown - eth_sync.headers.maybe_orphan_response(&id(100), false); - - // so we consider #101 orphaned now && will download its parent - #100 - assert_eq!(eth_sync.headers.header(HeaderStatus::Orphan), Some(&header(101))); - eth_sync.headers.header_response(header(100).header().clone()); - - // #101 is now Orphan and #100 is MaybeOrphan => we do not want to retrieve - // header #100 again - assert_eq!(eth_sync.headers.header(HeaderStatus::Orphan), Some(&header(101))); - assert_eq!(eth_sync.select_orphan_header_to_download(), None); - - // we can't submit header #100, because its parent status is unknown - assert_eq!(eth_sync.select_headers_to_submit(false), None); - - // instead we are trying to determine status of its parent (#99) - assert_eq!(eth_sync.headers.header(HeaderStatus::MaybeOrphan), Some(&header(100))); - - // and the status is known, so we move previously orphaned #100 and #101 to ready queue - eth_sync.headers.maybe_orphan_response(&id(99), true); - - // and we are ready to submit #100 - assert_eq!(eth_sync.headers.header(HeaderStatus::MaybeExtra), Some(&header(100))); - eth_sync.headers.maybe_extra_response(&id(100), false); - assert_eq!(eth_sync.select_headers_to_submit(false), Some(vec![&header(100)])); - eth_sync.headers.headers_submitted(vec![id(100)]); - - // and we are ready to submit #101 - assert_eq!(eth_sync.headers.header(HeaderStatus::MaybeExtra), Some(&header(101))); - eth_sync.headers.maybe_extra_response(&id(101), false); - assert_eq!(eth_sync.select_headers_to_submit(false), Some(vec![&header(101)])); - eth_sync.headers.headers_submitted(vec![id(101)]); - } - - #[test] - fn pruning_happens_on_target_best_header_response() { - let mut eth_sync = HeadersSync::::new(default_sync_params()); - eth_sync.params.prune_depth = 50; - eth_sync.target_best_header_response(id(100)); - assert_eq!(eth_sync.headers.prune_border(), 50); - } - - #[test] - fn only_submitting_headers_in_backup_mode_when_stalled() { - let mut eth_sync = HeadersSync::new(default_sync_params()); - eth_sync.params.target_tx_mode = TargetTransactionMode::Backup; - - // ethereum reports best header #102 - eth_sync.source_best_header_number_response(102); - - // substrate reports that it is at block #100 - eth_sync.target_best_header_response(id(100)); - - // block #101 is downloaded first - eth_sync.headers.header_response(header(101).header().clone()); - eth_sync.headers.maybe_extra_response(&id(101), false); - - // ensure that headers are not submitted when sync is not stalled - assert_eq!(eth_sync.select_headers_to_submit(false), None); - - // ensure that headers are not submitted when sync is stalled - assert_eq!(eth_sync.select_headers_to_submit(true), Some(vec![&header(101)])); - } - - #[test] - fn does_not_select_new_headers_to_submit_when_submit_is_paused() { - let mut eth_sync = HeadersSync::new(default_sync_params()); - eth_sync.params.max_headers_in_submitted_status = 1; - - // ethereum reports best header #102 and substrate is at #100 - eth_sync.source_best_header_number_response(102); - eth_sync.target_best_header_response(id(100)); - - // let's prepare #101 and #102 for submitting - eth_sync.headers.header_response(header(101).header().clone()); - eth_sync.headers.maybe_extra_response(&id(101), false); - eth_sync.headers.header_response(header(102).header().clone()); - eth_sync.headers.maybe_extra_response(&id(102), false); - - // when 
submit is not paused, we're ready to submit #101 - assert_eq!(eth_sync.select_headers_to_submit(false), Some(vec![&header(101)])); - - // when submit is paused, we're not ready to submit anything - eth_sync.pause_submit(); - assert_eq!(eth_sync.select_headers_to_submit(false), None); - - // if best header on substrate node isn't updated, we still not submitting anything - eth_sync.target_best_header_response(id(100)); - assert_eq!(eth_sync.select_headers_to_submit(false), None); - - // but after it is actually updated, we are ready to submit - eth_sync.target_best_header_response(id(101)); - assert_eq!(eth_sync.select_headers_to_submit(false), Some(vec![&header(102)])); - } -} diff --git a/relays/headers/src/sync_loop.rs b/relays/headers/src/sync_loop.rs deleted file mode 100644 index da8d23dc39de..000000000000 --- a/relays/headers/src/sync_loop.rs +++ /dev/null @@ -1,654 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Entrypoint for running headers synchronization loop. - -use crate::{ - sync::{HeadersSync, HeadersSyncParams}, - sync_loop_metrics::SyncLoopMetrics, - sync_types::{HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader, SubmittedHeaders}, -}; - -use async_trait::async_trait; -use futures::{future::FutureExt, stream::StreamExt}; -use num_traits::{Saturating, Zero}; -use relay_utils::{ - format_ids, interval, - metrics::{GlobalMetrics, MetricsParams}, - process_future_result, - relay_loop::Client as RelayClient, - retry_backoff, FailedClient, MaybeConnectionError, StringifiedMaybeConnectionError, -}; -use std::{ - collections::HashSet, - future::Future, - time::{Duration, Instant}, -}; - -/// When we submit headers to target node, but see no updates of best -/// source block known to target node during STALL_SYNC_TIMEOUT seconds, -/// we consider that our headers are rejected because there has been reorganization in target chain. -/// This reorganization could invalidate our knowledge about sync process (i.e. we have asked if -/// HeaderA is known to target, but then reorganization happened and the answer is different -/// now) => we need to reset sync. -/// The other option is to receive **EVERY** best target header and check if it is -/// direct child of previous best header. But: (1) subscription doesn't guarantee that -/// the subscriber will receive every best header (2) reorganization won't always lead to sync -/// stall and restart is a heavy operation (we forget all in-memory headers). -const STALL_SYNC_TIMEOUT: Duration = Duration::from_secs(5 * 60); -/// Delay after we have seen update of best source header at target node, -/// for us to treat sync stalled. ONLY when relay operates in backup mode. -const BACKUP_STALL_SYNC_TIMEOUT: Duration = Duration::from_secs(10 * 60); -/// Interval between calling sync maintain procedure. 
-const MAINTAIN_INTERVAL: Duration = Duration::from_secs(30); - -/// Source client trait. -#[async_trait] -pub trait SourceClient<P: HeadersSyncPipeline>: RelayClient { - /// Get best block number. - async fn best_block_number(&self) -> Result<P::Number, Self::Error>; - - /// Get header by hash. - async fn header_by_hash(&self, hash: P::Hash) -> Result<P::Header, Self::Error>; - - /// Get canonical header by number. - async fn header_by_number(&self, number: P::Number) -> Result<P::Header, Self::Error>; - - /// Get completion data by header hash. - async fn header_completion( - &self, - id: HeaderIdOf
<P>
, - ) -> Result<(HeaderIdOf
<P>
, Option<P::Completion>), Self::Error>; - - /// Get extra data by header hash. - async fn header_extra( - &self, - id: HeaderIdOf
<P>
, - header: QueuedHeader
<P>
, - ) -> Result<(HeaderIdOf
<P>
, P::Extra), Self::Error>; -} - -/// Target client trait. -#[async_trait] -pub trait TargetClient<P: HeadersSyncPipeline>: RelayClient { - /// Returns ID of the best header known to the target node. - async fn best_header_id(&self) -> Result<HeaderIdOf<P>, Self::Error>; - - /// Returns true if header is known to the target node. - async fn is_known_header( - &self, - id: HeaderIdOf
<P>
, - ) -> Result<(HeaderIdOf
<P>
, bool), Self::Error>; - - /// Submit headers. - async fn submit_headers( - &self, - headers: Vec<QueuedHeader<P>>, - ) -> SubmittedHeaders<HeaderIdOf<P>, Self::Error>; - - /// Returns ID of headers that require to be 'completed' before children can be submitted. - async fn incomplete_headers_ids(&self) -> Result<HashSet<HeaderIdOf<P>>, Self::Error>; - - /// Submit completion data for header. - async fn complete_header( - &self, - id: HeaderIdOf
<P>
, - completion: P::Completion, - ) -> Result<HeaderIdOf<P>, Self::Error>; - - /// Returns true if header requires extra data to be submitted. - async fn requires_extra( - &self, - header: QueuedHeader
<P>
, - ) -> Result<(HeaderIdOf
<P>
, bool), Self::Error>; -} - -/// Synchronization maintain procedure. -#[async_trait] -pub trait SyncMaintain<P: HeadersSyncPipeline>: 'static + Clone + Send + Sync { - /// Run custom maintain procedures. This is guaranteed to be called when both source and target - /// clients are unoccupied. - async fn maintain(&self, _sync: &mut HeadersSync
<P>
) {} -} - -impl<P: HeadersSyncPipeline> SyncMaintain
<P>
for () {} - -/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs -/// sync loop. -pub fn metrics_prefix<P: HeadersSyncPipeline>() -> String { - format!("{}_to_{}_Sync", P::SOURCE_NAME, P::TARGET_NAME) -} - -/// Run headers synchronization. -#[allow(clippy::too_many_arguments)] -pub async fn run<P: HeadersSyncPipeline, TC: TargetClient<P>>( - source_client: impl SourceClient
<P>
, - source_tick: Duration, - target_client: TC, - target_tick: Duration, - sync_maintain: impl SyncMaintain
<P>
, - sync_params: HeadersSyncParams, - metrics_params: MetricsParams, - exit_signal: impl Future<Output = ()> + 'static + Send, -) -> Result<(), relay_utils::Error> { - let exit_signal = exit_signal.shared(); - relay_utils::relay_loop(source_client, target_client) - .with_metrics(Some(metrics_prefix::
<P>
()), metrics_params) - .loop_metric(SyncLoopMetrics::new)? - .standalone_metric(GlobalMetrics::new)? - .expose() - .await? - .run(metrics_prefix::
<P>
(), move |source_client, target_client, metrics| { - run_until_connection_lost( - source_client, - source_tick, - target_client, - target_tick, - sync_maintain.clone(), - sync_params.clone(), - metrics, - exit_signal.clone(), - ) - }) - .await -} - -/// Run headers synchronization. -#[allow(clippy::too_many_arguments)] -async fn run_until_connection_lost<P: HeadersSyncPipeline, TC: TargetClient<P>>( - source_client: impl SourceClient
<P>
, - source_tick: Duration, - target_client: TC, - target_tick: Duration, - sync_maintain: impl SyncMaintain
<P>
, - sync_params: HeadersSyncParams, - metrics_sync: Option<SyncLoopMetrics>, - exit_signal: impl Future<Output = ()> + Send, -) -> Result<(), FailedClient> { - let mut progress_context = (Instant::now(), None, None); - - let mut sync = HeadersSync::
<P>
::new(sync_params); - let mut stall_countdown = None; - let mut last_update_time = Instant::now(); - - let mut source_retry_backoff = retry_backoff(); - let mut source_client_is_online = false; - let mut source_best_block_number_required = false; - let source_best_block_number_future = source_client.best_block_number().fuse(); - let source_new_header_future = futures::future::Fuse::terminated(); - let source_orphan_header_future = futures::future::Fuse::terminated(); - let source_extra_future = futures::future::Fuse::terminated(); - let source_completion_future = futures::future::Fuse::terminated(); - let source_go_offline_future = futures::future::Fuse::terminated(); - let source_tick_stream = interval(source_tick).fuse(); - - let mut target_retry_backoff = retry_backoff(); - let mut target_client_is_online = false; - let mut target_best_block_required = false; - let mut target_incomplete_headers_required = true; - let target_best_block_future = target_client.best_header_id().fuse(); - let target_incomplete_headers_future = futures::future::Fuse::terminated(); - let target_extra_check_future = futures::future::Fuse::terminated(); - let target_existence_status_future = futures::future::Fuse::terminated(); - let target_submit_header_future = futures::future::Fuse::terminated(); - let target_complete_header_future = futures::future::Fuse::terminated(); - let target_go_offline_future = futures::future::Fuse::terminated(); - let target_tick_stream = interval(target_tick).fuse(); - - let mut maintain_required = false; - let maintain_stream = interval(MAINTAIN_INTERVAL).fuse(); - - let exit_signal = exit_signal.fuse(); - - futures::pin_mut!( - source_best_block_number_future, - source_new_header_future, - source_orphan_header_future, - source_extra_future, - source_completion_future, - source_go_offline_future, - source_tick_stream, - target_best_block_future, - target_incomplete_headers_future, - target_extra_check_future, - target_existence_status_future, - target_submit_header_future, - target_complete_header_future, - target_go_offline_future, - target_tick_stream, - maintain_stream, - exit_signal - ); - - loop { - futures::select! 
{ - source_best_block_number = source_best_block_number_future => { - source_best_block_number_required = false; - - source_client_is_online = process_future_result( - source_best_block_number, - &mut source_retry_backoff, - |source_best_block_number| sync.source_best_header_number_response(source_best_block_number), - &mut source_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving best header number from {}", P::SOURCE_NAME), - ).fail_if_connection_error(FailedClient::Source)?; - }, - source_new_header = source_new_header_future => { - source_client_is_online = process_future_result( - source_new_header, - &mut source_retry_backoff, - |source_new_header| sync.headers_mut().header_response(source_new_header), - &mut source_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving header from {} node", P::SOURCE_NAME), - ).fail_if_connection_error(FailedClient::Source)?; - }, - source_orphan_header = source_orphan_header_future => { - source_client_is_online = process_future_result( - source_orphan_header, - &mut source_retry_backoff, - |source_orphan_header| sync.headers_mut().header_response(source_orphan_header), - &mut source_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving orphan header from {} node", P::SOURCE_NAME), - ).fail_if_connection_error(FailedClient::Source)?; - }, - source_extra = source_extra_future => { - source_client_is_online = process_future_result( - source_extra, - &mut source_retry_backoff, - |(header, extra)| sync.headers_mut().extra_response(&header, extra), - &mut source_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving extra data from {} node", P::SOURCE_NAME), - ).fail_if_connection_error(FailedClient::Source)?; - }, - source_completion = source_completion_future => { - source_client_is_online = process_future_result( - source_completion, - &mut source_retry_backoff, - |(header, completion)| sync.headers_mut().completion_response(&header, completion), - &mut source_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving completion data from {} node", P::SOURCE_NAME), - ).fail_if_connection_error(FailedClient::Source)?; - }, - _ = source_go_offline_future => { - source_client_is_online = true; - }, - _ = source_tick_stream.next() => { - if sync.is_almost_synced() { - source_best_block_number_required = true; - } - }, - target_best_block = target_best_block_future => { - target_best_block_required = false; - - target_client_is_online = process_future_result( - target_best_block, - &mut target_retry_backoff, - |target_best_block| { - let head_updated = sync.target_best_header_response(target_best_block); - if head_updated { - last_update_time = Instant::now(); - } - match head_updated { - // IF head is updated AND there are still our transactions: - // => restart stall countdown timer - true if sync.headers().headers_in_status(HeaderStatus::Submitted) != 0 => - stall_countdown = Some(Instant::now()), - // IF head is updated AND there are no our transactions: - // => stop stall countdown timer - true => stall_countdown = None, - // IF head is not updated AND stall countdown is not yet completed - // => do nothing - false if stall_countdown - .map(|stall_countdown| stall_countdown.elapsed() < STALL_SYNC_TIMEOUT) - .unwrap_or(true) - => (), - // IF head is not updated AND stall countdown has completed - // => restart sync - false => { - log::info!( - target: "bridge", - "Sync has stalled. 
Restarting {} headers synchronization.", - P::SOURCE_NAME, - ); - stall_countdown = None; - sync.restart(); - }, - } - }, - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving best known {} header from {} node", P::SOURCE_NAME, P::TARGET_NAME), - ).fail_if_connection_error(FailedClient::Target)?; - }, - incomplete_headers_ids = target_incomplete_headers_future => { - target_incomplete_headers_required = false; - - target_client_is_online = process_future_result( - incomplete_headers_ids, - &mut target_retry_backoff, - |incomplete_headers_ids| sync.headers_mut().incomplete_headers_response(incomplete_headers_ids), - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving incomplete headers from {} node", P::TARGET_NAME), - ).fail_if_connection_error(FailedClient::Target)?; - }, - target_existence_status = target_existence_status_future => { - target_client_is_online = process_future_result( - target_existence_status, - &mut target_retry_backoff, - |(target_header, target_existence_status)| sync - .headers_mut() - .maybe_orphan_response(&target_header, target_existence_status), - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving existence status from {} node", P::TARGET_NAME), - ).fail_if_connection_error(FailedClient::Target)?; - }, - submitted_headers = target_submit_header_future => { - // following line helps Rust understand the type of `submitted_headers` :/ - let submitted_headers: SubmittedHeaders, TC::Error> = submitted_headers; - let submitted_headers_str = format!("{}", submitted_headers); - let all_headers_rejected = submitted_headers.submitted.is_empty() - && submitted_headers.incomplete.is_empty(); - let has_submitted_headers = sync.headers().headers_in_status(HeaderStatus::Submitted) != 0; - - let maybe_fatal_error = match submitted_headers.fatal_error { - Some(fatal_error) => Err(StringifiedMaybeConnectionError::new( - fatal_error.is_connection_error(), - format!("{:?}", fatal_error), - )), - None if all_headers_rejected && !has_submitted_headers => - Err(StringifiedMaybeConnectionError::new(false, "All headers were rejected".into())), - None => Ok(()), - }; - - let no_fatal_error = maybe_fatal_error.is_ok(); - target_client_is_online = process_future_result( - maybe_fatal_error, - &mut target_retry_backoff, - |_| {}, - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error submitting headers to {} node", P::TARGET_NAME), - ).fail_if_connection_error(FailedClient::Target)?; - - log::debug!(target: "bridge", "Header submit result: {}", submitted_headers_str); - - sync.headers_mut().headers_submitted(submitted_headers.submitted); - sync.headers_mut().add_incomplete_headers(false, submitted_headers.incomplete); - - // when there's no fatal error, but node has rejected all our headers we may - // want to pause until our submitted headers will be accepted - if no_fatal_error && all_headers_rejected && has_submitted_headers { - sync.pause_submit(); - } - }, - target_complete_header_result = target_complete_header_future => { - target_client_is_online = process_future_result( - target_complete_header_result, - &mut target_retry_backoff, - |completed_header| sync.headers_mut().header_completed(&completed_header), - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error completing headers at {}", P::TARGET_NAME), - ).fail_if_connection_error(FailedClient::Target)?; - }, - target_extra_check_result = target_extra_check_future => { - 
target_client_is_online = process_future_result( - target_extra_check_result, - &mut target_retry_backoff, - |(header, extra_check_result)| sync - .headers_mut() - .maybe_extra_response(&header, extra_check_result), - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving receipts requirement from {} node", P::TARGET_NAME), - ).fail_if_connection_error(FailedClient::Target)?; - }, - _ = target_go_offline_future => { - target_client_is_online = true; - }, - _ = target_tick_stream.next() => { - target_best_block_required = true; - target_incomplete_headers_required = true; - }, - - _ = maintain_stream.next() => { - maintain_required = true; - }, - _ = exit_signal => { - return Ok(()); - } - } - - // update metrics - if let Some(ref metrics_sync) = metrics_sync { - metrics_sync.update(&sync); - } - - // print progress - progress_context = print_sync_progress(progress_context, &sync); - - // run maintain procedures - if maintain_required && source_client_is_online && target_client_is_online { - log::debug!(target: "bridge", "Maintaining headers sync loop"); - maintain_required = false; - sync_maintain.maintain(&mut sync).await; - } - - // If the target client is accepting requests we update the requests that - // we want it to run - if !maintain_required && target_client_is_online { - // NOTE: Is is important to reset this so that we only have one - // request being processed by the client at a time. This prevents - // race conditions like receiving two transactions with the same - // nonce from the client. - target_client_is_online = false; - - // The following is how we prioritize requests: - // - // 1. Get best block - // - Stops us from downloading or submitting new blocks - // - Only called rarely - // - // 2. Get incomplete headers - // - Stops us from submitting new blocks - // - Only called rarely - // - // 3. Get complete headers - // - Stops us from submitting new blocks - // - // 4. Check if we need extra data from source - // - Stops us from downloading or submitting new blocks - // - // 5. Check existence of header - // - Stops us from submitting new blocks - // - // 6. 
Submit header - - if target_best_block_required { - log::debug!(target: "bridge", "Asking {} about best block", P::TARGET_NAME); - target_best_block_future.set(target_client.best_header_id().fuse()); - } else if target_incomplete_headers_required { - log::debug!(target: "bridge", "Asking {} about incomplete headers", P::TARGET_NAME); - target_incomplete_headers_future.set(target_client.incomplete_headers_ids().fuse()); - } else if let Some((id, completion)) = sync.headers_mut().header_to_complete() { - log::debug!( - target: "bridge", - "Going to complete header: {:?}", - id, - ); - - target_complete_header_future - .set(target_client.complete_header(id, completion.clone()).fuse()); - } else if let Some(header) = sync.headers().header(HeaderStatus::MaybeExtra) { - log::debug!( - target: "bridge", - "Checking if header submission requires extra: {:?}", - header.id(), - ); - - target_extra_check_future.set(target_client.requires_extra(header.clone()).fuse()); - } else if let Some(header) = sync.headers().header(HeaderStatus::MaybeOrphan) { - // for MaybeOrphan we actually ask for parent' header existence - let parent_id = header.parent_id(); - - log::debug!( - target: "bridge", - "Asking {} node for existence of: {:?}", - P::TARGET_NAME, - parent_id, - ); - - target_existence_status_future.set(target_client.is_known_header(parent_id).fuse()); - } else if let Some(headers) = sync - .select_headers_to_submit(last_update_time.elapsed() > BACKUP_STALL_SYNC_TIMEOUT) - { - log::debug!( - target: "bridge", - "Submitting {} header(s) to {} node: {:?}", - headers.len(), - P::TARGET_NAME, - format_ids(headers.iter().map(|header| header.id())), - ); - - let headers = headers.into_iter().cloned().collect(); - target_submit_header_future.set(target_client.submit_headers(headers).fuse()); - - // remember that we have submitted some headers - if stall_countdown.is_none() { - stall_countdown = Some(Instant::now()); - } - } else { - target_client_is_online = true; - } - } - - // If the source client is accepting requests we update the requests that - // we want it to run - if !maintain_required && source_client_is_online { - // NOTE: Is is important to reset this so that we only have one - // request being processed by the client at a time. This prevents - // race conditions like receiving two transactions with the same - // nonce from the client. - source_client_is_online = false; - - // The following is how we prioritize requests: - // - // 1. Get best block - // - Stops us from downloading or submitting new blocks - // - Only called rarely - // - // 2. Download completion data - // - Stops us from submitting new blocks - // - // 3. Download extra data - // - Stops us from submitting new blocks - // - // 4. Download missing headers - // - Stops us from downloading or submitting new blocks - // - // 5. 
Downloading new headers - - if source_best_block_number_required { - log::debug!(target: "bridge", "Asking {} node about best block number", P::SOURCE_NAME); - source_best_block_number_future.set(source_client.best_block_number().fuse()); - } else if let Some(id) = sync.headers_mut().incomplete_header() { - log::debug!( - target: "bridge", - "Retrieving completion data for header: {:?}", - id, - ); - source_completion_future.set(source_client.header_completion(id).fuse()); - } else if let Some(header) = sync.headers().header(HeaderStatus::Extra) { - let id = header.id(); - log::debug!( - target: "bridge", - "Retrieving extra data for header: {:?}", - id, - ); - source_extra_future.set(source_client.header_extra(id, header.clone()).fuse()); - } else if let Some(header) = sync.select_orphan_header_to_download() { - // for Orphan we actually ask for parent' header - let parent_id = header.parent_id(); - - // if we have end up with orphan header#0, then we are misconfigured - if parent_id.0.is_zero() { - log::error!( - target: "bridge", - "Misconfiguration. Genesis {} header is considered orphan by {} node", - P::SOURCE_NAME, - P::TARGET_NAME, - ); - return Ok(()) - } - - log::debug!( - target: "bridge", - "Going to download orphan header from {} node: {:?}", - P::SOURCE_NAME, - parent_id, - ); - - source_orphan_header_future.set(source_client.header_by_hash(parent_id.1).fuse()); - } else if let Some(id) = sync.select_new_header_to_download() { - log::debug!( - target: "bridge", - "Going to download new header from {} node: {:?}", - P::SOURCE_NAME, - id, - ); - - source_new_header_future.set(source_client.header_by_number(id).fuse()); - } else { - source_client_is_online = true; - } - } - } -} - -/// Print synchronization progress. -fn print_sync_progress( - progress_context: (Instant, Option, Option), - eth_sync: &HeadersSync
<P>
, -) -> (Instant, Option, Option) { - let (prev_time, prev_best_header, prev_target_header) = progress_context; - let now_time = Instant::now(); - let (now_best_header, now_target_header) = eth_sync.status(); - - let need_update = now_time - prev_time > Duration::from_secs(10) || - match (prev_best_header, now_best_header) { - (Some(prev_best_header), Some(now_best_header)) => - now_best_header.0.saturating_sub(prev_best_header) > 10.into(), - _ => false, - }; - if !need_update { - return (prev_time, prev_best_header, prev_target_header) - } - - log::info!( - target: "bridge", - "Synced {:?} of {:?} headers", - now_best_header.map(|id| id.0), - now_target_header, - ); - (now_time, (*now_best_header).map(|id| id.0), *now_target_header) -} diff --git a/relays/headers/src/sync_loop_metrics.rs b/relays/headers/src/sync_loop_metrics.rs deleted file mode 100644 index 1c558c25de9d..000000000000 --- a/relays/headers/src/sync_loop_metrics.rs +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Metrics for headers synchronization relay loop. - -use crate::{ - sync::HeadersSync, - sync_types::{HeaderStatus, HeadersSyncPipeline}, -}; - -use num_traits::Zero; -use relay_utils::metrics::{metric_name, register, GaugeVec, Opts, PrometheusError, Registry, U64}; - -/// Headers sync metrics. -#[derive(Clone)] -pub struct SyncLoopMetrics { - /// Best syncing headers at "source" and "target" nodes. - best_block_numbers: GaugeVec, - /// Number of headers in given states (see `HeaderStatus`). - blocks_in_state: GaugeVec, -} - -impl SyncLoopMetrics { - /// Create and register headers loop metrics. - pub fn new(registry: &Registry, prefix: Option<&str>) -> Result { - Ok(SyncLoopMetrics { - best_block_numbers: register( - GaugeVec::new( - Opts::new( - metric_name(prefix, "best_block_numbers"), - "Best block numbers on source and target nodes", - ), - &["node"], - )?, - registry, - )?, - blocks_in_state: register( - GaugeVec::new( - Opts::new( - metric_name(prefix, "blocks_in_state"), - "Number of blocks in given state", - ), - &["state"], - )?, - registry, - )?, - }) - } -} - -impl SyncLoopMetrics { - /// Update best block number at source. - pub fn update_best_block_at_source>(&self, source_best_number: Number) { - self.best_block_numbers - .with_label_values(&["source"]) - .set(source_best_number.into()); - } - - /// Update best block number at target. - pub fn update_best_block_at_target>(&self, target_best_number: Number) { - self.best_block_numbers - .with_label_values(&["target"]) - .set(target_best_number.into()); - } - - /// Update metrics. - pub fn update(&self, sync: &HeadersSync
<P>
) { - let headers = sync.headers(); - let source_best_number = sync.source_best_number().unwrap_or_else(Zero::zero); - let target_best_number = - sync.target_best_header().map(|id| id.0).unwrap_or_else(Zero::zero); - - self.update_best_block_at_source(source_best_number); - self.update_best_block_at_target(target_best_number); - - self.blocks_in_state - .with_label_values(&["maybe_orphan"]) - .set(headers.headers_in_status(HeaderStatus::MaybeOrphan) as _); - self.blocks_in_state - .with_label_values(&["orphan"]) - .set(headers.headers_in_status(HeaderStatus::Orphan) as _); - self.blocks_in_state - .with_label_values(&["maybe_extra"]) - .set(headers.headers_in_status(HeaderStatus::MaybeExtra) as _); - self.blocks_in_state - .with_label_values(&["extra"]) - .set(headers.headers_in_status(HeaderStatus::Extra) as _); - self.blocks_in_state - .with_label_values(&["ready"]) - .set(headers.headers_in_status(HeaderStatus::Ready) as _); - self.blocks_in_state - .with_label_values(&["incomplete"]) - .set(headers.headers_in_status(HeaderStatus::Incomplete) as _); - self.blocks_in_state - .with_label_values(&["submitted"]) - .set(headers.headers_in_status(HeaderStatus::Submitted) as _); - } -} diff --git a/relays/headers/src/sync_loop_tests.rs b/relays/headers/src/sync_loop_tests.rs deleted file mode 100644 index f100998ca83f..000000000000 --- a/relays/headers/src/sync_loop_tests.rs +++ /dev/null @@ -1,602 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
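The `SyncLoopMetrics` module removed above is a thin wrapper around labelled Prometheus gauges: one `GaugeVec` keyed by `node` for best block numbers and one keyed by `state` for queue sizes. A minimal standalone sketch of the same pattern, using the `prometheus` crate directly instead of the `relay_utils::metrics` re-exports (the metric name and labels match the deleted code, everything else here is illustrative):

```rust
use prometheus::{Encoder, IntGaugeVec, Opts, Registry, TextEncoder};

fn main() -> Result<(), prometheus::Error> {
    let registry = Registry::new();

    // Mirrors the deleted `best_block_numbers` gauge with its `node` label.
    let best_blocks = IntGaugeVec::new(
        Opts::new("best_block_numbers", "Best block numbers on source and target nodes"),
        &["node"],
    )?;
    registry.register(Box::new(best_blocks.clone()))?;

    // The sync loop updates the labelled gauges on every iteration.
    best_blocks.with_label_values(&["source"]).set(1024);
    best_blocks.with_label_values(&["target"]).set(1018);

    // Render the registry the way a metrics endpoint would serve it.
    let mut buffer = Vec::new();
    TextEncoder::new().encode(&registry.gather(), &mut buffer)?;
    print!("{}", String::from_utf8(buffer).expect("text format is utf-8"));
    Ok(())
}
```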
-
-#![cfg(test)]
-
-use crate::{
-	sync_loop::{run, SourceClient, TargetClient},
-	sync_types::{HeadersSyncPipeline, QueuedHeader, SourceHeader, SubmittedHeaders},
-};
-
-use async_trait::async_trait;
-use backoff::backoff::Backoff;
-use futures::{future::FutureExt, stream::StreamExt};
-use parking_lot::Mutex;
-use relay_utils::{
-	metrics::MetricsParams, process_future_result, relay_loop::Client as RelayClient,
-	retry_backoff, HeaderId, MaybeConnectionError,
-};
-use std::{
-	collections::{HashMap, HashSet},
-	sync::Arc,
-	time::Duration,
-};
-
-pub type TestNumber = u64;
-pub type TestHash = u64;
-pub type TestHeaderId = HeaderId<TestHash, TestNumber>;
-pub type TestExtra = u64;
-pub type TestCompletion = u64;
-pub type TestQueuedHeader = QueuedHeader<TestHeadersSyncPipeline>;
-
-#[derive(Default, Debug, Clone, PartialEq)]
-pub struct TestHeader {
-	pub hash: TestHash,
-	pub number: TestNumber,
-	pub parent_hash: TestHash,
-}
-
-impl SourceHeader<TestHash, TestNumber> for TestHeader {
-	fn id(&self) -> TestHeaderId {
-		HeaderId(self.number, self.hash)
-	}
-
-	fn parent_id(&self) -> TestHeaderId {
-		HeaderId(self.number - 1, self.parent_hash)
-	}
-}
-
-#[derive(Debug, Clone)]
-struct TestError(bool);
-
-impl MaybeConnectionError for TestError {
-	fn is_connection_error(&self) -> bool {
-		self.0
-	}
-}
-
-#[derive(Debug, Clone, Copy, PartialEq)]
-pub struct TestHeadersSyncPipeline;
-
-impl HeadersSyncPipeline for TestHeadersSyncPipeline {
-	const SOURCE_NAME: &'static str = "Source";
-	const TARGET_NAME: &'static str = "Target";
-
-	type Hash = TestHash;
-	type Number = TestNumber;
-	type Header = TestHeader;
-	type Extra = TestExtra;
-	type Completion = TestCompletion;
-
-	fn estimate_size(_: &TestQueuedHeader) -> usize {
-		0
-	}
-}
-
-enum SourceMethod {
-	BestBlockNumber,
-	HeaderByHash(TestHash),
-	HeaderByNumber(TestNumber),
-	HeaderCompletion(TestHeaderId),
-	HeaderExtra(TestHeaderId, TestQueuedHeader),
-}
-
-#[derive(Clone)]
-struct Source {
-	data: Arc<Mutex<SourceData>>,
-	on_method_call: Arc<dyn Fn(SourceMethod, &mut SourceData) + Send + Sync>,
-}
-
-struct SourceData {
-	best_block_number: Result<TestNumber, TestError>,
-	header_by_hash: HashMap<TestHash, TestHeader>,
-	header_by_number: HashMap<TestNumber, TestHeader>,
-	provides_completion: bool,
-	provides_extra: bool,
-}
-
-impl Source {
-	pub fn new(
-		best_block_id: TestHeaderId,
-		headers: Vec<(bool, TestHeader)>,
-		on_method_call: impl Fn(SourceMethod, &mut SourceData) + Send + Sync + 'static,
-	) -> Self {
-		Source {
-			data: Arc::new(Mutex::new(SourceData {
-				best_block_number: Ok(best_block_id.0),
-				header_by_hash: headers
-					.iter()
-					.map(|(_, header)| (header.hash, header.clone()))
-					.collect(),
-				header_by_number: headers
-					.iter()
-					.filter_map(|(is_canonical, header)| {
-						if *is_canonical {
-							Some((header.hash, header.clone()))
-						} else {
-							None
-						}
-					})
-					.collect(),
-				provides_completion: true,
-				provides_extra: true,
-			})),
-			on_method_call: Arc::new(on_method_call),
-		}
-	}
-}
-
-#[async_trait]
-impl RelayClient for Source {
-	type Error = TestError;
-
-	async fn reconnect(&mut self) -> Result<(), TestError> {
-		unimplemented!()
-	}
-}
-
-#[async_trait]
-impl SourceClient<TestHeadersSyncPipeline> for Source {
-	async fn best_block_number(&self) -> Result<TestNumber, TestError> {
-		let mut data = self.data.lock();
-		(self.on_method_call)(SourceMethod::BestBlockNumber, &mut *data);
-		data.best_block_number.clone()
-	}
-
-	async fn header_by_hash(&self, hash: TestHash) -> Result<TestHeader, TestError> {
-		let mut data = self.data.lock();
-		(self.on_method_call)(SourceMethod::HeaderByHash(hash), &mut *data);
-		data.header_by_hash.get(&hash).cloned().ok_or(TestError(false))
-	}
-
-	async fn header_by_number(&self, number: TestNumber) -> Result<TestHeader, TestError> {
-		let mut data = self.data.lock();
-		(self.on_method_call)(SourceMethod::HeaderByNumber(number), &mut *data);
-		data.header_by_number.get(&number).cloned().ok_or(TestError(false))
-	}
-
-	async fn header_completion(
-		&self,
-		id: TestHeaderId,
-	) -> Result<(TestHeaderId, Option<TestCompletion>), TestError> {
-		let mut data = self.data.lock();
-		(self.on_method_call)(SourceMethod::HeaderCompletion(id), &mut *data);
-		if data.provides_completion {
-			Ok((id, Some(test_completion(id))))
-		} else {
-			Ok((id, None))
-		}
-	}
-
-	async fn header_extra(
-		&self,
-		id: TestHeaderId,
-		header: TestQueuedHeader,
-	) -> Result<(TestHeaderId, TestExtra), TestError> {
-		let mut data = self.data.lock();
-		(self.on_method_call)(SourceMethod::HeaderExtra(id, header), &mut *data);
-		if data.provides_extra {
-			Ok((id, test_extra(id)))
-		} else {
-			Err(TestError(false))
-		}
-	}
-}
-
-enum TargetMethod {
-	BestHeaderId,
-	IsKnownHeader(TestHeaderId),
-	SubmitHeaders(Vec<TestQueuedHeader>),
-	IncompleteHeadersIds,
-	CompleteHeader(TestHeaderId, TestCompletion),
-	RequiresExtra(TestQueuedHeader),
-}
-
-#[derive(Clone)]
-struct Target {
-	data: Arc<Mutex<TargetData>>,
-	on_method_call: Arc<dyn Fn(TargetMethod, &mut TargetData) + Send + Sync>,
-}
-
-struct TargetData {
-	best_header_id: Result<TestHeaderId, TestError>,
-	is_known_header_by_hash: HashMap<TestHash, bool>,
-	submitted_headers: HashMap<TestHash, TestQueuedHeader>,
-	submit_headers_result: Option<SubmittedHeaders<TestHeaderId, TestError>>,
-	completed_headers: HashMap<TestHash, TestCompletion>,
-	requires_completion: bool,
-	requires_extra: bool,
-}
-
-impl Target {
-	pub fn new(
-		best_header_id: TestHeaderId,
-		headers: Vec<TestHeaderId>,
-		on_method_call: impl Fn(TargetMethod, &mut TargetData) + Send + Sync + 'static,
-	) -> Self {
-		Target {
-			data: Arc::new(Mutex::new(TargetData {
-				best_header_id: Ok(best_header_id),
-				is_known_header_by_hash: headers.iter().map(|header| (header.1, true)).collect(),
-				submitted_headers: HashMap::new(),
-				submit_headers_result: None,
-				completed_headers: HashMap::new(),
-				requires_completion: false,
-				requires_extra: false,
-			})),
-			on_method_call: Arc::new(on_method_call),
-		}
-	}
-}
-
-#[async_trait]
-impl RelayClient for Target {
-	type Error = TestError;
-
-	async fn reconnect(&mut self) -> Result<(), TestError> {
-		unimplemented!()
-	}
-}
-
-#[async_trait]
-impl TargetClient<TestHeadersSyncPipeline> for Target {
-	async fn best_header_id(&self) -> Result<TestHeaderId, TestError> {
-		let mut data = self.data.lock();
-		(self.on_method_call)(TargetMethod::BestHeaderId, &mut *data);
-		data.best_header_id.clone()
-	}
-
-	async fn is_known_header(&self, id: TestHeaderId) -> Result<(TestHeaderId, bool), TestError> {
-		let mut data = self.data.lock();
-		(self.on_method_call)(TargetMethod::IsKnownHeader(id), &mut *data);
-		data.is_known_header_by_hash
-			.get(&id.1)
-			.cloned()
-			.map(|is_known_header| Ok((id, is_known_header)))
-			.unwrap_or(Ok((id, false)))
-	}
-
-	async fn submit_headers(
-		&self,
-		headers: Vec<TestQueuedHeader>,
-	) -> SubmittedHeaders<TestHeaderId, TestError> {
-		let mut data = self.data.lock();
-		(self.on_method_call)(TargetMethod::SubmitHeaders(headers.clone()), &mut *data);
-		data.submitted_headers
-			.extend(headers.iter().map(|header| (header.id().1, header.clone())));
-		data.submit_headers_result.take().expect("test must accept headers")
-	}
-
-	async fn incomplete_headers_ids(&self) -> Result<HashSet<TestHeaderId>, TestError> {
-		let mut data = self.data.lock();
-		(self.on_method_call)(TargetMethod::IncompleteHeadersIds, &mut *data);
-		if data.requires_completion {
-			Ok(data
-				.submitted_headers
-				.iter()
-				.filter(|(hash, _)| !data.completed_headers.contains_key(hash))
-				.map(|(_, header)| header.id())
-				.collect())
-		} else {
-			Ok(HashSet::new())
-		}
-	}
-
-	async fn complete_header(
-		&self,
-		id: TestHeaderId,
-		completion: TestCompletion,
-	) -> Result<TestHeaderId, TestError> {
-		let mut data = self.data.lock();
-		(self.on_method_call)(TargetMethod::CompleteHeader(id, completion), &mut *data);
-		data.completed_headers.insert(id.1, completion);
-		Ok(id)
-	}
-
-	async fn requires_extra(
-		&self,
-		header: TestQueuedHeader,
-	) -> Result<(TestHeaderId, bool), TestError> {
-		let mut data = self.data.lock();
-		(self.on_method_call)(TargetMethod::RequiresExtra(header.clone()), &mut *data);
-		if data.requires_extra {
-			Ok((header.id(), true))
-		} else {
-			Ok((header.id(), false))
-		}
-	}
-}
-
-fn test_tick() -> Duration {
-	// in ideal world that should have been Duration::from_millis(0), because we do not want
-	// to sleep in tests at all, but that could lead to `select! {}` always waking on tick
-	// => not doing actual job
-	Duration::from_millis(10)
-}
-
-fn test_id(number: TestNumber) -> TestHeaderId {
-	HeaderId(number, number)
-}
-
-fn test_header(number: TestNumber) -> TestHeader {
-	let id = test_id(number);
-	TestHeader {
-		hash: id.1,
-		number: id.0,
-		parent_hash: if number == 0 { TestHash::default() } else { test_id(number - 1).1 },
-	}
-}
-
-fn test_forked_id(number: TestNumber, forked_from: TestNumber) -> TestHeaderId {
-	const FORK_OFFSET: TestNumber = 1000;
-
-	if number == forked_from {
-		HeaderId(number, number)
-	} else {
-		HeaderId(number, FORK_OFFSET + number)
-	}
-}
-
-fn test_forked_header(number: TestNumber, forked_from: TestNumber) -> TestHeader {
-	let id = test_forked_id(number, forked_from);
-	TestHeader {
-		hash: id.1,
-		number: id.0,
-		parent_hash: if number == 0 {
-			TestHash::default()
-		} else {
-			test_forked_id(number - 1, forked_from).1
-		},
-	}
-}
-
-fn test_completion(id: TestHeaderId) -> TestCompletion {
-	id.0
-}
-
-fn test_extra(id: TestHeaderId) -> TestExtra {
-	id.0
-}
-
-fn source_reject_completion(method: &SourceMethod) {
-	if let SourceMethod::HeaderCompletion(_) = method {
-		unreachable!("HeaderCompletion request is not expected")
-	}
-}
-
-fn source_reject_extra(method: &SourceMethod) {
-	if let SourceMethod::HeaderExtra(_, _) = method {
-		unreachable!("HeaderExtra request is not expected")
-	}
-}
-
-fn target_accept_all_headers(method: &TargetMethod, data: &mut TargetData, requires_extra: bool) {
-	if let TargetMethod::SubmitHeaders(ref submitted) = method {
-		assert_eq!(submitted.iter().all(|header| header.extra().is_some()), requires_extra,);
-
-		data.submit_headers_result = Some(SubmittedHeaders {
-			submitted: submitted.iter().map(|header| header.id()).collect(),
-			..Default::default()
-		});
-	}
-}
-
-fn target_signal_exit_when_header_submitted(
-	method: &TargetMethod,
-	header_id: TestHeaderId,
-	exit_signal: &futures::channel::mpsc::UnboundedSender<()>,
-) {
-	if let TargetMethod::SubmitHeaders(ref submitted) = method {
-		if submitted.iter().any(|header| header.id() == header_id) {
-			exit_signal.unbounded_send(()).unwrap();
-		}
-	}
-}
-
-fn target_signal_exit_when_header_completed(
-	method: &TargetMethod,
-	header_id: TestHeaderId,
-	exit_signal: &futures::channel::mpsc::UnboundedSender<()>,
-) {
-	if let TargetMethod::CompleteHeader(completed_id, _) = method {
-		if *completed_id == header_id {
-			exit_signal.unbounded_send(()).unwrap();
-		}
-	}
-}
-
-fn run_backoff_test(result: Result<(), TestError>) -> (Duration, Duration) {
-	let mut backoff = retry_backoff();
-
-	// no randomness in tests (otherwise intervals may overlap => asserts are failing)
-	backoff.randomization_factor = 0f64;
-
-	// increase backoff's current interval
-	let interval1 = backoff.next_backoff().unwrap();
-	let interval2 = backoff.next_backoff().unwrap();
-	assert!(interval2 > interval1);
-
-	// successful future result leads to backoff's reset
-	let go_offline_future = futures::future::Fuse::terminated();
-	futures::pin_mut!(go_offline_future);
-
-	process_future_result(
-		result,
-		&mut backoff,
-		|_| {},
-		&mut go_offline_future,
-		async_std::task::sleep,
-		|| "Test error".into(),
-	);
-
-	(interval2, backoff.next_backoff().unwrap())
-}
-
-#[test]
-fn process_future_result_resets_backoff_on_success() {
-	let (interval2, interval_after_reset) = run_backoff_test(Ok(()));
-	assert!(interval2 > interval_after_reset);
-}
-
-#[test]
-fn process_future_result_resets_backoff_on_connection_error() {
-	let (interval2, interval_after_reset) = run_backoff_test(Err(TestError(true)));
-	assert!(interval2 > interval_after_reset);
-}
-
-#[test]
-fn process_future_result_does_not_reset_backoff_on_non_connection_error() {
-	let (interval2, interval_after_reset) = run_backoff_test(Err(TestError(false)));
-	assert!(interval2 < interval_after_reset);
-}
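The three deleted tests above encode the contract of `retry_backoff()`: intervals grow while a non-connection error repeats, and the backoff is reset after a success or a connection error (the latter triggers a reconnect rather than a retry). A minimal standalone illustration of that reset behaviour, assuming `retry_backoff()` is essentially a jitter-free `ExponentialBackoff` from the `backoff` crate (jitter is disabled here, as in the tests):

```rust
use backoff::{backoff::Backoff, ExponentialBackoff};

fn main() {
	// Deterministic intervals: no randomization, so comparisons are exact.
	let mut backoff = ExponentialBackoff { randomization_factor: 0.0, ..Default::default() };

	// Each failed attempt widens the next retry interval...
	let first = backoff.next_backoff().unwrap();
	let second = backoff.next_backoff().unwrap();
	assert!(second > first);

	// ...while `reset()` (what `process_future_result` does on success or on a
	// connection error) shrinks it back to the initial interval.
	backoff.reset();
	assert!(backoff.next_backoff().unwrap() < second);
}
```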
-
-struct SyncLoopTestParams {
-	best_source_header: TestHeader,
-	headers_on_source: Vec<(bool, TestHeader)>,
-	best_target_header: TestHeader,
-	headers_on_target: Vec<TestHeader>,
-	target_requires_extra: bool,
-	target_requires_completion: bool,
-	stop_at: TestHeaderId,
-}
-
-fn run_sync_loop_test(params: SyncLoopTestParams) {
-	let (exit_sender, exit_receiver) = futures::channel::mpsc::unbounded();
-	let target_requires_extra = params.target_requires_extra;
-	let target_requires_completion = params.target_requires_completion;
-	let stop_at = params.stop_at;
-	let source =
-		Source::new(params.best_source_header.id(), params.headers_on_source, move |method, _| {
-			if !target_requires_extra {
-				source_reject_extra(&method);
-			}
-			if !target_requires_completion {
-				source_reject_completion(&method);
-			}
-		});
-	let target = Target::new(
-		params.best_target_header.id(),
-		params.headers_on_target.into_iter().map(|header| header.id()).collect(),
-		move |method, data| {
-			target_accept_all_headers(&method, data, target_requires_extra);
-			if target_requires_completion {
-				target_signal_exit_when_header_completed(&method, stop_at, &exit_sender);
-			} else {
-				target_signal_exit_when_header_submitted(&method, stop_at, &exit_sender);
-			}
-		},
-	);
-	target.data.lock().requires_extra = target_requires_extra;
-	target.data.lock().requires_completion = target_requires_completion;
-
-	let _ = async_std::task::block_on(run(
-		source,
-		test_tick(),
-		target,
-		test_tick(),
-		(),
-		crate::sync::tests::default_sync_params(),
-		MetricsParams::disabled(),
-		exit_receiver.into_future().map(|(_, _)| ()),
-	));
-}
-
-#[test]
-fn sync_loop_is_able_to_synchronize_single_header() {
-	run_sync_loop_test(SyncLoopTestParams {
-		best_source_header: test_header(1),
-		headers_on_source: vec![(true, test_header(1))],
-		best_target_header: test_header(0),
-		headers_on_target: vec![test_header(0)],
-		target_requires_extra: false,
-		target_requires_completion: false,
-		stop_at: test_id(1),
-	});
-}
-
-#[test]
-fn sync_loop_is_able_to_synchronize_single_header_with_extra() {
-	run_sync_loop_test(SyncLoopTestParams {
-		best_source_header: test_header(1),
-		headers_on_source: vec![(true, test_header(1))],
-		best_target_header: test_header(0),
-		headers_on_target: vec![test_header(0)],
-		target_requires_extra: true,
-		target_requires_completion: false,
-		stop_at: test_id(1),
-	});
-}
-
-#[test]
-fn sync_loop_is_able_to_synchronize_single_header_with_completion() {
-	run_sync_loop_test(SyncLoopTestParams {
-		best_source_header: test_header(1),
-		headers_on_source: vec![(true, test_header(1))],
-		best_target_header: test_header(0),
-		headers_on_target: vec![test_header(0)],
-		target_requires_extra: false,
-		target_requires_completion: true,
-		stop_at: test_id(1),
-	});
-}
-
-#[test]
-fn sync_loop_is_able_to_reorganize_from_shorter_fork() {
-	run_sync_loop_test(SyncLoopTestParams {
-		best_source_header: test_header(3),
-		headers_on_source: vec![
-			(true, test_header(1)),
-			(true, test_header(2)),
-			(true, test_header(3)),
-			(false, test_forked_header(1, 0)),
-			(false, test_forked_header(2, 0)),
-		],
-		best_target_header: test_forked_header(2, 0),
-		headers_on_target: vec![test_header(0), test_forked_header(1, 0), test_forked_header(2, 0)],
-		target_requires_extra: false,
-		target_requires_completion: false,
-		stop_at: test_id(3),
-	});
-}
-
-#[test]
-fn sync_loop_is_able_to_reorganize_from_longer_fork() {
-	run_sync_loop_test(SyncLoopTestParams {
-		best_source_header: test_header(3),
-		headers_on_source: vec![
-			(true, test_header(1)),
-			(true, test_header(2)),
-			(true, test_header(3)),
-			(false, test_forked_header(1, 0)),
-			(false, test_forked_header(2, 0)),
-			(false, test_forked_header(3, 0)),
-			(false, test_forked_header(4, 0)),
-			(false, test_forked_header(5, 0)),
-		],
-		best_target_header: test_forked_header(5, 0),
-		headers_on_target: vec![
-			test_header(0),
-			test_forked_header(1, 0),
-			test_forked_header(2, 0),
-			test_forked_header(3, 0),
-			test_forked_header(4, 0),
-			test_forked_header(5, 0),
-		],
-		target_requires_extra: false,
-		target_requires_completion: false,
-		stop_at: test_id(3),
-	});
-}
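One pattern from the deleted harness worth calling out is how `run_sync_loop_test` stops the loop: it hands the sync loop a future built from an unbounded channel, and a target-side hook fires the channel once the header under test arrives. A small sketch of that idiom (the helper name `exit_future` is invented for illustration):

```rust
use futures::{channel::mpsc, future::FutureExt, stream::StreamExt};

// Build the exit future the way `run_sync_loop_test` above does.
fn exit_future(receiver: mpsc::UnboundedReceiver<()>) -> impl std::future::Future<Output = ()> {
	// `into_future()` resolves on the first message (or when all senders are
	// dropped); `map` discards the message and the remaining stream.
	receiver.into_future().map(|(_, _)| ())
}

fn main() {
	let (exit_sender, exit_receiver) = mpsc::unbounded::<()>();
	let _exit_signal = exit_future(exit_receiver);
	// A test hook fires the signal once the awaited event is observed:
	exit_sender.unbounded_send(()).unwrap();
}
```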
diff --git a/relays/headers/src/sync_types.rs b/relays/headers/src/sync_types.rs
deleted file mode 100644
index 8d93e8bf49fb..000000000000
--- a/relays/headers/src/sync_types.rs
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright 2019-2021 Parity Technologies (UK) Ltd.
-// This file is part of Parity Bridges Common.
-
-// Parity Bridges Common is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Parity Bridges Common is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
-
-//! Types that are used by headers synchronization components.
-
-use relay_utils::{format_ids, HeaderId};
-use std::{ops::Deref, sync::Arc};
-
-/// Ethereum header synchronization status.
-#[derive(Debug, Clone, Copy, PartialEq)]
-pub enum HeaderStatus {
-	/// Header is unknown.
-	Unknown,
-	/// Header is in MaybeOrphan queue.
-	MaybeOrphan,
-	/// Header is in Orphan queue.
-	Orphan,
-	/// Header is in MaybeExtra queue.
-	MaybeExtra,
-	/// Header is in Extra queue.
-	Extra,
-	/// Header is in Ready queue.
-	Ready,
-	/// Header is in Incomplete queue.
-	Incomplete,
-	/// Header has been recently submitted to the target node.
-	Submitted,
-	/// Header is known to the target node.
-	Synced,
-}
-
-/// Headers synchronization pipeline.
-pub trait HeadersSyncPipeline: 'static + Clone + Send + Sync {
-	/// Name of the headers source.
-	const SOURCE_NAME: &'static str;
-	/// Name of the headers target.
-	const TARGET_NAME: &'static str;
-
-	/// Headers we're syncing are identified by this hash.
-	type Hash: Eq
-		+ Clone
-		+ Copy
-		+ Send
-		+ Sync
-		+ std::fmt::Debug
-		+ std::fmt::Display
-		+ std::hash::Hash;
-	/// Headers we're syncing are identified by this number.
-	type Number: relay_utils::BlockNumberBase;
-	/// Type of header that we're syncing.
-	type Header: SourceHeader<Self::Hash, Self::Number>;
-	/// Type of extra data for the header that we're receiving from the source node:
-	/// 1) extra data is required for some headers;
-	/// 2) target node may answer if it'll require extra data before header is submitted;
-	/// 3) extra data available since the header creation time;
-	/// 4) header and extra data are submitted in single transaction.
-	///
-	/// Example: Ethereum transactions receipts.
-	type Extra: Clone + Send + Sync + PartialEq + std::fmt::Debug;
-	/// Type of data required to 'complete' header that we're receiving from the source node:
-	/// 1) completion data is required for some headers;
-	/// 2) target node can't answer if it'll require completion data before header is accepted;
-	/// 3) completion data may be generated after header generation;
-	/// 4) header and completion data are submitted in separate transactions.
-	///
-	/// Example: Substrate GRANDPA justifications.
-	type Completion: Clone + Send + Sync + std::fmt::Debug;
-
-	/// Function used to estimate size of target-encoded header.
-	fn estimate_size(source: &QueuedHeader<Self>) -> usize;
-}
-
-/// A HeaderId for `HeaderSyncPipeline`.
-pub type HeaderIdOf<P> =
-	HeaderId<<P as HeadersSyncPipeline>::Hash, <P as HeadersSyncPipeline>::Number>;
-
-/// Header that we're receiving from source node.
-pub trait SourceHeader<Hash, Number>: Clone + std::fmt::Debug + PartialEq + Send + Sync {
-	/// Returns ID of header.
-	fn id(&self) -> HeaderId<Hash, Number>;
-	/// Returns ID of parent header.
-	///
-	/// Panics if called for genesis header.
-	fn parent_id(&self) -> HeaderId<Hash, Number>;
-}
-
-/// Header how it's stored in the synchronization queue.
-#[derive(Clone, Debug, PartialEq)]
-pub struct QueuedHeader<P: HeadersSyncPipeline>(Arc<QueuedHeaderData<P>>);
-
-impl<P: HeadersSyncPipeline> QueuedHeader<P> {
-	/// Creates new queued header.
-	pub fn new(header: P::Header) -> Self {
-		QueuedHeader(Arc::new(QueuedHeaderData { header, extra: None }))
-	}
-
-	/// Set associated extra data.
-	pub fn set_extra(self, extra: P::Extra) -> Self {
-		QueuedHeader(Arc::new(QueuedHeaderData {
-			header: Arc::try_unwrap(self.0)
-				.map(|data| data.header)
-				.unwrap_or_else(|data| data.header.clone()),
-			extra: Some(extra),
-		}))
-	}
-}
-
-impl<P: HeadersSyncPipeline> Deref for QueuedHeader<P> {
-	type Target = QueuedHeaderData<P>;
-
-	fn deref(&self) -> &Self::Target {
-		&self.0
-	}
-}
-
-/// Header how it's stored in the synchronization queue.
-#[derive(Clone, Debug, Default, PartialEq)]
-pub struct QueuedHeaderData<P: HeadersSyncPipeline> {
-	header: P::Header,
-	extra: Option<P::Extra>,
-}
-
-impl<P: HeadersSyncPipeline> QueuedHeader<P> {
-	/// Returns ID of header.
-	pub fn id(&self) -> HeaderId<P::Hash, P::Number> {
-		self.header.id()
-	}
-
-	/// Returns ID of parent header.
-	pub fn parent_id(&self) -> HeaderId<P::Hash, P::Number> {
-		self.header.parent_id()
-	}
-
-	/// Returns reference to header.
-	pub fn header(&self) -> &P::Header {
-		&self.header
-	}
-
-	/// Returns reference to associated extra data.
-	pub fn extra(&self) -> &Option<P::Extra> {
-		&self.extra
-	}
-}
-
-/// Headers submission result.
-#[derive(Debug)]
-#[cfg_attr(test, derive(PartialEq))]
-pub struct SubmittedHeaders<Id, Error> {
-	/// IDs of headers that have been submitted to target node.
-	pub submitted: Vec<Id>,
-	/// IDs of incomplete headers. These headers were submitted (so this id is also in `submitted`
-	/// vec), but all descendants are not.
-	pub incomplete: Vec<Id>,
-	/// IDs of ignored headers that we have decided not to submit (they are either rejected by
-	/// target node immediately, or they're descendants of incomplete headers).
-	pub rejected: Vec<Id>,
-	/// Fatal target node error, if it has occurred during submission.
-	pub fatal_error: Option<Error>,
-}
-
-impl<Id, Error> Default for SubmittedHeaders<Id, Error> {
-	fn default() -> Self {
-		SubmittedHeaders {
-			submitted: Vec::new(),
-			incomplete: Vec::new(),
-			rejected: Vec::new(),
-			fatal_error: None,
-		}
-	}
-}
-
-impl<Id: std::fmt::Debug, Error> std::fmt::Display for SubmittedHeaders<Id, Error> {
-	fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
-		let submitted = format_ids(self.submitted.iter());
-		let incomplete = format_ids(self.incomplete.iter());
-		let rejected = format_ids(self.rejected.iter());
-
-		write!(f, "Submitted: {}, Incomplete: {}, Rejected: {}", submitted, incomplete, rejected)
-	}
-}
diff --git a/relays/lib-substrate-relay/src/helpers.rs b/relays/lib-substrate-relay/src/helpers.rs
index 01f881998ad0..f95a8e0aba3a 100644
--- a/relays/lib-substrate-relay/src/helpers.rs
+++ b/relays/lib-substrate-relay/src/helpers.rs
@@ -16,17 +16,11 @@
 
 //! Substrate relay helpers
 
-use relay_utils::metrics::{FloatJsonValueMetric, PrometheusError, Registry};
+use relay_utils::metrics::{FloatJsonValueMetric, PrometheusError};
 
 /// Creates standalone token price metric.
-pub fn token_price_metric(
-	registry: &Registry,
-	prefix: Option<&str>,
-	token_id: &str,
-) -> Result<FloatJsonValueMetric, PrometheusError> {
+pub fn token_price_metric(token_id: &str) -> Result<FloatJsonValueMetric, PrometheusError> {
 	FloatJsonValueMetric::new(
-		registry,
-		prefix,
 		format!("https://api.coingecko.com/api/v3/simple/price?ids={}&vs_currencies=btc", token_id),
 		format!("$.{}.btc", token_id),
 		format!("{}_to_base_conversion_rate", token_id.replace("-", "_")),
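With the registry and prefix parameters gone from `token_price_metric`, registration becomes the caller's job. A hedged usage sketch (the token id is a placeholder, the function lives in `crate::helpers` of `relay-lib-substrate-relay`, and `register_and_spawn` is the `StandaloneMetric` helper introduced later in this diff):

```rust
use relay_utils::metrics::{PrometheusError, Registry, StandaloneMetric};

fn spawn_token_price_metric(registry: &Registry) -> Result<(), PrometheusError> {
	// For token_id = "polkadot" this polls
	// https://api.coingecko.com/api/v3/simple/price?ids=polkadot&vs_currencies=btc
	// and reads `$.polkadot.btc` into a `polkadot_to_base_conversion_rate` gauge.
	let metric = crate::helpers::token_price_metric("polkadot")?;
	// Registration and spawning of the self-update task are now explicit.
	metric.register_and_spawn(registry)
}
```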
diff --git a/relays/lib-substrate-relay/src/messages_lane.rs b/relays/lib-substrate-relay/src/messages_lane.rs
index 5e9564cf95e3..6cadb64754a5 100644
--- a/relays/lib-substrate-relay/src/messages_lane.rs
+++ b/relays/lib-substrate-relay/src/messages_lane.rs
@@ -34,7 +34,9 @@ use relay_substrate_client::{
 	BlockNumberOf, Chain, Client, HashOf,
 };
 use relay_utils::{
-	metrics::{F64SharedRef, MetricsParams},
+	metrics::{
+		FloatJsonValueMetric, GlobalMetrics, MetricsParams, PrometheusError, StandaloneMetric,
+	},
 	BlockNumberBase,
 };
 use sp_core::{storage::StorageKey, Bytes};
@@ -63,6 +65,8 @@ pub struct MessagesRelayParams
+	/// Standalone metrics.
+	pub standalone_metrics: Option<StandaloneMessagesMetrics<SC, TC>>,
 	/// Relay strategy
 	pub relay_strategy: Strategy,
 }
@@ -241,110 +245,155 @@ pub fn select_delivery_transaction_limits
-/// Shared references to the standalone metrics of the message lane relay loop.
-#[derive(Debug, Clone)]
-pub struct StandaloneMessagesMetrics {
-	/// Shared reference to the actual target -> <base> chain token conversion rate.
-	pub target_to_base_conversion_rate: Option<F64SharedRef>,
-	/// Shared reference to the actual source -> <base> chain token conversion rate.
-	pub source_to_base_conversion_rate: Option<F64SharedRef>,
-	/// Shared reference to the stored (in the source chain runtime storage) target -> source chain
-	/// conversion rate.
-	pub target_to_source_conversion_rate: Option<F64SharedRef>,
+pub struct StandaloneMessagesMetrics<SC: Chain, TC: Chain> {
+	/// Global metrics.
+	pub global: GlobalMetrics,
+	/// Source chain proof overhead metric.
+	pub source_storage_proof_overhead: StorageProofOverheadMetric<SC>,
+	/// Target chain proof overhead metric.
+	pub target_storage_proof_overhead: StorageProofOverheadMetric<TC>,
+	/// Source tokens to base conversion rate metric.
+	pub source_to_base_conversion_rate: Option<FloatJsonValueMetric>,
+	/// Target tokens to base conversion rate metric.
+	pub target_to_base_conversion_rate: Option<FloatJsonValueMetric>,
+	/// Source tokens to target tokens conversion rate metric. This rate is stored by the target
+	/// chain.
+	pub source_to_target_conversion_rate:
+		Option<FloatStorageValueMetric<TC, sp_runtime::FixedU128>>,
+	/// Target tokens to source tokens conversion rate metric. This rate is stored by the source
+	/// chain.
+	pub target_to_source_conversion_rate:
+		Option<FloatStorageValueMetric<SC, sp_runtime::FixedU128>>,
 }
 
-impl StandaloneMessagesMetrics {
+impl<SC: Chain, TC: Chain> StandaloneMessagesMetrics<SC, TC> {
+	/// Swap source and target sides.
+	pub fn reverse(self) -> StandaloneMessagesMetrics<TC, SC> {
+		StandaloneMessagesMetrics {
+			global: self.global,
+			source_storage_proof_overhead: self.target_storage_proof_overhead,
+			target_storage_proof_overhead: self.source_storage_proof_overhead,
+			source_to_base_conversion_rate: self.target_to_base_conversion_rate,
+			target_to_base_conversion_rate: self.source_to_base_conversion_rate,
+			source_to_target_conversion_rate: self.target_to_source_conversion_rate,
+			target_to_source_conversion_rate: self.source_to_target_conversion_rate,
+		}
+	}
+
+	/// Register all metrics in the registry.
+	pub fn register_and_spawn(
+		self,
+		metrics: MetricsParams,
+	) -> Result<MetricsParams, PrometheusError> {
+		self.global.register_and_spawn(&metrics.registry)?;
+		self.source_storage_proof_overhead.register_and_spawn(&metrics.registry)?;
+		self.target_storage_proof_overhead.register_and_spawn(&metrics.registry)?;
+		if let Some(m) = self.source_to_base_conversion_rate {
+			m.register_and_spawn(&metrics.registry)?;
+		}
+		if let Some(m) = self.target_to_base_conversion_rate {
+			m.register_and_spawn(&metrics.registry)?;
+		}
+		if let Some(m) = self.target_to_source_conversion_rate {
+			m.register_and_spawn(&metrics.registry)?;
+		}
+		Ok(metrics)
+	}
+
 	/// Return conversion rate from target to source tokens.
 	pub async fn target_to_source_conversion_rate(&self) -> Option<f64> {
-		let target_to_base_conversion_rate =
-			(*self.target_to_base_conversion_rate.as_ref()?.read().await)?;
-		let source_to_base_conversion_rate =
-			(*self.source_to_base_conversion_rate.as_ref()?.read().await)?;
-		Some(source_to_base_conversion_rate / target_to_base_conversion_rate)
+		Self::compute_target_to_source_conversion_rate(
+			*self.target_to_base_conversion_rate.as_ref()?.shared_value_ref().read().await,
+			*self.source_to_base_conversion_rate.as_ref()?.shared_value_ref().read().await,
+		)
+	}
+
+	/// Return conversion rate from target to source tokens, given conversion rates from
+	/// target/source tokens to some base token.
+	fn compute_target_to_source_conversion_rate(
+		target_to_base_conversion_rate: Option<f64>,
+		source_to_base_conversion_rate: Option<f64>,
+	) -> Option<f64> {
+		Some(source_to_base_conversion_rate? / target_to_base_conversion_rate?)
+	}
 }
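Because the struct is now generic over both chains, one set of metrics can serve both directions of a lane: `reverse()` relabels the sides without touching the underlying gauges. A rough wiring sketch under assumed chain types `A` and `B` (the constructor `standalone_metrics` appears just below in this diff; token ids and clients are placeholders):

```rust
use relay_substrate_client::{Chain, Client};
use relay_utils::metrics::MetricsParams;

fn build_shared_metrics<A: Chain, B: Chain>(
	a_client: Client<A>,
	b_client: Client<B>,
	metrics_params: MetricsParams,
) -> anyhow::Result<MetricsParams> {
	let a_to_b_metrics = standalone_metrics::<A, B>(
		a_client,
		b_client,
		Some("token-a"), // CoinGecko token ids are placeholders
		Some("token-b"),
		None, // no on-chain A -> B conversion rate storage key
		None, // no on-chain B -> A conversion rate storage key
	)?;
	// The same gauges, viewed from the opposite lane direction...
	let b_to_a_metrics = a_to_b_metrics.reverse();
	// ...registered (and their self-update tasks spawned) exactly once.
	b_to_a_metrics.register_and_spawn(metrics_params).map_err(Into::into)
}
```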
 
-/// Add general standalone metrics for the message lane relay loop.
-pub fn add_standalone_metrics<P: SubstrateMessageLane>(
-	metrics_prefix: Option<String>,
-	metrics_params: MetricsParams,
-	source_client: Client<P::SourceChain>,
+/// Create standalone metrics for the message lane relay loop.
+///
+/// All metrics returned by this function are exposed by loops that are serving given lane (`P`)
+/// and by loops that are serving reverse lane (`P` with swapped `TargetChain` and `SourceChain`).
+pub fn standalone_metrics<SC: Chain, TC: Chain>(
+	source_client: Client<SC>,
+	target_client: Client<TC>,
 	source_chain_token_id: Option<&str>,
 	target_chain_token_id: Option<&str>,
+	source_to_target_conversion_rate_params: Option<(StorageKey, FixedU128)>,
 	target_to_source_conversion_rate_params: Option<(StorageKey, FixedU128)>,
-) -> anyhow::Result<(MetricsParams, StandaloneMessagesMetrics)> {
-	let mut target_to_source_conversion_rate = None;
-	let mut source_to_base_conversion_rate = None;
-	let mut target_to_base_conversion_rate = None;
-	let mut metrics_params = relay_utils::relay_metrics(metrics_prefix, metrics_params)
-		.standalone_metric(|registry, prefix| {
-			StorageProofOverheadMetric::new(
-				registry,
-				prefix,
-				source_client.clone(),
-				format!("{}_storage_proof_overhead", P::SourceChain::NAME.to_lowercase()),
-				format!("{} storage proof overhead", P::SourceChain::NAME),
-			)
-		})?;
-	if let Some((
-		target_to_source_conversion_rate_storage_key,
-		initial_target_to_source_conversion_rate,
-	)) = target_to_source_conversion_rate_params
-	{
-		metrics_params = metrics_params.standalone_metric(|registry, prefix| {
-			let metric = FloatStorageValueMetric::<_, sp_runtime::FixedU128>::new(
-				registry,
-				prefix,
-				source_client,
-				target_to_source_conversion_rate_storage_key,
-				Some(initial_target_to_source_conversion_rate),
-				format!(
-					"{}_{}_to_{}_conversion_rate",
-					P::SourceChain::NAME,
-					P::TargetChain::NAME,
-					P::SourceChain::NAME
-				),
-				format!(
-					"{} to {} tokens conversion rate (used by {})",
-					P::TargetChain::NAME,
-					P::SourceChain::NAME,
-					P::SourceChain::NAME
-				),
-			)?;
-			target_to_source_conversion_rate = Some(metric.shared_value_ref());
-			Ok(metric)
-		})?;
-	}
-	if let Some(source_chain_token_id) = source_chain_token_id {
-		metrics_params = metrics_params.standalone_metric(|registry, prefix| {
-			let metric =
-				crate::helpers::token_price_metric(registry, prefix, source_chain_token_id)?;
-			source_to_base_conversion_rate = Some(metric.shared_value_ref());
-			Ok(metric)
-		})?;
-	}
-	if let Some(target_chain_token_id) = target_chain_token_id {
-		metrics_params = metrics_params.standalone_metric(|registry, prefix| {
-			let metric =
-				crate::helpers::token_price_metric(registry, prefix, target_chain_token_id)?;
-			target_to_base_conversion_rate = Some(metric.shared_value_ref());
-			Ok(metric)
-		})?;
-	}
-	Ok((
-		metrics_params.into_params(),
-		StandaloneMessagesMetrics {
-			target_to_base_conversion_rate,
-			source_to_base_conversion_rate,
-			target_to_source_conversion_rate,
-		},
-	))
+) -> anyhow::Result<StandaloneMessagesMetrics<SC, TC>> {
+	Ok(StandaloneMessagesMetrics {
+		global: GlobalMetrics::new()?,
+		source_storage_proof_overhead: StorageProofOverheadMetric::new(
+			source_client.clone(),
+			format!("{}_storage_proof_overhead", SC::NAME.to_lowercase()),
+			format!("{} storage proof overhead", SC::NAME),
+		)?,
+		target_storage_proof_overhead: StorageProofOverheadMetric::new(
+			target_client.clone(),
+			format!("{}_storage_proof_overhead", TC::NAME.to_lowercase()),
+			format!("{} storage proof overhead", TC::NAME),
+		)?,
+		source_to_base_conversion_rate: source_chain_token_id
+			.map(|source_chain_token_id| {
+				crate::helpers::token_price_metric(source_chain_token_id).map(Some)
+			})
+			.unwrap_or(Ok(None))?,
+		target_to_base_conversion_rate: target_chain_token_id
+			.map(|target_chain_token_id| {
+				crate::helpers::token_price_metric(target_chain_token_id).map(Some)
+			})
+			.unwrap_or(Ok(None))?,
+		source_to_target_conversion_rate: source_to_target_conversion_rate_params
+			.map(|(key, rate)| {
+				FloatStorageValueMetric::<_, sp_runtime::FixedU128>::new(
+					target_client,
+					key,
+					Some(rate),
+					format!("{}_{}_to_{}_conversion_rate", TC::NAME, SC::NAME, TC::NAME),
+					format!(
+						"{} to {} tokens conversion rate (used by {})",
+						SC::NAME,
+						TC::NAME,
+						TC::NAME
+					),
+				)
+				.map(Some)
+			})
+			.unwrap_or(Ok(None))?,
+		target_to_source_conversion_rate: target_to_source_conversion_rate_params
+			.map(|(key, rate)| {
+				FloatStorageValueMetric::<_, sp_runtime::FixedU128>::new(
+					source_client,
+					key,
+					Some(rate),
+					format!("{}_{}_to_{}_conversion_rate", SC::NAME, TC::NAME, SC::NAME),
+					format!(
+						"{} to {} tokens conversion rate (used by {})",
+						TC::NAME,
+						SC::NAME,
+						SC::NAME
+					),
+				)
+				.map(Some)
+			})
+			.unwrap_or(Ok(None))?,
+	})
 }
 
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use async_std::sync::{Arc, RwLock};
 
 	type RialtoToMillauMessagesWeights = pallet_bridge_messages::weights::RialtoWeight<rialto_runtime::Runtime>;
 
@@ -369,12 +418,9 @@ mod tests {
 
 	#[async_std::test]
 	async fn target_to_source_conversion_rate_works() {
-		let metrics = StandaloneMessagesMetrics {
-			target_to_base_conversion_rate: Some(Arc::new(RwLock::new(Some(183.15)))),
-			source_to_base_conversion_rate: Some(Arc::new(RwLock::new(Some(12.32)))),
-			target_to_source_conversion_rate: None, // we don't care
-		};
-
-		assert_eq!(metrics.target_to_source_conversion_rate().await, Some(12.32 / 183.15),);
+		assert_eq!(
+			StandaloneMessagesMetrics::<relay_rialto_client::Rialto, relay_millau_client::Millau>::compute_target_to_source_conversion_rate(Some(183.15), Some(12.32)),
+			Some(12.32 / 183.15),
+		);
 	}
 }
diff --git a/relays/lib-substrate-relay/src/messages_target.rs b/relays/lib-substrate-relay/src/messages_target.rs
index 6f95ffd12f09..eafc6bd3fc5f 100644
--- a/relays/lib-substrate-relay/src/messages_target.rs
+++ b/relays/lib-substrate-relay/src/messages_target.rs
@@ -55,7 +55,7 @@ pub struct SubstrateMessagesTarget<P: SubstrateMessageLane> {
 	client: Client<P::TargetChain>,
 	lane: P,
 	lane_id: LaneId,
-	metric_values: StandaloneMessagesMetrics,
+	metric_values: StandaloneMessagesMetrics<P::SourceChain, P::TargetChain>,
 	source_to_target_headers_relay: Option<Arc<dyn OnDemandHeadersRelay<P::SourceChain>>>,
 }
 
@@ -65,7 +65,7 @@ impl<P: SubstrateMessageLane> SubstrateMessagesTarget<P> {
 	pub fn new(
 		client: Client<P::TargetChain>,
 		lane: P,
 		lane_id: LaneId,
-		metric_values: StandaloneMessagesMetrics,
+		metric_values: StandaloneMessagesMetrics<P::SourceChain, P::TargetChain>,
 		source_to_target_headers_relay: Option<Arc<dyn OnDemandHeadersRelay<P::SourceChain>>>,
 	) -> Self {
 		SubstrateMessagesTarget {
diff --git a/relays/messages/Cargo.toml b/relays/messages/Cargo.toml
index ea5d46845c5a..b11f00b957a4 100644
--- a/relays/messages/Cargo.toml
+++ b/relays/messages/Cargo.toml
@@ -19,3 +19,5 @@ parking_lot = "0.11.0"
 bp-messages = { path = "../../primitives/messages" }
 bp-runtime = { path = "../../primitives/runtime" }
 relay-utils = { path = "../utils" }
+
+sp-arithmetic = { git = "https://github.com/paritytech/substrate", branch = "master" }
diff --git a/relays/messages/src/message_lane.rs b/relays/messages/src/message_lane.rs
index 2b2d8029fc74..5c9728ad93ab 100644
--- a/relays/messages/src/message_lane.rs
+++ b/relays/messages/src/message_lane.rs
@@ -21,6 +21,7 @@
 
 use num_traits::{SaturatingAdd, Zero};
 use relay_utils::{BlockNumberBase, HeaderId};
+use sp_arithmetic::traits::AtLeast32BitUnsigned;
 use std::{fmt::Debug, ops::Sub};
 
 /// One-way message lane.
@@ -40,7 +41,8 @@ pub trait MessageLane: 'static + Clone + Send + Sync {
 	/// 1) pay transaction fees;
 	/// 2) pay message delivery and dispatch fee;
 	/// 3) pay relayer rewards.
-	type SourceChainBalance: Clone
+	type SourceChainBalance: AtLeast32BitUnsigned
+		+ Clone
 		+ Copy
 		+ Debug
 		+ PartialOrd
diff --git a/relays/messages/src/message_lane_loop.rs b/relays/messages/src/message_lane_loop.rs
index 2de644091ef2..6cdb2b1aa5ae 100644
--- a/relays/messages/src/message_lane_loop.rs
+++ b/relays/messages/src/message_lane_loop.rs
@@ -32,10 +32,7 @@ use futures::{channel::mpsc::unbounded, future::FutureExt, stream::StreamExt};
 use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight};
 use bp_runtime::messages::DispatchFeePayment;
 use relay_utils::{
-	interval,
-	metrics::{GlobalMetrics, MetricsParams},
-	process_future_result,
-	relay_loop::Client as RelayClient,
+	interval, metrics::MetricsParams, process_future_result, relay_loop::Client as RelayClient,
 	retry_backoff, FailedClient,
 };
 
@@ -270,9 +267,8 @@ pub async fn run<P: MessageLane>(
 	let exit_signal = exit_signal.shared();
 	relay_utils::relay_loop(source_client, target_client)
 		.reconnect_delay(params.reconnect_delay)
-		.with_metrics(Some(metrics_prefix::<P>(&params.lane)), metrics_params)
-		.loop_metric(MessageLaneLoopMetrics::new)?
-		.standalone_metric(GlobalMetrics::new)?
+		.with_metrics(metrics_params)
+		.loop_metric(MessageLaneLoopMetrics::new(Some(&metrics_prefix::<P>(&params.lane)))?)?
 		.expose()
 		.await?
 		.run(metrics_prefix::<P>(&params.lane), move |source_client, target_client, metrics| {
diff --git a/relays/messages/src/message_race_delivery.rs b/relays/messages/src/message_race_delivery.rs
index 1cd2cbd26718..dc994364f178 100644
--- a/relays/messages/src/message_race_delivery.rs
+++ b/relays/messages/src/message_race_delivery.rs
@@ -521,7 +521,7 @@ where
 			nonces_queue_range: 0..maximal_source_queue_index + 1,
 		};
 
-		let strategy = EnforcementStrategy::new(self.relay_strategy.clone());
+		let mut strategy = EnforcementStrategy::new(self.relay_strategy.clone());
 		let range_end = strategy.decide(reference).await?;
 
 		let range_begin = source_queue[0].1.begin();
diff --git a/relays/messages/src/metrics.rs b/relays/messages/src/metrics.rs
index 8d6e480722e6..eac2f703692a 100644
--- a/relays/messages/src/metrics.rs
+++ b/relays/messages/src/metrics.rs
@@ -22,7 +22,9 @@ use crate::{
 };
 
 use bp_messages::MessageNonce;
-use relay_utils::metrics::{metric_name, register, GaugeVec, Opts, PrometheusError, Registry, U64};
+use relay_utils::metrics::{
+	metric_name, register, GaugeVec, Metric, Opts, PrometheusError, Registry, U64,
+};
 
 /// Message lane relay metrics.
 ///
@@ -38,30 +40,22 @@ pub struct MessageLaneLoopMetrics {
 
 impl MessageLaneLoopMetrics {
 	/// Create and register messages loop metrics.
-	pub fn new(registry: &Registry, prefix: Option<&str>) -> Result<Self, PrometheusError> {
+	pub fn new(prefix: Option<&str>) -> Result<Self, PrometheusError> {
 		Ok(MessageLaneLoopMetrics {
-			best_block_numbers: register(
-				GaugeVec::new(
-					Opts::new(
-						metric_name(prefix, "best_block_numbers"),
-						"Best finalized block numbers",
-					),
-					&["type"],
-				)?,
-				registry,
+			best_block_numbers: GaugeVec::new(
+				Opts::new(
+					metric_name(prefix, "best_block_numbers"),
+					"Best finalized block numbers",
+				),
+				&["type"],
 			)?,
-			lane_state_nonces: register(
-				GaugeVec::new(
-					Opts::new(metric_name(prefix, "lane_state_nonces"), "Nonces of the lane state"),
-					&["type"],
-				)?,
-				registry,
+			lane_state_nonces: GaugeVec::new(
+				Opts::new(metric_name(prefix, "lane_state_nonces"), "Nonces of the lane state"),
+				&["type"],
 			)?,
 		})
 	}
-}
 
-impl MessageLaneLoopMetrics {
 	/// Update source client state metrics.
 	pub fn update_source_state<P: MessageLane>(&self, source_client_state: SourceClientState<P>) {
 		self.best_block_numbers
@@ -122,3 +116,11 @@ impl MessageLaneLoopMetrics {
 			.set(target_latest_confirmed_nonce);
 	}
 }
+
+impl Metric for MessageLaneLoopMetrics {
+	fn register(&self, registry: &Registry) -> Result<(), PrometheusError> {
+		register(self.best_block_numbers.clone(), registry)?;
+		register(self.lane_state_nonces.clone(), registry)?;
+		Ok(())
+	}
+}
diff --git a/relays/messages/src/relay_strategy/altruistic_strategy.rs b/relays/messages/src/relay_strategy/altruistic_strategy.rs
index f932b796b0de..d6fec7f1297b 100644
--- a/relays/messages/src/relay_strategy/altruistic_strategy.rs
+++ b/relays/messages/src/relay_strategy/altruistic_strategy.rs
@@ -37,7 +37,7 @@ impl RelayStrategy for AltruisticStrategy {
 		SourceClient: MessageLaneSourceClient<P>,
 		TargetClient: MessageLaneTargetClient<P>,
 	>(
-		&self,
+		&mut self,
 		_reference: &mut RelayReference<P, SourceClient, TargetClient>,
 	) -> bool {
 		true
diff --git a/relays/messages/src/relay_strategy/enforcement_strategy.rs b/relays/messages/src/relay_strategy/enforcement_strategy.rs
index 042c05bec00a..1e9ef5bdbf81 100644
--- a/relays/messages/src/relay_strategy/enforcement_strategy.rs
+++ b/relays/messages/src/relay_strategy/enforcement_strategy.rs
@@ -49,7 +49,7 @@ impl<S: RelayStrategy> EnforcementStrategy<S> {
 		SourceClient: MessageLaneSourceClient<P>,
 		TargetClient: MessageLaneTargetClient<P>,
 	>(
-		&self,
+		&mut self,
 		reference: RelayMessagesBatchReference<P, SourceClient, TargetClient>,
 	) -> Option<MessageNonce> {
 		let mut hard_selected_count = 0;
diff --git a/relays/messages/src/relay_strategy/mix_strategy.rs b/relays/messages/src/relay_strategy/mix_strategy.rs
index a267d8ca5f5a..4ac7fe1d0ed0 100644
--- a/relays/messages/src/relay_strategy/mix_strategy.rs
+++ b/relays/messages/src/relay_strategy/mix_strategy.rs
@@ -47,7 +47,7 @@ impl RelayStrategy for MixStrategy {
 		SourceClient: MessageLaneSourceClient<P>,
 		TargetClient: MessageLaneTargetClient<P>,
 	>(
-		&self,
+		&mut self,
 		reference: &mut RelayReference<P, SourceClient, TargetClient>,
 	) -> bool {
 		match self.relayer_mode {
diff --git a/relays/messages/src/relay_strategy/mod.rs b/relays/messages/src/relay_strategy/mod.rs
index 3e4eef8975dd..d902bd93e5cf 100644
--- a/relays/messages/src/relay_strategy/mod.rs
+++ b/relays/messages/src/relay_strategy/mod.rs
@@ -52,7 +52,7 @@ pub trait RelayStrategy: 'static + Clone + Send + Sync {
 		SourceClient: MessageLaneSourceClient<P>,
 		TargetClient: MessageLaneTargetClient<P>,
 	>(
-		&self,
+		&mut self,
 		reference: &mut RelayReference<P, SourceClient, TargetClient>,
 	) -> bool;
 }
diff --git a/relays/messages/src/relay_strategy/rational_strategy.rs b/relays/messages/src/relay_strategy/rational_strategy.rs
index dc408ffd49e2..fd0a1ffafc8b 100644
--- a/relays/messages/src/relay_strategy/rational_strategy.rs
+++ b/relays/messages/src/relay_strategy/rational_strategy.rs
@@ -41,7 +41,7 @@ impl RelayStrategy for RationalStrategy {
 		SourceClient: MessageLaneSourceClient<P>,
 		TargetClient: MessageLaneTargetClient<P>,
 	>(
-		&self,
+		&mut self,
 		reference: &mut RelayReference<P, SourceClient, TargetClient>,
 	) -> bool {
 		// technically, multiple confirmations will be delivered in a single transaction,
diff --git a/relays/utils/src/metrics.rs b/relays/utils/src/metrics.rs
index 5c796071c6d5..805fe70bfe85 100644
--- a/relays/utils/src/metrics.rs
+++ b/relays/utils/src/metrics.rs
@@ -46,28 +46,38 @@ pub struct MetricsParams {
 	/// Interface and TCP port to be used when exposing Prometheus metrics.
 	pub address: Option<MetricsAddress>,
 	/// Metrics registry. May be `Some(_)` if several components share the same endpoint.
-	pub registry: Option<Registry>,
-	/// Prefix that must be used in metric names.
-	pub metrics_prefix: Option<String>,
+	pub registry: Registry,
 }
 
-/// Metrics API.
-pub trait Metrics: Clone + Send + Sync + 'static {}
-
-impl<T: Clone + Send + Sync + 'static> Metrics for T {}
+/// Metric API.
+pub trait Metric: Clone + Send + Sync + 'static {
+	fn register(&self, registry: &Registry) -> Result<(), PrometheusError>;
+}
 
-/// Standalone metrics API.
+/// Standalone metric API.
 ///
 /// Metrics of this kind know how to update themselves, so we may just spawn and forget the
 /// asynchronous self-update task.
 #[async_trait]
-pub trait StandaloneMetrics: Metrics {
+pub trait StandaloneMetric: Metric {
 	/// Update metric values.
 	async fn update(&self);
 
 	/// Metrics update interval.
 	fn update_interval(&self) -> Duration;
 
+	/// Register and spawn metric. Metric is only spawned if it is registered for the first time.
+	fn register_and_spawn(self, registry: &Registry) -> Result<(), PrometheusError> {
+		match self.register(registry) {
+			Ok(()) => {
+				self.spawn();
+				Ok(())
+			},
+			Err(PrometheusError::AlreadyReg) => Ok(()),
+			Err(e) => Err(e),
+		}
+	}
+
 	/// Spawn the self update task that will keep update metric value at given intervals.
 	fn spawn(self) {
 		async_std::task::spawn(async move {
@@ -89,7 +99,7 @@ impl Default for MetricsAddress {
 
 impl MetricsParams {
 	/// Creates metrics params so that metrics are not exposed.
 	pub fn disabled() -> Self {
-		MetricsParams { address: None, registry: None, metrics_prefix: None }
+		MetricsParams { address: None, registry: Registry::new() }
 	}
 
 	/// Do not expose metrics.
@@ -97,17 +107,11 @@ impl MetricsParams {
 		self.address = None;
 		self
 	}
-
-	/// Set prefix to use in metric names.
-	pub fn metrics_prefix(mut self, prefix: String) -> Self {
-		self.metrics_prefix = Some(prefix);
-		self
-	}
 }
 
 impl From<Option<MetricsAddress>> for MetricsParams {
 	fn from(address: Option<MetricsAddress>) -> Self {
-		MetricsParams { address, registry: None, metrics_prefix: None }
+		MetricsParams { address, registry: Registry::new() }
 	}
 }
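The new `register_and_spawn` default method is what makes sharing one standalone metric between several loops safe: a second registration surfaces as `PrometheusError::AlreadyReg` and is swallowed, so the self-update task is spawned at most once per registry. A toy implementation to illustrate the two traits together (all names here are invented for the example):

```rust
use async_trait::async_trait;
use relay_utils::metrics::{
	metric_name, register, Gauge, Metric, PrometheusError, Registry, StandaloneMetric, U64,
};
use std::time::Duration;

// A toy standalone metric: counts its own update ticks.
#[derive(Clone)]
struct TickMetric {
	ticks: Gauge<U64>,
}

impl Metric for TickMetric {
	fn register(&self, registry: &Registry) -> Result<(), PrometheusError> {
		register(self.ticks.clone(), registry).map(drop)
	}
}

#[async_trait]
impl StandaloneMetric for TickMetric {
	fn update_interval(&self) -> Duration {
		Duration::from_secs(10)
	}

	async fn update(&self) {
		self.ticks.inc();
	}
}

fn expose_twice(registry: &Registry) -> Result<(), PrometheusError> {
	let metric = TickMetric { ticks: Gauge::new(metric_name(None, "tick_count"), "Update ticks")? };
	// First call registers and spawns the self-update task...
	metric.clone().register_and_spawn(registry)?;
	// ...the second call hits `AlreadyReg` and becomes a no-op instead of an error.
	metric.register_and_spawn(registry)
}
```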
diff --git a/relays/utils/src/metrics/float_json_value.rs b/relays/utils/src/metrics/float_json_value.rs
index 9404695c1c30..7535cbef9863 100644
--- a/relays/utils/src/metrics/float_json_value.rs
+++ b/relays/utils/src/metrics/float_json_value.rs
@@ -17,8 +17,8 @@
 use crate::{
 	error::{self, Error},
 	metrics::{
-		metric_name, register, F64SharedRef, Gauge, PrometheusError, Registry, StandaloneMetrics,
-		F64,
+		metric_name, register, F64SharedRef, Gauge, Metric, PrometheusError, Registry,
+		StandaloneMetric, F64,
 	},
 };
 
@@ -44,8 +44,6 @@ pub struct FloatJsonValueMetric {
 
 impl FloatJsonValueMetric {
 	/// Create new metric instance with given name and help.
 	pub fn new(
-		registry: &Registry,
-		prefix: Option<&str>,
 		url: String,
 		json_path: String,
 		name: String,
@@ -55,7 +53,7 @@ impl FloatJsonValueMetric {
 		Ok(FloatJsonValueMetric {
 			url,
 			json_path,
-			metric: register(Gauge::new(metric_name(prefix, &name), help)?, registry)?,
+			metric: Gauge::new(metric_name(None, &name), help)?,
 			shared_value_ref,
 		})
 	}
@@ -81,8 +79,14 @@ impl FloatJsonValueMetric {
 	}
 }
 
+impl Metric for FloatJsonValueMetric {
+	fn register(&self, registry: &Registry) -> Result<(), PrometheusError> {
+		register(self.metric.clone(), registry).map(drop)
+	}
+}
+
 #[async_trait]
-impl StandaloneMetrics for FloatJsonValueMetric {
+impl StandaloneMetric for FloatJsonValueMetric {
 	fn update_interval(&self) -> Duration {
 		UPDATE_INTERVAL
 	}
diff --git a/relays/utils/src/metrics/global.rs b/relays/utils/src/metrics/global.rs
index 7746690a0c72..df90a2c48234 100644
--- a/relays/utils/src/metrics/global.rs
+++ b/relays/utils/src/metrics/global.rs
@@ -17,8 +17,8 @@
 //! Global system-wide Prometheus metrics exposed by relays.
 
 use crate::metrics::{
-	metric_name, register, Gauge, GaugeVec, Opts, PrometheusError, Registry, StandaloneMetrics,
-	F64, U64,
+	metric_name, register, Gauge, GaugeVec, Metric, Opts, PrometheusError, Registry,
+	StandaloneMetric, F64, U64,
 };
 use async_std::sync::{Arc, Mutex};
@@ -40,36 +40,36 @@ pub struct GlobalMetrics {
 
 impl GlobalMetrics {
 	/// Create and register global metrics.
-	pub fn new(registry: &Registry, prefix: Option<&str>) -> Result<Self, PrometheusError> {
+	pub fn new() -> Result<Self, PrometheusError> {
 		Ok(GlobalMetrics {
 			system: Arc::new(Mutex::new(System::new_with_specifics(RefreshKind::everything()))),
-			system_average_load: register(
-				GaugeVec::new(
-					Opts::new(metric_name(prefix, "system_average_load"), "System load average"),
-					&["over"],
-				)?,
-				registry,
+			system_average_load: GaugeVec::new(
+				Opts::new(metric_name(None, "system_average_load"), "System load average"),
+				&["over"],
 			)?,
-			process_cpu_usage_percentage: register(
-				Gauge::new(
-					metric_name(prefix, "process_cpu_usage_percentage"),
-					"Process CPU usage",
-				)?,
-				registry,
+			process_cpu_usage_percentage: Gauge::new(
+				metric_name(None, "process_cpu_usage_percentage"),
+				"Process CPU usage",
 			)?,
-			process_memory_usage_bytes: register(
-				Gauge::new(
-					metric_name(prefix, "process_memory_usage_bytes"),
-					"Process memory (resident set size) usage",
-				)?,
-				registry,
+			process_memory_usage_bytes: Gauge::new(
+				metric_name(None, "process_memory_usage_bytes"),
+				"Process memory (resident set size) usage",
 			)?,
 		})
 	}
 }
 
+impl Metric for GlobalMetrics {
+	fn register(&self, registry: &Registry) -> Result<(), PrometheusError> {
+		register(self.system_average_load.clone(), registry)?;
+		register(self.process_cpu_usage_percentage.clone(), registry)?;
+		register(self.process_memory_usage_bytes.clone(), registry)?;
+		Ok(())
+	}
+}
+
 #[async_trait]
-impl StandaloneMetrics for GlobalMetrics {
+impl StandaloneMetric for GlobalMetrics {
 	async fn update(&self) {
 		// update system-wide metrics
 		let mut system = self.system.lock().await;
diff --git a/relays/utils/src/relay_loop.rs b/relays/utils/src/relay_loop.rs
index 4898185a150b..a992aaaf57ee 100644
--- a/relays/utils/src/relay_loop.rs
+++ b/relays/utils/src/relay_loop.rs
@@ -16,7 +16,7 @@
 
 use crate::{
 	error::Error,
-	metrics::{Metrics, MetricsAddress, MetricsParams, PrometheusError, StandaloneMetrics},
+	metrics::{Metric, MetricsAddress, MetricsParams},
 	FailedClient, MaybeConnectionError,
 };
 
@@ -53,7 +53,7 @@ pub fn relay_loop<SC, TC>(source_client: SC, target_client: TC) -> Loop<SC, TC, ()> {
 
-pub fn relay_metrics(prefix: Option<String>, params: MetricsParams) -> LoopMetrics<(), (), ()> {
+pub fn relay_metrics(params: MetricsParams) -> LoopMetrics<(), (), ()> {
 	LoopMetrics {
 		relay_loop: Loop {
 			reconnect_delay: RECONNECT_DELAY,
@@ -62,8 +62,7 @@ pub fn relay_metrics(prefix: Option<String>, params: MetricsParams) -> LoopMetrics<(), (), ()> {
 			loop_metric: None,
 		},
 		address: params.address,
-		registry: params.registry.unwrap_or_else(|| create_metrics_registry(prefix)),
-		metrics_prefix: params.metrics_prefix,
+		registry: params.registry,
 		loop_metric: None,
 	}
 }
@@ -81,7 +80,6 @@ pub struct LoopMetrics<SC, TC, LM> {
 	relay_loop: Loop<SC, TC, LM>,
 	address: Option<MetricsAddress>,
 	registry: Registry,
-	metrics_prefix: Option<String>,
 	loop_metric: Option<LM>,
 }
 
@@ -93,11 +91,7 @@ impl<SC, TC> Loop<SC, TC, ()> {
 	}
 
 	/// Start building loop metrics using given prefix.
-	pub fn with_metrics(
-		self,
-		prefix: Option<String>,
-		params: MetricsParams,
-	) -> LoopMetrics<SC, TC, ()> {
+	pub fn with_metrics(self, params: MetricsParams) -> LoopMetrics<SC, TC, ()> {
 		LoopMetrics {
 			relay_loop: Loop {
 				reconnect_delay: self.reconnect_delay,
@@ -106,8 +100,7 @@ impl<SC, TC> Loop<SC, TC, ()> {
 				loop_metric: None,
 			},
 			address: params.address,
-			registry: params.registry.unwrap_or_else(|| create_metrics_registry(prefix)),
-			metrics_prefix: params.metrics_prefix,
+			registry: params.registry,
 			loop_metric: None,
 		}
 	}
@@ -160,44 +153,23 @@ impl<SC, TC, LM> LoopMetrics<SC, TC, LM> {
 	/// Add relay loop metrics.
 	///
 	/// Loop metrics will be passed to the loop callback.
-	pub fn loop_metric<NewLM: Metrics>(
+	pub fn loop_metric<NewLM: Metric>(
 		self,
-		create_metric: impl FnOnce(&Registry, Option<&str>) -> Result<NewLM, PrometheusError>,
+		metric: NewLM,
 	) -> Result<LoopMetrics<SC, TC, NewLM>, Error> {
-		let loop_metric = create_metric(&self.registry, self.metrics_prefix.as_deref())?;
+		metric.register(&self.registry)?;
 
 		Ok(LoopMetrics {
 			relay_loop: self.relay_loop,
 			address: self.address,
 			registry: self.registry,
-			metrics_prefix: self.metrics_prefix,
-			loop_metric: Some(loop_metric),
+			loop_metric: Some(metric),
 		})
 	}
 
-	/// Add standalone metrics.
-	pub fn standalone_metric<M: StandaloneMetrics>(
-		self,
-		create_metric: impl FnOnce(&Registry, Option<&str>) -> Result<M, PrometheusError>,
-	) -> Result<Self, Error> {
-		// since standalone metrics are updating themselves, we may just ignore the fact that the
-		// same standalone metric is exposed by several loops && only spawn single metric
-		match create_metric(&self.registry, self.metrics_prefix.as_deref()) {
-			Ok(standalone_metrics) => standalone_metrics.spawn(),
-			Err(PrometheusError::AlreadyReg) => (),
-			Err(e) => return Err(e.into()),
-		}
-
-		Ok(self)
-	}
-
 	/// Convert into `MetricsParams` structure so that metrics registry may be extended later.
 	pub fn into_params(self) -> MetricsParams {
-		MetricsParams {
-			address: self.address,
-			registry: Some(self.registry),
-			metrics_prefix: self.metrics_prefix,
-		}
+		MetricsParams { address: self.address, registry: self.registry }
 	}
 
 	/// Expose metrics using address passed at creation.
@@ -274,15 +246,3 @@ pub async fn reconnect_failed_client(
 		break
 	}
 }
-
-/// Create new registry with global metrics.
-fn create_metrics_registry(prefix: Option<String>) -> Registry {
-	match prefix {
-		Some(prefix) => {
-			assert!(!prefix.is_empty(), "Metrics prefix can not be empty");
-			Registry::new_custom(Some(prefix), None)
-				.expect("only fails if prefix is empty; prefix is not empty; qed")
-		},
-		None => Registry::new(),
-	}
-}
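After this refactoring the loop builder no longer threads a prefix through: prefixes are baked into metric names via `metric_name`, and the registry travels inside `MetricsParams`. A loose sketch of the resulting call shape, modeled on the `message_lane_loop::run` change above (bounds are simplified and the loop body is a placeholder, so treat this as an outline rather than the exact API):

```rust
use relay_utils::metrics::{Metric, MetricsParams};
use relay_utils::{relay_loop::Client, FailedClient};

async fn expose_and_run<SC: Client, TC: Client>(
	source: SC,
	target: TC,
	loop_metrics: impl Metric,
	params: MetricsParams,
) -> anyhow::Result<()> {
	relay_utils::relay_loop(source, target)
		.with_metrics(params) // registry arrives via MetricsParams, no prefix argument
		.loop_metric(loop_metrics)? // a pre-built Metric value, not a factory closure
		.expose() // starts the Prometheus endpoint if an address was configured
		.await?
		.run("example_loop".into(), |_source, _target, _metrics| async {
			// ... the actual relay future would live here ...
			Ok::<(), FailedClient>(())
		})
		.await?;
	Ok(())
}
```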
diff --git a/scripts/run-eth2sub-relay.sh b/scripts/run-eth2sub-relay.sh
deleted file mode 100755
index 2cf64a93780d..000000000000
--- a/scripts/run-eth2sub-relay.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-
-# Run a development instance of the Ethereum to Substrate relay. Needs running
-# Substrate and Ethereum nodes in order to work.
-
-RUST_LOG=rpc=trace,bridge=trace ./target/debug/ethereum-poa-relay eth-to-sub
diff --git a/scripts/run-openethereum-node.sh b/scripts/run-openethereum-node.sh
deleted file mode 100755
index 62089baffe45..000000000000
--- a/scripts/run-openethereum-node.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-# This script assumes that an OpenEthereum build is available. The repo
-# should be at the same level as the `parity-bridges-common` repo.
-
-RUST_LOG=rpc=trace,txqueue=trace,bridge-builtin=trace \
-../openethereum/target/debug/openethereum \
-	--config="$(pwd)"/deployments/dev/poa-config/poa-node-config \
-	--node-key=arthur \
-	--engine-signer=0x005e714f896a8b7cede9d38688c1a81de72a58e4 \
-	--base-path=/tmp/oe-dev-node \