diff --git a/.github/commands-readme.md b/.github/commands-readme.md index 793524e056f8..ce4e0fd0d789 100644 --- a/.github/commands-readme.md +++ b/.github/commands-readme.md @@ -10,6 +10,7 @@ The current available command actions are: - [Command FMT](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-fmt.yml) - [Command Update UI](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-update-ui.yml) +- [Command Prdoc](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-prdoc.yml) - [Command Sync](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-sync.yml) - [Command Bench](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-bench.yml) - [Command Bench All](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-bench-all.yml) @@ -235,6 +236,16 @@ You can use the following [`gh cli`](https://cli.github.com/) inside the repo: gh workflow run command-bench-overheard.yml -f pr=1000 -f benchmark=substrate -f runtime=rococo -f target_dir=substrate ``` +### PrDoc + +Generate a PrDoc with the crates populated by all modified crates. + +Options: +- `pr`: The PR number to generate the PrDoc for. +- `audience`: The audience of whom the changes may concern. +- `bump`: A default bump level for all crates. The PrDoc will likely need to be edited to reflect the actual changes after generation. +- `overwrite`: Whether to overwrite any existing PrDoc. + ### Sync Run sync and commit back results to PR. diff --git a/.github/scripts/generate-prdoc.py b/.github/scripts/generate-prdoc.py new file mode 100644 index 000000000000..b7b2e6f970fa --- /dev/null +++ b/.github/scripts/generate-prdoc.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python3 + +""" +Generate the PrDoc for a Pull Request with a specific number, audience and bump level. + +It downloads and parses the patch from the GitHub API to opulate the prdoc with all modified crates. +This will delete any prdoc that already exists for the PR if `--force` is passed. + +Usage: + python generate-prdoc.py --pr 1234 --audience "TODO" --bump "TODO" +""" + +import argparse +import os +import re +import sys +import subprocess +import toml +import yaml +import requests + +from github import Github +import whatthepatch +from cargo_workspace import Workspace + +# Download the patch and pass the info into `create_prdoc`. +def from_pr_number(n, audience, bump, force): + print(f"Fetching PR '{n}' from GitHub") + g = Github() + + repo = g.get_repo("paritytech/polkadot-sdk") + pr = repo.get_pull(n) + + patch_url = pr.patch_url + patch = requests.get(patch_url).text + + create_prdoc(n, audience, pr.title, pr.body, patch, bump, force) + +def create_prdoc(pr, audience, title, description, patch, bump, force): + path = f"prdoc/pr_{pr}.prdoc" + + if os.path.exists(path): + if force == True: + print(f"Overwriting existing PrDoc for PR {pr}") + else: + print(f"PrDoc already exists for PR {pr}. 
Use --force to overwrite.") + sys.exit(1) + else: + print(f"No preexisting PrDoc for PR {pr}") + + prdoc = { "doc": [{}], "crates": [] } + + prdoc["title"] = title + prdoc["doc"][0]["audience"] = audience + prdoc["doc"][0]["description"] = description + + workspace = Workspace.from_path(".") + + modified_paths = [] + for diff in whatthepatch.parse_patch(patch): + modified_paths.append(diff.header.new_path) + + modified_crates = {} + for p in modified_paths: + # Go up until we find a Cargo.toml + p = os.path.join(workspace.path, p) + while not os.path.exists(os.path.join(p, "Cargo.toml")): + p = os.path.dirname(p) + + with open(os.path.join(p, "Cargo.toml")) as f: + manifest = toml.load(f) + + if not "package" in manifest: + print(f"File was not in any crate: {p}") + continue + + crate_name = manifest["package"]["name"] + if workspace.crate_by_name(crate_name).publish: + modified_crates[crate_name] = True + else: + print(f"Skipping unpublished crate: {crate_name}") + + print(f"Modified crates: {modified_crates.keys()}") + + for crate_name in modified_crates.keys(): + entry = { "name": crate_name } + + if bump == 'silent' or bump == 'ignore' or bump == 'no change': + entry["validate"] = False + else: + entry["bump"] = bump + + print(f"Adding crate {entry}") + prdoc["crates"].append(entry) + + # write the parsed PR documentation back to the file + with open(path, "w") as f: + yaml.dump(prdoc, f) + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--pr", type=int, required=True) + parser.add_argument("--audience", type=str, default="TODO") + parser.add_argument("--bump", type=str, default="TODO") + parser.add_argument("--force", type=str) + return parser.parse_args() + +if __name__ == "__main__": + args = parse_args() + force = True if args.force.lower() == "true" else False + print(f"Args: {args}, force: {force}") + from_pr_number(args.pr, args.audience, args.bump, force) diff --git a/.github/workflows/command-prdoc.yml b/.github/workflows/command-prdoc.yml new file mode 100644 index 000000000000..da8f14089cb8 --- /dev/null +++ b/.github/workflows/command-prdoc.yml @@ -0,0 +1,78 @@ +name: Command PrDoc + +on: + workflow_dispatch: + inputs: + pr: + type: number + description: Number of the Pull Request + required: true + bump: + type: choice + description: Default bump level for all crates + default: "TODO" + required: true + options: + - "TODO" + - "no change" + - "patch" + - "minor" + - "major" + audience: + type: choice + description: Audience of the PrDoc + default: "TODO" + required: true + options: + - "TODO" + - "Runtime Dev" + - "Runtime User" + - "Node Dev" + - "Node User" + overwrite: + type: choice + description: Overwrite existing PrDoc + default: "true" + required: true + options: + - "true" + - "false" + +concurrency: + group: command-prdoc + cancel-in-progress: true + +jobs: + cmd-prdoc: + runs-on: ubuntu-latest + timeout-minutes: 20 + permissions: + contents: write + pull-requests: write + steps: + - name: Download repo + uses: actions/checkout@v4 + - name: Install gh cli + id: gh + uses: ./.github/actions/set-up-gh + with: + pr-number: ${{ inputs.pr }} + GH_TOKEN: ${{ github.token }} + - name: Generate PrDoc + run: | + python3 -m pip install -q cargo-workspace PyGithub whatthepatch pyyaml toml + + python3 .github/scripts/generate-prdoc.py --pr "${{ inputs.pr }}" --bump "${{ inputs.bump }}" --audience "${{ inputs.audience }}" --force "${{ inputs.overwrite }}" + + - name: Report failure + if: ${{ failure() }} + run: gh pr comment ${{ inputs.pr }} 
--body "Command failed ❌.
Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} + - name: Push Commit + uses: stefanzweifel/git-auto-commit-action@v5 + with: + commit_message: Add PrDoc (auto generated) + branch: ${{ steps.gh.outputs.branch }} + file_pattern: 'prdoc/*.prdoc' diff --git a/.github/workflows/subsystem-benchmarks.yml b/.github/workflows/subsystem-benchmarks.yml new file mode 100644 index 000000000000..7c19b420a6ac --- /dev/null +++ b/.github/workflows/subsystem-benchmarks.yml @@ -0,0 +1,82 @@ +on: + push: + branches: + - master + pull_request: + types: [ opened, synchronize, reopened, closed, labeled ] + merge_group: + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +permissions: + contents: read + pull-requests: write + +jobs: + set-image: + # TODO: remove once migration is complete or this workflow is fully stable + if: contains(github.event.label.name, 'GHA-migration') + # GitHub Actions allows using 'env' in a container context. + # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322 + # This workaround sets the container image for each job using 'set-image' job output. + runs-on: ubuntu-latest + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + + build: + needs: [ set-image ] + runs-on: arc-runners-polkadot-sdk-benchmark + container: + image: ${{ needs.set-image.outputs.IMAGE }} + env: + BENCH_DIR: ./charts/bench/${{ matrix.features.bench }} + BENCH_FILE_NAME: ${{ matrix.features.bench }} + strategy: + fail-fast: false + matrix: + features: [ + { name: "polkadot-availability-recovery", bench: "availability-recovery-regression-bench" }, + { name: "polkadot-availability-distribution", bench: "availability-distribution-regression-bench" }, + { name: "polkadot-node-core-approval-voting", bench: "approval-voting-regression-bench" }, + { name: "polkadot-statement-distribution", bench: "statement-distribution-regression-bench" } + ] + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Check Rust + run: | + rustup show + rustup +nightly show + + - name: Run Benchmarks + continue-on-error: true + id: run-benchmarks + run: | + cargo bench -p ${{ matrix.features.name }} --bench ${{ matrix.features.bench }} --features subsystem-benchmarks || echo "Benchmarks failed" + ls -lsa ./charts + mkdir -p $BENCH_DIR || echo "Directory exists" + cp charts/${BENCH_FILE_NAME}.json $BENCH_DIR + ls -lsa $BENCH_DIR + # Fixes "detected dubious ownership" error in the ci + git config --global --add safe.directory '*' + + - name: Publish result to GH Pages + if: ${{ steps.run-benchmarks.outcome == 'success' }} + uses: benchmark-action/github-action-benchmark@v1 + with: + tool: "customSmallerIsBetter" + name: ${{ env.BENCH_FILE_NAME }} + output-file-path: ${{ env.BENCH_DIR }}/${{ env.BENCH_FILE_NAME }}.json + benchmark-data-dir-path: ${{ env.BENCH_DIR }} + github-token: ${{ secrets.GITHUB_TOKEN }} + comment-on-alert: ${{ github.event_name == 'pull_request' }} # will comment on PRs if regression is detected + auto-push: false # TODO: enable when gitlab part is removed ${{ github.ref == 'refs/heads/master' }} + diff --git a/Cargo.lock b/Cargo.lock index 87ff24663ffc..60be41563299 100644 --- a/Cargo.lock +++ b/Cargo.lock 
@@ -4256,12 +4256,8 @@ dependencies = [ name = "cumulus-primitives-aura" version = "0.7.0" dependencies = [ - "parity-scale-codec", - "polkadot-core-primitives", - "polkadot-primitives", "sp-api", "sp-consensus-aura", - "sp-runtime", ] [[package]] @@ -4289,8 +4285,6 @@ dependencies = [ "scale-info", "sp-core", "sp-inherents", - "sp-runtime", - "sp-state-machine", "sp-trie", ] @@ -4343,8 +4337,6 @@ dependencies = [ "pallet-asset-conversion", "parity-scale-codec", "polkadot-runtime-common", - "polkadot-runtime-parachains", - "sp-io", "sp-runtime", "staging-xcm", "staging-xcm-builder", @@ -6778,9 +6770,9 @@ checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" [[package]] name = "hkdf" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" +checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" dependencies = [ "hmac 0.12.1", ] @@ -7910,9 +7902,9 @@ dependencies = [ [[package]] name = "libp2p-identity" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "999ec70441b2fb35355076726a6bc466c932e9bdc66f6a11c6c0aa17c7ab9be0" +checksum = "55cca1eb2bc1fd29f099f3daaab7effd01e1a54b7c577d0ed082521034d912e8" dependencies = [ "bs58 0.5.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index 2333dd15fce8..74b12f96603a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -819,7 +819,7 @@ lazy_static = { version = "1.4.0" } libc = { version = "0.2.155" } libfuzzer-sys = { version = "0.4" } libp2p = { version = "0.52.4" } -libp2p-identity = { version = "0.2.3" } +libp2p-identity = { version = "0.2.9" } libsecp256k1 = { version = "0.7.0", default-features = false } linked-hash-map = { version = "0.5.4" } linked_hash_set = { version = "0.1.4" } diff --git a/cumulus/primitives/aura/Cargo.toml b/cumulus/primitives/aura/Cargo.toml index 062b9ce736e7..185b2d40833f 100644 --- a/cumulus/primitives/aura/Cargo.toml +++ b/cumulus/primitives/aura/Cargo.toml @@ -10,24 +10,14 @@ description = "Core primitives for Aura in Cumulus" workspace = true [dependencies] -codec = { features = ["derive"], workspace = true } # Substrate sp-api = { workspace = true } sp-consensus-aura = { workspace = true } -sp-runtime = { workspace = true } - -# Polkadot -polkadot-core-primitives = { workspace = true } -polkadot-primitives = { workspace = true } [features] default = ["std"] std = [ - "codec/std", - "polkadot-core-primitives/std", - "polkadot-primitives/std", "sp-api/std", "sp-consensus-aura/std", - "sp-runtime/std", ] diff --git a/cumulus/primitives/parachain-inherent/Cargo.toml b/cumulus/primitives/parachain-inherent/Cargo.toml index 172af4b9ec63..a4271d3fd9cc 100644 --- a/cumulus/primitives/parachain-inherent/Cargo.toml +++ b/cumulus/primitives/parachain-inherent/Cargo.toml @@ -17,8 +17,6 @@ scale-info = { features = ["derive"], workspace = true } # Substrate sp-core = { workspace = true } sp-inherents = { workspace = true } -sp-runtime = { optional = true, workspace = true } -sp-state-machine = { optional = true, workspace = true } sp-trie = { workspace = true } # Cumulus @@ -33,7 +31,5 @@ std = [ "scale-info/std", "sp-core/std", "sp-inherents/std", - "sp-runtime?/std", - "sp-state-machine?/std", "sp-trie/std", ] diff --git a/cumulus/primitives/storage-weight-reclaim/src/lib.rs b/cumulus/primitives/storage-weight-reclaim/src/lib.rs index 5984fa77a2c5..a557e881e26b 100644 --- 
a/cumulus/primitives/storage-weight-reclaim/src/lib.rs +++ b/cumulus/primitives/storage-weight-reclaim/src/lib.rs @@ -174,6 +174,9 @@ where let storage_size_diff = benchmarked_weight.abs_diff(consumed_weight as u64); + let extrinsic_len = frame_system::AllExtrinsicsLen::::get().unwrap_or(0); + let node_side_pov_size = post_dispatch_proof_size.saturating_add(extrinsic_len.into()); + // This value will be reclaimed by [`frame_system::CheckWeight`], so we need to calculate // that in. frame_system::BlockWeight::::mutate(|current| { @@ -190,6 +193,19 @@ where ); current.reduce(Weight::from_parts(0, storage_size_diff), info.class) } + + // If we encounter a situation where the node-side proof size is already higher than + // what we have in the runtime bookkeeping, we add the difference to the `BlockWeight`. + // This prevents that the proof size grows faster than the runtime proof size. + let block_weight_proof_size = current.total().proof_size(); + let missing_from_node = node_side_pov_size.saturating_sub(block_weight_proof_size); + if missing_from_node > 0 { + log::warn!( + target: LOG_TARGET, + "Node-side PoV size higher than runtime proof size weight. node-side: {node_side_pov_size} extrinsic_len: {extrinsic_len} runtime: {block_weight_proof_size}, missing: {missing_from_node}. Setting to node-side proof size." + ); + current.accrue(Weight::from_parts(0, missing_from_node), info.class); + } }); Ok(()) } @@ -332,6 +348,82 @@ mod tests { }) } + #[test] + fn sets_to_node_storage_proof_if_higher() { + // The storage proof reported by the proof recorder is higher than what is stored on + // the runtime side. + { + let mut test_ext = setup_test_externalities(&[1000, 1005]); + + test_ext.execute_with(|| { + // Stored in BlockWeight is 5 + set_current_storage_weight(5); + + // Benchmarked storage weight: 10 + let info = DispatchInfo { weight: Weight::from_parts(0, 10), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + + let pre = StorageWeightReclaim::(PhantomData) + .pre_dispatch(&ALICE, CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(1000)); + + assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); + assert_ok!(StorageWeightReclaim::::post_dispatch( + Some(pre), + &info, + &post_info, + LEN, + &Ok(()) + )); + + // We expect that the storage weight was set to the node-side proof size (1005) + + // extrinsics length (150) + assert_eq!(get_storage_weight().total().proof_size(), 1155); + }) + } + + // In this second scenario the proof size on the node side is only lower + // after reclaim happened. + { + let mut test_ext = setup_test_externalities(&[175, 180]); + test_ext.execute_with(|| { + set_current_storage_weight(85); + + // Benchmarked storage weight: 100 + let info = + DispatchInfo { weight: Weight::from_parts(0, 100), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + // After this pre_dispatch, the BlockWeight proof size will be + // 85 (initial) + 100 (benched) + 150 (tx length) = 335 + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + + let pre = StorageWeightReclaim::(PhantomData) + .pre_dispatch(&ALICE, CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(175)); + + assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); + + // First we will reclaim 95, which leaves us with 240 BlockWeight. This is lower + // than 180 (proof size hf) + 150 (length), so we expect it to be set to 330. 
+ assert_ok!(StorageWeightReclaim::::post_dispatch( + Some(pre), + &info, + &post_info, + LEN, + &Ok(()) + )); + + // We expect that the storage weight was set to the node-side proof weight + assert_eq!(get_storage_weight().total().proof_size(), 330); + }) + } + } + #[test] fn does_nothing_without_extension() { let mut test_ext = new_test_ext(); @@ -545,7 +637,7 @@ mod tests { #[test] fn test_nothing_relcaimed() { - let mut test_ext = setup_test_externalities(&[100, 200]); + let mut test_ext = setup_test_externalities(&[0, 100]); test_ext.execute_with(|| { set_current_storage_weight(0); @@ -568,7 +660,7 @@ mod tests { .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); // Should return `setup_test_externalities` proof recorder value: 100. - assert_eq!(pre, Some(100)); + assert_eq!(pre, Some(0)); // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` // we always need to call `post_dispatch` to verify that they interoperate correctly. diff --git a/cumulus/primitives/utility/Cargo.toml b/cumulus/primitives/utility/Cargo.toml index 82d18c8c0aac..2ca8b82001d5 100644 --- a/cumulus/primitives/utility/Cargo.toml +++ b/cumulus/primitives/utility/Cargo.toml @@ -15,13 +15,11 @@ log = { workspace = true } # Substrate frame-support = { workspace = true } -sp-io = { workspace = true } sp-runtime = { workspace = true } pallet-asset-conversion = { workspace = true } # Polkadot polkadot-runtime-common = { workspace = true } -polkadot-runtime-parachains = { workspace = true } xcm = { workspace = true } xcm-executor = { workspace = true } xcm-builder = { workspace = true } @@ -38,8 +36,6 @@ std = [ "log/std", "pallet-asset-conversion/std", "polkadot-runtime-common/std", - "polkadot-runtime-parachains/std", - "sp-io/std", "sp-runtime/std", "xcm-builder/std", "xcm-executor/std", @@ -51,7 +47,6 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "pallet-asset-conversion/runtime-benchmarks", "polkadot-runtime-common/runtime-benchmarks", - "polkadot-runtime-parachains/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs index 5101a97dc842..74561e931e7e 100644 --- a/polkadot/xcm/xcm-executor/src/lib.rs +++ b/polkadot/xcm/xcm-executor/src/lib.rs @@ -809,7 +809,7 @@ impl XcmExecutor { }; let actual_weight = maybe_actual_weight.unwrap_or(weight); let surplus = weight.saturating_sub(actual_weight); - // We assume that the `Config::Weigher` will counts the `require_weight_at_most` + // We assume that the `Config::Weigher` will count the `require_weight_at_most` // for the estimate of how much weight this instruction will take. Now that we know // that it's less, we credit it. // diff --git a/prdoc/pr_5281.prdoc b/prdoc/pr_5281.prdoc new file mode 100644 index 000000000000..60feab412aff --- /dev/null +++ b/prdoc/pr_5281.prdoc @@ -0,0 +1,17 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: PoV-Reclaim - Set `BlockWeight` to node-side PoV size if mismatch is detected + +doc: + - audience: Runtime Dev + description: | + After this change, the `StorageWeightReclaim` `SignedExtension` will check the node-side PoV size after every + extrinsic. If we detect a case where the returned proof size is higher than the `BlockWeight` value of the + runtime, we set `BlockWeight` to the size returned from the node. 
+ + crates: + - name: cumulus-primitives-storage-weight-reclaim + bump: patch + - name: frame-system + bump: minor diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 0c6ff2cb8ddb..abacfa7b62cc 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -917,7 +917,7 @@ pub mod pallet { /// Total length (in bytes) for all extrinsics put together, for the current block. #[pallet::storage] - pub(super) type AllExtrinsicsLen<T: Config> = StorageValue<_, u32>; + pub type AllExtrinsicsLen<T: Config> = StorageValue<_, u32>; /// Map of block numbers to block hashes. #[pallet::storage]
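For reference, the new PrDoc command can be dispatched with the same [`gh cli`](https://cli.github.com/) pattern that `.github/commands-readme.md` documents for the other commands. A minimal sketch using the inputs declared in `command-prdoc.yml`; the PR number and option values are only illustrative:

```bash
gh workflow run command-prdoc.yml -f pr=1000 -f bump=patch -f audience="Runtime Dev" -f overwrite=true
```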