From d5e2170e26fc2fec3b21682232c507f46f2f518e Mon Sep 17 00:00:00 2001
From: Alex Crichton
Date: Mon, 8 Jul 2024 18:35:42 -0500
Subject: [PATCH] Fix segfault with custom page sizes on aarch64 (#8918) (#8920)

This commit fixes an issue with static memory initialization and custom
page sizes interacting together on aarch64 Linux. When static memory
initialization is enabled, the chunks used to initialize a linear memory
are built in host-page-size increments. This is done to enable
page-mapping via copy-on-write where possible. With the custom-page-sizes
proposal, however, it's possible for the first time for a linear memory
to be smaller than one of these chunks, meaning that a virtual memory
allocation of a single host page can be smaller than the initialization
chunk copied into it.

Currently this only happens on aarch64 Linux, where we coarsely
approximate the host page size as 64k even though many hosts actually run
with 4k pages. A 64k initializer is created but the host allocates only
4k for the linear memory, so memory initialization can crash when the 64k
initializer is copied into the 4k memory.

This was not caught via fuzzing because fuzzing only runs on x86_64. It
was not caught on CI because guard pages are disabled entirely under
QEMU, and we got lucky in that a number of virtual memory allocations
happened to be placed next to each other, meaning the oversized copy was
probably corrupting some other memory instead of faulting. The issue was
found locally by bjorn3, running the tests on `main` as-is on AArch64
Linux.

This commit implements a few safeguards and a fix for this issue:

* On CI with QEMU, modestly-sized guard pages are now enabled to catch
  this sooner in development should it happen again in the future.

* An `assert!` is added during memory initialization that the memory
  copy is indeed in bounds. This causes the tests to fail as-is on
  `main`, even on x86_64.

* The issue itself is fixed by bailing out of static memory
  initialization should the host page size exceed the wasm page size,
  which can now happen on aarch64 Linux with the custom-page-sizes
  proposal.
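As a rough, self-contained illustration of the two safeguards (not
Wasmtime's actual internals; the names `HOST_PAGE_SIZE_APPROX`,
`static_init_applicable`, and `copy_is_in_bounds` are invented for this
sketch):

    // Sketch only: stand-ins for Wasmtime's internal logic, runnable as-is.
    //
    // On aarch64 Linux the host page size is coarsely approximated as 64k.
    const HOST_PAGE_SIZE_APPROX: u64 = 64 * 1024;

    // Static memory initialization builds initializer chunks in
    // host-page-size units, so if a memory's wasm page size is smaller
    // than the host page size a chunk can exceed the entire memory;
    // bail out of static initialization in that case.
    fn static_init_applicable(wasm_page_size: u64) -> bool {
        wasm_page_size >= HOST_PAGE_SIZE_APPROX
    }

    // The new `assert!` in `initialize_memories`, reduced to its essence:
    // a copy of `data_len` bytes at `offset` must fit within the memory's
    // current length.
    fn copy_is_in_bounds(offset: usize, data_len: usize, current_length: usize) -> bool {
        offset + data_len <= current_length
    }

    fn main() {
        // The custom-page-sizes proposal allows tiny (e.g. 1-byte) pages,
        // so a 4k memory can be far smaller than a 64k initializer chunk.
        assert!(!static_init_applicable(1));
        assert!(static_init_applicable(64 * 1024));
        assert!(copy_is_in_bounds(0, 4 * 1024, 64 * 1024));
        assert!(!copy_is_in_bounds(0, 64 * 1024, 4 * 1024)); // the crashing case
    }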
---
 crates/environ/src/compile/module_environ.rs      | 13 ++++++++++++-
 .../wasmtime/src/runtime/vm/instance/allocator.rs |  6 +++++-
 tests/wast.rs                                     |  6 ++++--
 3 files changed, 21 insertions(+), 4 deletions(-)

diff --git a/crates/environ/src/compile/module_environ.rs b/crates/environ/src/compile/module_environ.rs
index f8edcc8455a7..7c90f15960e1 100644
--- a/crates/environ/src/compile/module_environ.rs
+++ b/crates/environ/src/compile/module_environ.rs
@@ -996,9 +996,20 @@ impl ModuleTranslation<'_> {

         // Validate that the memory information collected is indeed valid for
         // static memory initialization.
-        for info in info.values().filter(|i| i.data_size > 0) {
+        for (i, info) in info.iter().filter(|(_, info)| info.data_size > 0) {
             let image_size = info.max_addr - info.min_addr;

+            // Simplify things for now by bailing out entirely if any memory has
+            // a page size smaller than the host's page size. This fixes a case
+            // where initializers are currently created in host-page-size units
+            // of length, which means that a larger-than-the-entire-memory
+            // initializer can be created. This can technically be handled but
+            // would require some more changes to help fix the assert elsewhere
+            // that this protects against.
+            if self.module.memory_plans[i].memory.page_size() < page_size {
+                return;
+            }
+
             // If the range of memory being initialized is less than twice the
             // total size of the data itself then it's assumed that static
             // initialization is ok. This means we'll at most double memory
diff --git a/crates/wasmtime/src/runtime/vm/instance/allocator.rs b/crates/wasmtime/src/runtime/vm/instance/allocator.rs
index 766293eab0fe..ee39886cb884 100644
--- a/crates/wasmtime/src/runtime/vm/instance/allocator.rs
+++ b/crates/wasmtime/src/runtime/vm/instance/allocator.rs
@@ -732,7 +732,11 @@ fn initialize_memories(instance: &mut Instance, module: &Module) -> Result<()> {

         unsafe {
             let src = self.instance.wasm_data(init.data.clone());
-            let dst = memory.base.add(usize::try_from(init.offset).unwrap());
+            let offset = usize::try_from(init.offset).unwrap();
+            let dst = memory.base.add(offset);
+
+            assert!(offset + src.len() <= memory.current_length());
+
             // FIXME audit whether this is safe in the presence of shared
             // memory
             // (https://github.com/bytecodealliance/wasmtime/issues/4203).
diff --git a/tests/wast.rs b/tests/wast.rs
index 789ab1cb70af..71527d4e32a2 100644
--- a/tests/wast.rs
+++ b/tests/wast.rs
@@ -294,8 +294,10 @@ fn run_wast(wast: &Path, strategy: Strategy, pooling: bool) -> anyhow::Result<()> {
             cfg.static_memory_maximum_size(0);
         }
         cfg.dynamic_memory_reserved_for_growth(0);
-        cfg.static_memory_guard_size(0);
-        cfg.dynamic_memory_guard_size(0);
+
+        let small_guard = 64 * 1024;
+        cfg.static_memory_guard_size(small_guard);
+        cfg.dynamic_memory_guard_size(small_guard);
     }

     let _pooling_lock = if pooling {
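As a closing note, a minimal host-side sketch of the guard-size
configuration used by tests/wast.rs above, written against the public
`Config` API of this era of Wasmtime (the two guard-size knobs were later
consolidated, so treat the exact method names as version-specific):

    use wasmtime::{Config, Engine};

    fn main() -> wasmtime::Result<()> {
        let mut cfg = Config::new();
        // A modest 64k guard region: large enough that an out-of-bounds,
        // host-page-granular initializer copy faults loudly instead of
        // silently corrupting a neighboring mapping, yet small enough to
        // keep address-space usage reasonable under QEMU on CI.
        let small_guard = 64 * 1024;
        cfg.static_memory_guard_size(small_guard);
        cfg.dynamic_memory_guard_size(small_guard);
        let _engine = Engine::new(&cfg)?;
        Ok(())
    }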