diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs index 6d5bca5731331..ff27e4000163a 100644 --- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs +++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs @@ -73,7 +73,9 @@ fn eval_body_using_ecx<'tcx, R: InterpretationResult<'tcx>>( cid.promoted.map_or_else(String::new, |p| format!("::{p:?}")) ); - ecx.push_stack_frame( + // This can't use `init_stack_frame` since `body` is not a function, + // so computing its ABI would fail. It's also not worth it since there are no arguments to pass. + ecx.push_stack_frame_raw( cid.instance, body, &ret.clone().into(), diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs index 79e8e2127765a..a075bdc191181 100644 --- a/compiler/rustc_const_eval/src/const_eval/machine.rs +++ b/compiler/rustc_const_eval/src/const_eval/machine.rs @@ -24,8 +24,9 @@ use crate::errors::{LongRunning, LongRunningWarn}; use crate::fluent_generated as fluent; use crate::interpret::{ self, compile_time_machine, err_ub, throw_exhaust, throw_inval, throw_ub_custom, throw_unsup, - throw_unsup_format, AllocId, AllocRange, ConstAllocation, CtfeProvenance, FnArg, FnVal, Frame, + throw_unsup_format, AllocId, AllocRange, ConstAllocation, CtfeProvenance, FnArg, Frame, GlobalAlloc, ImmTy, InterpCx, InterpResult, MPlaceTy, OpTy, Pointer, PointerArithmetic, Scalar, + StackPopCleanup, }; /// When hitting this many interpreted terminators we emit a deny by default lint @@ -306,17 +307,15 @@ impl<'tcx> CompileTimeInterpCx<'tcx> { let align = ImmTy::from_uint(target_align, args[1].layout).into(); let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?; - // We replace the entire function call with a "tail call". - // Note that this happens before the frame of the original function - // is pushed on the stack. - self.eval_fn_call( - FnVal::Instance(instance), - (CallAbi::Rust, fn_abi), + // Push the stack frame with our own adjusted arguments. + self.init_stack_frame( + instance, + self.load_mir(instance.def, None)?, + fn_abi, &[FnArg::Copy(addr), FnArg::Copy(align)], /* with_caller_location = */ false, dest, - ret, - mir::UnwindAction::Unreachable, + StackPopCleanup::Goto { ret, unwind: mir::UnwindAction::Unreachable }, )?; Ok(ControlFlow::Break(())) } else { diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/call.rs similarity index 60% rename from compiler/rustc_const_eval/src/interpret/terminator.rs rename to compiler/rustc_const_eval/src/interpret/call.rs index 47d0e22b527d3..2c5147678e8cb 100644 --- a/compiler/rustc_const_eval/src/interpret/terminator.rs +++ b/compiler/rustc_const_eval/src/interpret/call.rs @@ -1,24 +1,23 @@ +//! Manages calling a concrete function (with known MIR body) with argument passing, +//! and returning the return value to the caller. 
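+//! +//! The entry points are [`InterpCx::init_fn_call`] (used for `Call` terminators), +//! [`InterpCx::init_fn_tail_call`] (used for `TailCall` terminators), and +//! [`InterpCx::init_stack_frame`], the shared frame-setup logic both bottom out in.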
use std::borrow::Cow; -use either::Either; +use either::{Left, Right}; use rustc_middle::ty::layout::{FnAbiOf, IntegerExt, LayoutOf, TyAndLayout}; use rustc_middle::ty::{self, AdtDef, Instance, Ty}; use rustc_middle::{bug, mir, span_bug}; -use rustc_span::source_map::Spanned; use rustc_span::sym; use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode}; use rustc_target::abi::{self, FieldIdx, Integer}; use rustc_target::spec::abi::Abi; -use tracing::trace; +use tracing::{info, instrument, trace}; use super::{ throw_ub, throw_ub_custom, throw_unsup_format, CtfeProvenance, FnVal, ImmTy, InterpCx, - InterpResult, MPlaceTy, Machine, OpTy, PlaceTy, Projectable, Provenance, Scalar, - StackPopCleanup, + InterpResult, MPlaceTy, Machine, OpTy, PlaceTy, Projectable, Provenance, ReturnAction, Scalar, + StackPopCleanup, StackPopInfo, }; use crate::fluent_generated as fluent; -use crate::interpret::eval_context::StackPopInfo; -use crate::interpret::ReturnAction; /// An argument passed to a function. #[derive(Clone, Debug)] @@ -39,15 +38,6 @@ impl<'tcx, Prov: Provenance> FnArg<'tcx, Prov> { } } -struct EvaluatedCalleeAndArgs<'tcx, M: Machine<'tcx>> { - callee: FnVal<'tcx, M::ExtraFnVal>, - args: Vec<FnArg<'tcx, M::Provenance>>, - fn_sig: ty::FnSig<'tcx>, - fn_abi: &'tcx FnAbi<'tcx, Ty<'tcx>>, - /// True if the function is marked as `#[track_caller]` ([`ty::InstanceKind::requires_caller_location`]) - with_caller_location: bool, -} - impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { /// Make a copy of the given fn_arg. Any `InPlace` are degenerated to copies, no protection of the /// original memory occurs. @@ -67,7 +57,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { args.iter().map(|fn_arg| self.copy_fn_arg(fn_arg)).collect() } - pub fn fn_arg_field( + /// Helper function for argument untupling. + pub(super) fn fn_arg_field( &self, arg: &FnArg<'tcx, M::Provenance>, field: usize, @@ -78,190 +69,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { }) } - pub(super) fn eval_terminator( - &mut self, - terminator: &mir::Terminator<'tcx>, - ) -> InterpResult<'tcx> { - use rustc_middle::mir::TerminatorKind::*; - match terminator.kind { - Return => { - self.return_from_current_stack_frame(/* unwinding */ false)? - } - - Goto { target } => self.go_to_block(target), - - SwitchInt { ref discr, ref targets } => { - let discr = self.read_immediate(&self.eval_operand(discr, None)?)?; - trace!("SwitchInt({:?})", *discr); - - // Branch to the `otherwise` case by default, if no match is found. - let mut target_block = targets.otherwise(); - - for (const_int, target) in targets.iter() { - // Compare using MIR BinOp::Eq, to also support pointer values. - // (Avoiding `self.binary_op` as that does some redundant layout computation.) - let res = self.binary_op( - mir::BinOp::Eq, - &discr, - &ImmTy::from_uint(const_int, discr.layout), - )?; - if res.to_scalar().to_bool()? 
{ - target_block = target; - break; - } - } - - self.go_to_block(target_block); - } - - Call { - ref func, - ref args, - destination, - target, - unwind, - call_source: _, - fn_span: _, - } => { - let old_stack = self.frame_idx(); - let old_loc = self.frame().loc; - - let EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location } = - self.eval_callee_and_args(terminator, func, args)?; - - let destination = self.force_allocation(&self.eval_place(destination)?)?; - self.eval_fn_call( - callee, - (fn_sig.abi, fn_abi), - &args, - with_caller_location, - &destination, - target, - if fn_abi.can_unwind { unwind } else { mir::UnwindAction::Unreachable }, - )?; - // Sanity-check that `eval_fn_call` either pushed a new frame or - // did a jump to another block. - if self.frame_idx() == old_stack && self.frame().loc == old_loc { - span_bug!(terminator.source_info.span, "evaluating this call made no progress"); - } - } - - TailCall { ref func, ref args, fn_span: _ } => { - let old_frame_idx = self.frame_idx(); - - let EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location } = - self.eval_callee_and_args(terminator, func, args)?; - - self.eval_fn_tail_call(callee, (fn_sig.abi, fn_abi), &args, with_caller_location)?; - - if self.frame_idx() != old_frame_idx { - span_bug!( - terminator.source_info.span, - "evaluating this tail call pushed a new stack frame" - ); - } - } - - Drop { place, target, unwind, replace: _ } => { - let place = self.eval_place(place)?; - let instance = Instance::resolve_drop_in_place(*self.tcx, place.layout.ty); - if let ty::InstanceKind::DropGlue(_, None) = instance.def { - // This is the branch we enter if and only if the dropped type has no drop glue - // whatsoever. This can happen as a result of monomorphizing a drop of a - // generic. In order to make sure that generic and non-generic code behaves - // roughly the same (and in keeping with Mir semantics) we do nothing here. - self.go_to_block(target); - return Ok(()); - } - trace!("TerminatorKind::drop: {:?}, type {}", place, place.layout.ty); - self.drop_in_place(&place, instance, target, unwind)?; - } - - Assert { ref cond, expected, ref msg, target, unwind } => { - let ignored = - M::ignore_optional_overflow_checks(self) && msg.is_optional_overflow_check(); - let cond_val = self.read_scalar(&self.eval_operand(cond, None)?)?.to_bool()?; - if ignored || expected == cond_val { - self.go_to_block(target); - } else { - M::assert_panic(self, msg, unwind)?; - } - } - - UnwindTerminate(reason) => { - M::unwind_terminate(self, reason)?; - } - - // When we encounter Resume, we've finished unwinding - // cleanup for the current stack frame. We pop it in order - // to continue unwinding the next frame - UnwindResume => { - trace!("unwinding: resuming from cleanup"); - // By definition, a Resume terminator means - // that we're unwinding - self.return_from_current_stack_frame(/* unwinding */ true)?; - return Ok(()); - } - - // It is UB to ever encounter this. - Unreachable => throw_ub!(Unreachable), - - // These should never occur for MIR we actually run. - FalseEdge { .. } | FalseUnwind { .. } | Yield { .. } | CoroutineDrop => span_bug!( - terminator.source_info.span, - "{:#?} should have been eliminated by MIR pass", - terminator.kind - ), - - InlineAsm { template, ref operands, options, ref targets, .. 
} => { - M::eval_inline_asm(self, template, operands, options, targets)?; - } - } - - Ok(()) - } - - /// Evaluate the arguments of a function call - pub(super) fn eval_fn_call_arguments( - &self, - ops: &[Spanned<mir::Operand<'tcx>>], - ) -> InterpResult<'tcx, Vec<FnArg<'tcx, M::Provenance>>> { - ops.iter() - .map(|op| { - let arg = match &op.node { - mir::Operand::Copy(_) | mir::Operand::Constant(_) => { - // Make a regular copy. - let op = self.eval_operand(&op.node, None)?; - FnArg::Copy(op) - } - mir::Operand::Move(place) => { - // If this place lives in memory, preserve its location. - // We call `place_to_op` which will be an `MPlaceTy` whenever there exists - // an mplace for this place. (This is in contrast to `PlaceTy::as_mplace_or_local` - // which can return a local even if that has an mplace.) - let place = self.eval_place(*place)?; - let op = self.place_to_op(&place)?; - - match op.as_mplace_or_imm() { - Either::Left(mplace) => FnArg::InPlace(mplace), - Either::Right(_imm) => { - // This argument doesn't live in memory, so there's no place - // to make inaccessible during the call. - // We rely on there not being any stray `PlaceTy` that would let the - // caller directly access this local! - // This is also crucial for tail calls, where we want the `FnArg` to - // stay valid when the old stack frame gets popped. - FnArg::Copy(op) - } - } - } - }; - - Ok(arg) - }) - .collect() - } - /// Find the wrapped inner type of a transparent wrapper. /// Must not be called on 1-ZST (as they don't have a uniquely defined "wrapped field"). /// @@ -503,46 +310,205 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { Ok(()) } - /// Shared part of `Call` and `TailCall` implementation — finding and evaluating all the - /// necessary information about callee and arguments to make a call. - fn eval_callee_and_args( - &self, - terminator: &mir::Terminator<'tcx>, - func: &mir::Operand<'tcx>, - args: &[Spanned<mir::Operand<'tcx>>], - ) -> InterpResult<'tcx, EvaluatedCalleeAndArgs<'tcx, M>> { - let func = self.eval_operand(func, None)?; - let args = self.eval_fn_call_arguments(args)?; - - let fn_sig_binder = func.layout.ty.fn_sig(*self.tcx); - let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig_binder); - let extra_args = &args[fn_sig.inputs().len()..]; - let extra_args = - self.tcx.mk_type_list_from_iter(extra_args.iter().map(|arg| arg.layout().ty)); - - let (callee, fn_abi, with_caller_location) = match *func.layout.ty.kind() { - ty::FnPtr(_sig) => { - let fn_ptr = self.read_pointer(&func)?; - let fn_val = self.get_ptr_fn(fn_ptr)?; - (fn_val, self.fn_abi_of_fn_ptr(fn_sig_binder, extra_args)?, false) - } - ty::FnDef(def_id, args) => { - let instance = self.resolve(def_id, args)?; - ( - FnVal::Instance(instance), - self.fn_abi_of_instance(instance, extra_args)?, - instance.def.requires_caller_location(*self.tcx), + fn check_fn_target_features(&self, instance: ty::Instance<'tcx>) -> InterpResult<'tcx, ()> { + // Calling functions with `#[target_feature]` is not unsafe on WASM, see #84988 + let attrs = self.tcx.codegen_fn_attrs(instance.def_id()); + if !self.tcx.sess.target.is_like_wasm + && attrs + .target_features + .iter() + .any(|feature| !self.tcx.sess.target_features.contains(feature)) + { + throw_ub_custom!( + fluent::const_eval_unavailable_target_features_for_fn, + unavailable_feats = attrs + .target_features + .iter() + .filter(|&feature| !self.tcx.sess.target_features.contains(feature)) + .fold(String::new(), |mut s, feature| { + if !s.is_empty() { + s.push_str(", "); + } + s.push_str(feature.as_str()); + s + }), + ); + } + 
Ok(()) + } + + /// The main entry point for creating a new stack frame: performs ABI checks and initializes + /// arguments. + #[instrument(skip(self), level = "trace")] + pub fn init_stack_frame( + &mut self, + instance: Instance<'tcx>, + body: &'tcx mir::Body<'tcx>, + caller_fn_abi: &FnAbi<'tcx, Ty<'tcx>>, + args: &[FnArg<'tcx, M::Provenance>], + with_caller_location: bool, + destination: &MPlaceTy<'tcx, M::Provenance>, + mut stack_pop: StackPopCleanup, + ) -> InterpResult<'tcx> { + // Compute callee information. + // FIXME: for variadic support, do we have to somehow determine callee's extra_args? + let callee_fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?; + + if callee_fn_abi.c_variadic || caller_fn_abi.c_variadic { + throw_unsup_format!("calling a c-variadic function is not supported"); + } + + if M::enforce_abi(self) { + if caller_fn_abi.conv != callee_fn_abi.conv { + throw_ub_custom!( + fluent::const_eval_incompatible_calling_conventions, + callee_conv = format!("{:?}", callee_fn_abi.conv), + caller_conv = format!("{:?}", caller_fn_abi.conv), ) } - _ => { - span_bug!(terminator.source_info.span, "invalid callee of type {}", func.layout.ty) + } + + // Check that all target features required by the callee (i.e., from + // the attribute `#[target_feature(enable = ...)]`) are enabled at + // compile time. + self.check_fn_target_features(instance)?; + + if !callee_fn_abi.can_unwind { + // The callee cannot unwind, so force the `Unreachable` unwind handling. + match &mut stack_pop { + StackPopCleanup::Root { .. } => {} + StackPopCleanup::Goto { unwind, .. } => { + *unwind = mir::UnwindAction::Unreachable; + } + } + } + + self.push_stack_frame_raw(instance, body, destination, stack_pop)?; + + // If an error is raised here, pop the frame again to get an accurate backtrace. + // To this end, we wrap it all in a `try` block. + let res: InterpResult<'tcx> = try { + trace!( + "caller ABI: {:#?}, args: {:#?}", + caller_fn_abi, + args.iter() + .map(|arg| ( + arg.layout().ty, + match arg { + FnArg::Copy(op) => format!("copy({op:?})"), + FnArg::InPlace(mplace) => format!("in-place({mplace:?})"), + } + )) + .collect::<Vec<_>>() + ); + trace!( + "spread_arg: {:?}, locals: {:#?}", + body.spread_arg, + body.args_iter() + .map(|local| ( + local, + self.layout_of_local(self.frame(), local, None).unwrap().ty, + )) + .collect::<Vec<_>>() + ); + + // In principle, we have two iterators: Where the arguments come from, and where + // they go to. + + // The "where they come from" part is easy, we expect the caller to do any special handling + // that might be required here (e.g. for untupling). + // If `with_caller_location` is set we pretend there is an extra argument (that + // we will not pass; our `caller_location` intrinsic implementation walks the stack instead). + assert_eq!( + args.len() + if with_caller_location { 1 } else { 0 }, + caller_fn_abi.args.len(), + "mismatch between caller ABI and caller arguments", + ); + let mut caller_args = args + .iter() + .zip(caller_fn_abi.args.iter()) + .filter(|arg_and_abi| !matches!(arg_and_abi.1.mode, PassMode::Ignore)); + + // Now we have to spread them out across the callee's locals, + // taking into account the `spread_arg`. If we could write + // this as a single iterator (that handles `spread_arg`), then + // `pass_argument` would be the loop body. It takes care to + // not advance `caller_args` for ignored arguments. 
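+ // (`spread_arg` is set for closure bodies: their "rust-call" ABI caller passes all closure + // arguments as a single tuple, which gets spread over separate callee locals here.)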
+ let mut callee_args_abis = callee_fn_abi.args.iter(); + for local in body.args_iter() { + // Construct the destination place for this argument. At this point all + // locals are still dead, so we cannot construct a `PlaceTy`. + let dest = mir::Place::from(local); + // `layout_of_local` does more than just the instantiation we need to get the + // type, but the result gets cached so this avoids calling the instantiation + // query *again* the next time this local is accessed. + let ty = self.layout_of_local(self.frame(), local, None)?.ty; + if Some(local) == body.spread_arg { + // Make the local live once, then fill in the value field by field. + self.storage_live(local)?; + // Must be a tuple + let ty::Tuple(fields) = ty.kind() else { + span_bug!(self.cur_span(), "non-tuple type for `spread_arg`: {ty}") + }; + for (i, field_ty) in fields.iter().enumerate() { + let dest = dest.project_deeper( + &[mir::ProjectionElem::Field(FieldIdx::from_usize(i), field_ty)], + *self.tcx, + ); + let callee_abi = callee_args_abis.next().unwrap(); + self.pass_argument( + &mut caller_args, + callee_abi, + &dest, + field_ty, + /* already_live */ true, + )?; + } + } else { + // Normal argument. Cannot mark it as live yet, it might be unsized! + let callee_abi = callee_args_abis.next().unwrap(); + self.pass_argument( + &mut caller_args, + callee_abi, + &dest, + ty, + /* already_live */ false, + )?; + } + } + // If the callee needs a caller location, pretend we consume one more argument from the ABI. + if instance.def.requires_caller_location(*self.tcx) { + callee_args_abis.next().unwrap(); + } + // Now we should have no more caller args or callee arg ABIs + assert!( + callee_args_abis.next().is_none(), + "mismatch between callee ABI and callee body arguments" + ); + if caller_args.next().is_some() { + throw_ub_custom!(fluent::const_eval_too_many_caller_args); + } + // Don't forget to check the return type! + if !self.check_argument_compat(&caller_fn_abi.ret, &callee_fn_abi.ret)? { + throw_ub!(AbiMismatchReturn { + caller_ty: caller_fn_abi.ret.layout.ty, + callee_ty: callee_fn_abi.ret.layout.ty + }); } - }; - Ok(EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location }) + // Protect return place for in-place return value passing. + M::protect_in_place_function_argument(self, &destination)?; + + // Don't forget to mark "initially live" locals as live. + self.storage_live_for_always_live_locals()?; + }; + res.inspect_err(|_| { + // Don't show the incomplete stack frame in the error stacktrace. + self.stack_mut().pop(); + }) } - /// Call this function -- pushing the stack frame and initializing the arguments. + /// Initiate a call to this function -- pushing the stack frame and initializing the arguments. /// /// `caller_fn_abi` is used to determine if all the arguments are passed the proper way. /// However, we also need `caller_abi` to determine if we need to do untupling of arguments. @@ -550,7 +516,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { /// `with_caller_location` indicates whether the caller passed a caller location. Miri /// implements caller locations without argument passing, but to match `FnAbi` we need to know /// when those arguments are present. 
- pub(crate) fn eval_fn_call( + pub(super) fn init_fn_call( &mut self, fn_val: FnVal<'tcx, M::ExtraFnVal>, (caller_abi, caller_fn_abi): (Abi, &FnAbi<'tcx, Ty<'tcx>>), @@ -558,9 +524,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { with_caller_location: bool, destination: &MPlaceTy<'tcx, M::Provenance>, target: Option<mir::BasicBlock>, - mut unwind: mir::UnwindAction, + unwind: mir::UnwindAction, ) -> InterpResult<'tcx> { - trace!("eval_fn_call: {:#?}", fn_val); + trace!("init_fn_call: {:#?}", fn_val); let instance = match fn_val { FnVal::Instance(instance) => instance, @@ -591,7 +557,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { )? { assert!(!self.tcx.intrinsic(fallback.def_id()).unwrap().must_be_overridden); assert!(matches!(fallback.def, ty::InstanceKind::Item(_))); - return self.eval_fn_call( + return self.init_fn_call( FnVal::Instance(fallback), (caller_abi, caller_fn_abi), args, @@ -630,189 +596,35 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { return Ok(()); }; - // Compute callee information using the `instance` returned by - // `find_mir_or_eval_fn`. - // FIXME: for variadic support, do we have to somehow determine callee's extra_args? - let callee_fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?; - - if callee_fn_abi.c_variadic || caller_fn_abi.c_variadic { - throw_unsup_format!("calling a c-variadic function is not supported"); - } - - if M::enforce_abi(self) { - if caller_fn_abi.conv != callee_fn_abi.conv { - throw_ub_custom!( - fluent::const_eval_incompatible_calling_conventions, - callee_conv = format!("{:?}", callee_fn_abi.conv), - caller_conv = format!("{:?}", caller_fn_abi.conv), + // Special handling for the closure ABI: untuple the last argument. + let args: Cow<'_, [FnArg<'tcx, M::Provenance>]> = + if caller_abi == Abi::RustCall && !args.is_empty() { + // Untuple + let (untuple_arg, args) = args.split_last().unwrap(); + trace!("init_fn_call: Will pass last argument by untupling"); + Cow::from( + args.iter() + .map(|a| Ok(a.clone())) + .chain( + (0..untuple_arg.layout().fields.count()) + .map(|i| self.fn_arg_field(untuple_arg, i)), + ) + .collect::<InterpResult<'tcx, Vec<_>>>()?, ) - } - } - - // Check that all target features required by the callee (i.e., from - // the attribute `#[target_feature(enable = ...)]`) are enabled at - // compile time. - self.check_fn_target_features(instance)?; - - if !callee_fn_abi.can_unwind { - // The callee cannot unwind, so force the `Unreachable` unwind handling. - unwind = mir::UnwindAction::Unreachable; - } + } else { + // Plain arg passing + Cow::from(args) + }; - self.push_stack_frame( + self.init_stack_frame( instance, body, + caller_fn_abi, + &args, + with_caller_location, destination, StackPopCleanup::Goto { ret: target, unwind }, - )?; - - // If an error is raised here, pop the frame again to get an accurate backtrace. - // To this end, we wrap it all in a `try` block. - let res: InterpResult<'tcx> = try { - trace!( - "caller ABI: {:?}, args: {:#?}", - caller_abi, - args.iter() - .map(|arg| ( - arg.layout().ty, - match arg { - FnArg::Copy(op) => format!("copy({op:?})"), - FnArg::InPlace(mplace) => format!("in-place({mplace:?})"), - } - )) - .collect::<Vec<_>>() - ); - trace!( - "spread_arg: {:?}, locals: {:#?}", - body.spread_arg, - body.args_iter() - .map(|local| ( - local, - self.layout_of_local(self.frame(), local, None).unwrap().ty, - )) - .collect::<Vec<_>>() - ); - - // In principle, we have two iterators: Where the arguments come from, and where - // they go to. 
- - // For where they come from: If the ABI is RustCall, we untuple the - // last incoming argument. These two iterators do not have the same type, - // so to keep the code paths uniform we accept an allocation - // (for RustCall ABI only). - let caller_args: Cow<'_, [FnArg<'tcx, M::Provenance>]> = - if caller_abi == Abi::RustCall && !args.is_empty() { - // Untuple - let (untuple_arg, args) = args.split_last().unwrap(); - trace!("eval_fn_call: Will pass last argument by untupling"); - Cow::from( - args.iter() - .map(|a| Ok(a.clone())) - .chain( - (0..untuple_arg.layout().fields.count()) - .map(|i| self.fn_arg_field(untuple_arg, i)), - ) - .collect::<InterpResult<'tcx, Vec<_>>>()?, - ) - } else { - // Plain arg passing - Cow::from(args) - }; - // If `with_caller_location` is set we pretend there is an extra argument (that - // we will not pass). - assert_eq!( - caller_args.len() + if with_caller_location { 1 } else { 0 }, - caller_fn_abi.args.len(), - "mismatch between caller ABI and caller arguments", - ); - let mut caller_args = caller_args - .iter() - .zip(caller_fn_abi.args.iter()) - .filter(|arg_and_abi| !matches!(arg_and_abi.1.mode, PassMode::Ignore)); - - // Now we have to spread them out across the callee's locals, - // taking into account the `spread_arg`. If we could write - // this as a single iterator (that handles `spread_arg`), then - // `pass_argument` would be the loop body. It takes care to - // not advance `caller_args` for ignored arguments. - let mut callee_args_abis = callee_fn_abi.args.iter(); - for local in body.args_iter() { - // Construct the destination place for this argument. At this point all - // locals are still dead, so we cannot construct a `PlaceTy`. - let dest = mir::Place::from(local); - // `layout_of_local` does more than just the instantiation we need to get the - // type, but the result gets cached so this avoids calling the instantiation - // query *again* the next time this local is accessed. - let ty = self.layout_of_local(self.frame(), local, None)?.ty; - if Some(local) == body.spread_arg { - // Make the local live once, then fill in the value field by field. - self.storage_live(local)?; - // Must be a tuple - let ty::Tuple(fields) = ty.kind() else { - span_bug!(self.cur_span(), "non-tuple type for `spread_arg`: {ty}") - }; - for (i, field_ty) in fields.iter().enumerate() { - let dest = dest.project_deeper( - &[mir::ProjectionElem::Field( - FieldIdx::from_usize(i), - field_ty, - )], - *self.tcx, - ); - let callee_abi = callee_args_abis.next().unwrap(); - self.pass_argument( - &mut caller_args, - callee_abi, - &dest, - field_ty, - /* already_live */ true, - )?; - } - } else { - // Normal argument. Cannot mark it as live yet, it might be unsized! - let callee_abi = callee_args_abis.next().unwrap(); - self.pass_argument( - &mut caller_args, - callee_abi, - &dest, - ty, - /* already_live */ false, - )?; - } - } - // If the callee needs a caller location, pretend we consume one more argument from the ABI. - if instance.def.requires_caller_location(*self.tcx) { - callee_args_abis.next().unwrap(); - } - // Now we should have no more caller args or callee arg ABIs - assert!( - callee_args_abis.next().is_none(), - "mismatch between callee ABI and callee body arguments" - ); - if caller_args.next().is_some() { - throw_ub_custom!(fluent::const_eval_too_many_caller_args); - } - // Don't forget to check the return type! - if !self.check_argument_compat(&caller_fn_abi.ret, &callee_fn_abi.ret)? 
{ - throw_ub!(AbiMismatchReturn { - caller_ty: caller_fn_abi.ret.layout.ty, - callee_ty: callee_fn_abi.ret.layout.ty - }); - } - - // Protect return place for in-place return value passing. - M::protect_in_place_function_argument(self, &destination)?; - - // Don't forget to mark "initially live" locals as live. - self.storage_live_for_always_live_locals()?; - }; - match res { - Err(err) => { - self.stack_mut().pop(); - Err(err) - } - Ok(()) => Ok(()), - } + ) } // `InstanceKind::Virtual` does not have callable MIR. Calls to `Virtual` instances must be // codegen'd / interpreted as virtual calls through the vtable. @@ -935,7 +747,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { caller_fn_abi.args[0].layout.ty = receiver_ty; // recurse with concrete function - self.eval_fn_call( + self.init_fn_call( FnVal::Instance(fn_inst), (caller_abi, &caller_fn_abi), &args, @@ -948,30 +760,33 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { } } - pub(crate) fn eval_fn_tail_call( + /// Initiate a tail call to this function -- popping the current stack frame, pushing the new + /// stack frame and initializing the arguments. + pub(super) fn init_fn_tail_call( &mut self, fn_val: FnVal<'tcx, M::ExtraFnVal>, (caller_abi, caller_fn_abi): (Abi, &FnAbi<'tcx, Ty<'tcx>>), args: &[FnArg<'tcx, M::Provenance>], with_caller_location: bool, ) -> InterpResult<'tcx> { - trace!("eval_fn_call: {:#?}", fn_val); + trace!("init_fn_tail_call: {:#?}", fn_val); // This is the "canonical" implementation of tail calls, // a pop of the current stack frame, followed by a normal call // which pushes a new stack frame, with the return address from // the popped stack frame. // - // Note that we are using `pop_stack_frame` and not `return_from_current_stack_frame`, + // Note that we are using `pop_stack_frame_raw` and not `return_from_current_stack_frame`, // as the latter "executes" the goto to the return block, but we don't want to, // only the tail called function should return to the current return block. M::before_stack_pop(self, self.frame())?; let StackPopInfo { return_action, return_to_block, return_place } = - self.pop_stack_frame(false)?; + self.pop_stack_frame_raw(false)?; assert_eq!(return_action, ReturnAction::Normal); + // Take the "stack pop cleanup" info, and use that to initiate the next call. 
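+ // The tail callee inherits our own return target and unwind action, so when it + // returns, control continues directly in our caller.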
let StackPopCleanup::Goto { ret, unwind } = return_to_block else { bug!("can't tailcall as root"); }; @@ -980,7 +795,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // we should check if both caller&callee can/n't unwind, // see - self.eval_fn_call( + self.init_fn_call( fn_val, (caller_abi, caller_fn_abi), args, @@ -991,41 +806,14 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { ) } - fn check_fn_target_features(&self, instance: ty::Instance<'tcx>) -> InterpResult<'tcx, ()> { - // Calling functions with `#[target_feature]` is not unsafe on WASM, see #84988 - let attrs = self.tcx.codegen_fn_attrs(instance.def_id()); - if !self.tcx.sess.target.is_like_wasm - && attrs - .target_features - .iter() - .any(|feature| !self.tcx.sess.target_features.contains(feature)) - { - throw_ub_custom!( - fluent::const_eval_unavailable_target_features_for_fn, - unavailable_feats = attrs - .target_features - .iter() - .filter(|&feature| !self.tcx.sess.target_features.contains(feature)) - .fold(String::new(), |mut s, feature| { - if !s.is_empty() { - s.push_str(", "); - } - s.push_str(feature.as_str()); - s - }), - ); - } - Ok(()) - } - - fn drop_in_place( + pub(super) fn init_drop_in_place_call( &mut self, place: &PlaceTy<'tcx, M::Provenance>, instance: ty::Instance<'tcx>, target: mir::BasicBlock, unwind: mir::UnwindAction, ) -> InterpResult<'tcx> { - trace!("drop_in_place: {:?},\n instance={:?}", place, instance); + trace!("init_drop_in_place_call: {:?},\n instance={:?}", place, instance); // We take the address of the object. This may well be unaligned, which is fine // for us here. However, unaligned accesses will probably make the actual drop // implementation fail -- a problem shared by rustc. @@ -1060,7 +848,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { let arg = self.mplace_to_ref(&place)?; let ret = MPlaceTy::fake_alloc_zst(self.layout_of(self.tcx.types.unit)?); - self.eval_fn_call( + self.init_fn_call( FnVal::Instance(instance), (Abi::Rust, fn_abi), &[FnArg::Copy(arg.into())], @@ -1070,4 +858,118 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { unwind, ) } + + /// Pops the current frame from the stack, copies the return value to the caller, deallocates + /// the memory for allocated locals, and jumps to an appropriate place. + /// + /// If `unwinding` is `false`, then we are performing a normal return + /// from a function. In this case, we jump back into the frame of the caller, + /// and continue execution as normal. + /// + /// If `unwinding` is `true`, then we are in the middle of a panic, + /// and need to unwind this frame. In this case, we jump to the + /// `cleanup` block for the function, which is responsible for running + /// `Drop` impls for any locals that have been initialized at this point. + /// The cleanup block ends with a special `Resume` terminator, which will + /// cause us to continue unwinding. + #[instrument(skip(self), level = "trace")] + pub(super) fn return_from_current_stack_frame( + &mut self, + unwinding: bool, + ) -> InterpResult<'tcx> { + info!( + "popping stack frame ({})", + if unwinding { "during unwinding" } else { "returning from function" } + ); + + // Check `unwinding`. + assert_eq!( + unwinding, + match self.frame().loc { + Left(loc) => self.body().basic_blocks[loc.block].is_cleanup, + Right(_) => true, + } + ); + if unwinding && self.frame_idx() == 0 { + throw_ub_custom!(fluent::const_eval_unwind_past_top); + } + + M::before_stack_pop(self, self.frame())?; + + // Copy return value. Must of course happen *before* we deallocate the locals. 
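+ // (`RETURN_PLACE` is just another local, so its storage is freed along with the rest.)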
+ // Must be *after* `before_stack_pop` as otherwise the return place might still be protected. + let copy_ret_result = if !unwinding { + let op = self + .local_to_op(mir::RETURN_PLACE, None) + .expect("return place should always be live"); + let dest = self.frame().return_place.clone(); + let res = if self.stack().len() == 1 { + // The initializer of constants and statics will get validated separately + // after the constant has been fully evaluated. While we could fall back to the default + // code path, that will cause -Zenforce-validity to cycle on static initializers. + // Reading from a static's memory is not allowed during its evaluation, and will always + // trigger a cycle error. Validation must read from the memory of the current item. + // For Miri this means we do not validate the root frame return value, + // but Miri anyway calls `read_target_isize` on that so separate validation + // is not needed. + self.copy_op_no_dest_validation(&op, &dest) + } else { + self.copy_op_allow_transmute(&op, &dest) + }; + trace!("return value: {:?}", self.dump_place(&dest.into())); + // We delay actually short-circuiting on this error until *after* the stack frame is + // popped, since we want this error to be attributed to the caller, whose type defines + // this transmute. + res + } else { + Ok(()) + }; + + // All right, now it is time to actually pop the frame. + let stack_pop_info = self.pop_stack_frame_raw(unwinding)?; + + // Report error from return value copy, if any. + copy_ret_result?; + + match stack_pop_info.return_action { + ReturnAction::Normal => {} + ReturnAction::NoJump => { + // The hook already did everything. + return Ok(()); + } + ReturnAction::NoCleanup => { + // If we are not doing cleanup, also skip everything else. + assert!(self.stack().is_empty(), "only the topmost frame should ever be leaked"); + assert!(!unwinding, "tried to skip cleanup during unwinding"); + // Skip machine hook. + return Ok(()); + } + } + + // Normal return, figure out where to jump. + if unwinding { + // Follow the unwind edge. + match stack_pop_info.return_to_block { + StackPopCleanup::Goto { unwind, .. } => { + // This must be the very last thing that happens, since it can in fact push a new stack frame. + self.unwind_to_block(unwind) + } + StackPopCleanup::Root { .. } => { + panic!("encountered StackPopCleanup::Root when unwinding!") + } + } + } else { + // Follow the normal return edge. + match stack_pop_info.return_to_block { + StackPopCleanup::Goto { ret, .. } => self.return_to_block(ret), + StackPopCleanup::Root { .. 
} => { + assert!( + self.stack().is_empty(), + "only the bottommost frame can have StackPopCleanup::Root" + ); + Ok(()) + } + } + } + } +} diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs index 18d585e4a7a7e..7a6bbdfdcb5e5 100644 --- a/compiler/rustc_const_eval/src/interpret/eval_context.rs +++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs @@ -1,40 +1,29 @@ -use std::cell::Cell; -use std::{fmt, mem}; - -use either::{Either, Left, Right}; +use either::{Left, Right}; use rustc_errors::DiagCtxtHandle; use rustc_hir::def_id::DefId; -use rustc_hir::definitions::DefPathData; -use rustc_hir::{self as hir}; -use rustc_index::IndexVec; use rustc_infer::infer::at::ToTrace; use rustc_infer::infer::TyCtxtInferExt; use rustc_infer::traits::ObligationCause; -use rustc_middle::mir::interpret::{ - CtfeProvenance, ErrorHandled, InvalidMetaKind, ReportedErrorInfo, -}; +use rustc_middle::mir::interpret::{ErrorHandled, InvalidMetaKind, ReportedErrorInfo}; use rustc_middle::query::TyCtxtAt; use rustc_middle::ty::layout::{ - self, FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOf, LayoutOfHelpers, - TyAndLayout, + self, FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOfHelpers, TyAndLayout, }; use rustc_middle::ty::{self, GenericArgsRef, ParamEnv, Ty, TyCtxt, TypeFoldable, Variance}; -use rustc_middle::{bug, mir, span_bug}; -use rustc_mir_dataflow::storage::always_storage_live_locals; +use rustc_middle::{mir, span_bug}; use rustc_session::Limit; use rustc_span::Span; use rustc_target::abi::call::FnAbi; use rustc_target::abi::{Align, HasDataLayout, Size, TargetDataLayout}; use rustc_trait_selection::traits::ObligationCtxt; -use tracing::{debug, info, info_span, instrument, trace}; +use tracing::{debug, trace}; use super::{ - err_inval, throw_inval, throw_ub, throw_ub_custom, throw_unsup, GlobalId, Immediate, - InterpErrorInfo, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Memory, MemoryKind, - OpTy, Operand, Place, PlaceTy, Pointer, PointerArithmetic, Projectable, Provenance, - ReturnAction, Scalar, + err_inval, throw_inval, throw_ub, throw_ub_custom, Frame, FrameInfo, GlobalId, InterpErrorInfo, + InterpResult, MPlaceTy, Machine, MemPlaceMeta, Memory, OpTy, Place, PlaceTy, PointerArithmetic, + Projectable, Provenance, }; -use crate::{errors, fluent_generated as fluent, util, ReportErrorExt}; +use crate::{fluent_generated as fluent, util, ReportErrorExt}; pub struct InterpCx<'tcx, M: Machine<'tcx>> { /// Stores the `Machine` instance. @@ -57,314 +46,6 @@ pub struct InterpCx<'tcx, M: Machine<'tcx>> { pub recursion_limit: Limit, } -// The PhantomData exists to prevent this type from being `Send`. If it were sent across a thread -// boundary and dropped in the other thread, it would exit the span in the other thread. -struct SpanGuard(tracing::Span, std::marker::PhantomData<*const u8>); - -impl SpanGuard { - /// By default a `SpanGuard` does nothing. - fn new() -> Self { - Self(tracing::Span::none(), std::marker::PhantomData) - } - - /// If a span is entered, we exit the previous span (if any, normally none) and enter the - /// new span. This is mainly so we don't have to use `Option` for the `tracing_span` field of - /// `Frame` by creating a dummy span to begin with and then entering it once the frame has - /// been pushed. 
- fn enter(&mut self, span: tracing::Span) { - // This executes the destructor on the previous instance of `SpanGuard`, ensuring that - // we never enter or exit more spans than vice versa. Unless you `mem::leak`, then we - // can't protect the tracing stack, but that'll just lead to weird logging, no actual - // problems. - *self = Self(span, std::marker::PhantomData); - self.0.with_subscriber(|(id, dispatch)| { - dispatch.enter(id); - }); - } -} - -impl Drop for SpanGuard { - fn drop(&mut self) { - self.0.with_subscriber(|(id, dispatch)| { - dispatch.exit(id); - }); - } -} - -/// A stack frame. -pub struct Frame<'tcx, Prov: Provenance = CtfeProvenance, Extra = ()> { - //////////////////////////////////////////////////////////////////////////////// - // Function and callsite information - //////////////////////////////////////////////////////////////////////////////// - /// The MIR for the function called on this frame. - pub body: &'tcx mir::Body<'tcx>, - - /// The def_id and args of the current function. - pub instance: ty::Instance<'tcx>, - - /// Extra data for the machine. - pub extra: Extra, - - //////////////////////////////////////////////////////////////////////////////// - // Return place and locals - //////////////////////////////////////////////////////////////////////////////// - /// Work to perform when returning from this function. - pub return_to_block: StackPopCleanup, - - /// The location where the result of the current stack frame should be written to, - /// and its layout in the caller. - pub return_place: MPlaceTy<'tcx, Prov>, - - /// The list of locals for this stack frame, stored in order as - /// `[return_ptr, arguments..., variables..., temporaries...]`. - /// The locals are stored as `Option<Value>`s. - /// `None` represents a local that is currently dead, while a live local - /// can either directly contain `Scalar` or refer to some part of an `Allocation`. - /// - /// Do *not* access this directly; always go through the machine hook! - pub locals: IndexVec<mir::Local, LocalState<'tcx, Prov>>, - - /// The span of the `tracing` crate is stored here. - /// When the guard is dropped, the span is exited. This gives us - /// a full stack trace on all tracing statements. - tracing_span: SpanGuard, - - //////////////////////////////////////////////////////////////////////////////// - // Current position within the function - //////////////////////////////////////////////////////////////////////////////// - /// If this is `Right`, we are not currently executing any particular statement in - /// this frame (can happen e.g. during frame initialization, and during unwinding on - /// frames without cleanup code). - /// - /// Needs to be public because ConstProp does unspeakable things to it. - pub loc: Either<mir::Location, Span>, -} - -/// What we store about a frame in an interpreter backtrace. -#[derive(Clone, Debug)] -pub struct FrameInfo<'tcx> { - pub instance: ty::Instance<'tcx>, - pub span: Span, -} - -#[derive(Clone, Copy, Eq, PartialEq, Debug)] // Miri debug-prints these -pub enum StackPopCleanup { - /// Jump to the next block in the caller, or cause UB if None (that's a function - /// that may never return). Also store layout of return place so - /// we can validate it at that layout. - /// `ret` stores the block we jump to on a normal return, while `unwind` - /// stores the block used for cleanup during unwinding. - Goto { ret: Option<mir::BasicBlock>, unwind: mir::UnwindAction }, - /// The root frame of the stack: nowhere else to jump to. - /// `cleanup` says whether locals are deallocated. 
Static computation - /// wants them leaked to intern what they need (and just throw away - /// the entire `ecx` when it is done). - Root { cleanup: bool }, -} - -/// Return type of [`InterpCx::pop_stack_frame`]. -pub struct StackPopInfo<'tcx, Prov: Provenance> { - /// Additional information about the action to be performed when returning from the popped - /// stack frame. - pub return_action: ReturnAction, - - /// [`return_to_block`](Frame::return_to_block) of the popped stack frame. - pub return_to_block: StackPopCleanup, - - /// [`return_place`](Frame::return_place) of the popped stack frame. - pub return_place: MPlaceTy<'tcx, Prov>, -} - -/// State of a local variable including a memoized layout -#[derive(Clone)] -pub struct LocalState<'tcx, Prov: Provenance = CtfeProvenance> { - value: LocalValue<Prov>, - /// Don't modify if `Some`, this is only used to prevent computing the layout twice. - /// Avoids computing the layout of locals that are never actually initialized. - layout: Cell<Option<TyAndLayout<'tcx>>>, -} - -impl<Prov: Provenance> std::fmt::Debug for LocalState<'_, Prov> { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("LocalState") - .field("value", &self.value) - .field("ty", &self.layout.get().map(|l| l.ty)) - .finish() - } -} - -/// Current value of a local variable -/// -/// This does not store the type of the local; the type is given by `body.local_decls` and can never -/// change, so by not storing here we avoid having to maintain that as an invariant. -#[derive(Copy, Clone, Debug)] // Miri debug-prints these -pub(super) enum LocalValue<Prov: Provenance = CtfeProvenance> { - /// This local is not currently alive, and cannot be used at all. - Dead, - /// A normal, live local. - /// Mostly for convenience, we re-use the `Operand` type here. - /// This is an optimization over just always having a pointer here; - /// we can thus avoid doing an allocation when the local just stores - /// immediate values *and* never has its address taken. - Live(Operand<Prov>), -} - -impl<'tcx, Prov: Provenance> LocalState<'tcx, Prov> { - pub fn make_live_uninit(&mut self) { - self.value = LocalValue::Live(Operand::Immediate(Immediate::Uninit)); - } - - /// This is a hack because Miri needs a way to visit all the provenance in a `LocalState` - /// without having a layout or `TyCtxt` available, and we want to keep the `Operand` type - /// private. - pub fn as_mplace_or_imm( - &self, - ) -> Option<Either<(Pointer<Option<Prov>>, MemPlaceMeta<Prov>), Immediate<Prov>>> { - match self.value { - LocalValue::Dead => None, - LocalValue::Live(Operand::Indirect(mplace)) => Some(Left((mplace.ptr, mplace.meta))), - LocalValue::Live(Operand::Immediate(imm)) => Some(Right(imm)), - } - } - - /// Read the local's value or error if the local is not yet live or not live anymore. - #[inline(always)] - pub(super) fn access(&self) -> InterpResult<'tcx, &Operand<Prov>> { - match &self.value { - LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"? - LocalValue::Live(val) => Ok(val), - } - } - - /// Overwrite the local. If the local can be overwritten in place, return a reference - /// to do so; otherwise return the `MemPlace` to consult instead. - #[inline(always)] - pub(super) fn access_mut(&mut self) -> InterpResult<'tcx, &mut Operand<Prov>> { - match &mut self.value { - LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"? 
- LocalValue::Live(val) => Ok(val), - } - } -} - -impl<'tcx, Prov: Provenance> Frame<'tcx, Prov> { - pub fn with_extra<Extra>(self, extra: Extra) -> Frame<'tcx, Prov, Extra> { - Frame { - body: self.body, - instance: self.instance, - return_to_block: self.return_to_block, - return_place: self.return_place, - locals: self.locals, - loc: self.loc, - extra, - tracing_span: self.tracing_span, - } - } -} - -impl<'tcx, Prov: Provenance, Extra> Frame<'tcx, Prov, Extra> { - /// Get the current location within the Frame. - /// - /// If this is `Right`, we are not currently executing any particular statement in - /// this frame (can happen e.g. during frame initialization, and during unwinding on - /// frames without cleanup code). - /// - /// Used by priroda. - pub fn current_loc(&self) -> Either<mir::Location, Span> { - self.loc - } - - /// Return the `SourceInfo` of the current instruction. - pub fn current_source_info(&self) -> Option<&mir::SourceInfo> { - self.loc.left().map(|loc| self.body.source_info(loc)) - } - - pub fn current_span(&self) -> Span { - match self.loc { - Left(loc) => self.body.source_info(loc).span, - Right(span) => span, - } - } - - pub fn lint_root(&self, tcx: TyCtxt<'tcx>) -> Option<hir::HirId> { - // We first try to get a HirId via the current source scope, - // and fall back to `body.source`. - self.current_source_info() - .and_then(|source_info| match &self.body.source_scopes[source_info.scope].local_data { - mir::ClearCrossCrate::Set(data) => Some(data.lint_root), - mir::ClearCrossCrate::Clear => None, - }) - .or_else(|| { - let def_id = self.body.source.def_id().as_local(); - def_id.map(|def_id| tcx.local_def_id_to_hir_id(def_id)) - }) - } - - /// Returns the address of the buffer where the locals are stored. This is used by `Place` as a - /// sanity check to detect bugs where we mix up which stack frame a place refers to. - #[inline(always)] - pub(super) fn locals_addr(&self) -> usize { - self.locals.raw.as_ptr().addr() - } - - #[must_use] - pub fn generate_stacktrace_from_stack(stack: &[Self]) -> Vec<FrameInfo<'tcx>> { - let mut frames = Vec::new(); - // This deliberately does *not* honor `requires_caller_location` since it is used for much - // more than just panics. - for frame in stack.iter().rev() { - let span = match frame.loc { - Left(loc) => { - // If the stacktrace passes through MIR-inlined source scopes, add them. - let mir::SourceInfo { mut span, scope } = *frame.body.source_info(loc); - let mut scope_data = &frame.body.source_scopes[scope]; - while let Some((instance, call_span)) = scope_data.inlined { - frames.push(FrameInfo { span, instance }); - span = call_span; - scope_data = &frame.body.source_scopes[scope_data.parent_scope.unwrap()]; - } - span - } - Right(span) => span, - }; - frames.push(FrameInfo { span, instance: frame.instance }); - } - trace!("generate stacktrace: {:#?}", frames); - frames - } -} - -// FIXME: only used by miri, should be removed once translatable. -impl<'tcx> fmt::Display for FrameInfo<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - ty::tls::with(|tcx| { - if tcx.def_key(self.instance.def_id()).disambiguated_data.data == DefPathData::Closure { - write!(f, "inside closure") - } else { - // Note: this triggers a `must_produce_diag` state, which means that if we ever - // get here we must emit a diagnostic. We should never display a `FrameInfo` unless - // we actually want to emit a warning or error to the user. 
- write!(f, "inside `{}`", self.instance) - } - }) - } -} - -impl<'tcx> FrameInfo<'tcx> { - pub fn as_note(&self, tcx: TyCtxt<'tcx>) -> errors::FrameNote { - let span = self.span; - if tcx.def_key(self.instance.def_id()).disambiguated_data.data == DefPathData::Closure { - errors::FrameNote { where_: "closure", span, instance: String::new(), times: 0 } - } else { - let instance = format!("{}", self.instance); - // Note: this triggers a `must_produce_diag` state, which means that if we ever get - // here we must emit a diagnostic. We should never display a `FrameInfo` unless we - // actually want to emit a warning or error to the user. - errors::FrameNote { where_: "instance", span, instance, times: 0 } - } - } -} - impl<'tcx, M: Machine<'tcx>> HasDataLayout for InterpCx<'tcx, M> { #[inline] fn data_layout(&self) -> &TargetDataLayout { @@ -703,30 +384,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { span_bug!(self.cur_span(), "no non-`#[track_caller]` frame found") } - #[inline(always)] - pub(super) fn layout_of_local( - &self, - frame: &Frame<'tcx, M::Provenance, M::FrameExtra>, - local: mir::Local, - layout: Option>, - ) -> InterpResult<'tcx, TyAndLayout<'tcx>> { - let state = &frame.locals[local]; - if let Some(layout) = state.layout.get() { - return Ok(layout); - } - - let layout = from_known_layout(self.tcx, self.param_env, layout, || { - let local_ty = frame.body.local_decls[local].ty; - let local_ty = - self.instantiate_from_frame_and_normalize_erasing_regions(frame, local_ty)?; - self.layout_of(local_ty) - })?; - - // Layouts of locals are requested a lot, so we cache them. - state.layout.set(Some(layout)); - Ok(layout) - } - /// Returns the actual dynamic size and alignment of the place at the given type. /// Only the "meta" (metadata) part of the place matters. /// This can fail to provide an answer for extern types. @@ -825,132 +482,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { self.size_and_align_of(&mplace.meta(), &mplace.layout) } - #[instrument(skip(self, body, return_place, return_to_block), level = "debug")] - pub fn push_stack_frame( - &mut self, - instance: ty::Instance<'tcx>, - body: &'tcx mir::Body<'tcx>, - return_place: &MPlaceTy<'tcx, M::Provenance>, - return_to_block: StackPopCleanup, - ) -> InterpResult<'tcx> { - trace!("body: {:#?}", body); - - // First push a stack frame so we have access to the local args - self.push_new_stack_frame(instance, body, return_to_block, return_place.clone())?; - - self.after_stack_frame_push(instance, body)?; - - Ok(()) - } - - /// Creates a new stack frame, initializes it and pushes it onto the stack. - /// A private helper for [`push_stack_frame`](InterpCx::push_stack_frame). - fn push_new_stack_frame( - &mut self, - instance: ty::Instance<'tcx>, - body: &'tcx mir::Body<'tcx>, - return_to_block: StackPopCleanup, - return_place: MPlaceTy<'tcx, M::Provenance>, - ) -> InterpResult<'tcx> { - let dead_local = LocalState { value: LocalValue::Dead, layout: Cell::new(None) }; - let locals = IndexVec::from_elem(dead_local, &body.local_decls); - let pre_frame = Frame { - body, - loc: Right(body.span), // Span used for errors caused during preamble. - return_to_block, - return_place, - locals, - instance, - tracing_span: SpanGuard::new(), - extra: (), - }; - let frame = M::init_frame(self, pre_frame)?; - self.stack_mut().push(frame); - - Ok(()) - } - - /// A private helper for [`push_stack_frame`](InterpCx::push_stack_frame). 
- fn after_stack_frame_push( - &mut self, - instance: ty::Instance<'tcx>, - body: &'tcx mir::Body<'tcx>, - ) -> InterpResult<'tcx> { - // Make sure all the constants required by this frame evaluate successfully (post-monomorphization check). - for &const_ in body.required_consts() { - let c = - self.instantiate_from_current_frame_and_normalize_erasing_regions(const_.const_)?; - c.eval(*self.tcx, self.param_env, const_.span).map_err(|err| { - err.emit_note(*self.tcx); - err - })?; - } - - // done - M::after_stack_push(self)?; - self.frame_mut().loc = Left(mir::Location::START); - - let span = info_span!("frame", "{}", instance); - self.frame_mut().tracing_span.enter(span); - - Ok(()) - } - - /// Pops a stack frame from the stack and returns some information about it. - /// - /// This also deallocates locals, if necessary. - /// - /// [`M::before_stack_pop`] should be called before calling this function. - /// [`M::after_stack_pop`] is called by this function automatically. - /// - /// [`M::before_stack_pop`]: Machine::before_stack_pop - /// [`M::after_stack_pop`]: Machine::after_stack_pop - pub fn pop_stack_frame( - &mut self, - unwinding: bool, - ) -> InterpResult<'tcx, StackPopInfo<'tcx, M::Provenance>> { - let cleanup = self.cleanup_current_frame_locals()?; - - let frame = - self.stack_mut().pop().expect("tried to pop a stack frame, but there were none"); - - let return_to_block = frame.return_to_block; - let return_place = frame.return_place.clone(); - - let return_action; - if cleanup { - return_action = M::after_stack_pop(self, frame, unwinding)?; - assert_ne!(return_action, ReturnAction::NoCleanup); - } else { - return_action = ReturnAction::NoCleanup; - }; - - Ok(StackPopInfo { return_action, return_to_block, return_place }) - } - - /// A private helper for [`pop_stack_frame`](InterpCx::pop_stack_frame). - /// Returns `true` if cleanup has been done, `false` otherwise. - fn cleanup_current_frame_locals(&mut self) -> InterpResult<'tcx, bool> { - // Cleanup: deallocate locals. - // Usually we want to clean up (deallocate locals), but in a few rare cases we don't. - // We do this while the frame is still on the stack, so errors point to the callee. - let return_to_block = self.frame().return_to_block; - let cleanup = match return_to_block { - StackPopCleanup::Goto { .. } => true, - StackPopCleanup::Root { cleanup, .. } => cleanup, - }; - - if cleanup { - // We need to take the locals out, since we need to mutate while iterating. - let locals = mem::take(&mut self.frame_mut().locals); - for local in &locals { - self.deallocate_local(local.value)?; - } - } - - Ok(cleanup) - } - /// Jump to the given block. #[inline] pub fn go_to_block(&mut self, target: mir::BasicBlock) { @@ -997,248 +528,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { Ok(()) } - /// Pops the current frame from the stack, deallocating the - /// memory for allocated locals, and jumps to an appropriate place. - /// - /// If `unwinding` is `false`, then we are performing a normal return - /// from a function. In this case, we jump back into the frame of the caller, - /// and continue execution as normal. - /// - /// If `unwinding` is `true`, then we are in the middle of a panic, - /// and need to unwind this frame. In this case, we jump to the - /// `cleanup` block for the function, which is responsible for running - /// `Drop` impls for any locals that have been initialized at this point. - /// The cleanup block ends with a special `Resume` terminator, which will - /// cause us to continue unwinding. 
- #[instrument(skip(self), level = "debug")] - pub(super) fn return_from_current_stack_frame( - &mut self, - unwinding: bool, - ) -> InterpResult<'tcx> { - info!( - "popping stack frame ({})", - if unwinding { "during unwinding" } else { "returning from function" } - ); - - // Check `unwinding`. - assert_eq!( - unwinding, - match self.frame().loc { - Left(loc) => self.body().basic_blocks[loc.block].is_cleanup, - Right(_) => true, - } - ); - if unwinding && self.frame_idx() == 0 { - throw_ub_custom!(fluent::const_eval_unwind_past_top); - } - - M::before_stack_pop(self, self.frame())?; - - // Copy return value. Must of course happen *before* we deallocate the locals. - let copy_ret_result = if !unwinding { - let op = self - .local_to_op(mir::RETURN_PLACE, None) - .expect("return place should always be live"); - let dest = self.frame().return_place.clone(); - let err = if self.stack().len() == 1 { - // The initializer of constants and statics will get validated separately - // after the constant has been fully evaluated. While we could fall back to the default - // code path, that will cause -Zenforce-validity to cycle on static initializers. - // Reading from a static's memory is not allowed during its evaluation, and will always - // trigger a cycle error. Validation must read from the memory of the current item. - // For Miri this means we do not validate the root frame return value, - // but Miri anyway calls `read_target_isize` on that so separate validation - // is not needed. - self.copy_op_no_dest_validation(&op, &dest) - } else { - self.copy_op_allow_transmute(&op, &dest) - }; - trace!("return value: {:?}", self.dump_place(&dest.into())); - // We delay actually short-circuiting on this error until *after* the stack frame is - // popped, since we want this error to be attributed to the caller, whose type defines - // this transmute. - err - } else { - Ok(()) - }; - - // All right, now it is time to actually pop the frame. - let stack_pop_info = self.pop_stack_frame(unwinding)?; - - // Report error from return value copy, if any. - copy_ret_result?; - - match stack_pop_info.return_action { - ReturnAction::Normal => {} - ReturnAction::NoJump => { - // The hook already did everything. - return Ok(()); - } - ReturnAction::NoCleanup => { - // If we are not doing cleanup, also skip everything else. - assert!(self.stack().is_empty(), "only the topmost frame should ever be leaked"); - assert!(!unwinding, "tried to skip cleanup during unwinding"); - // Skip machine hook. - return Ok(()); - } - } - - // Normal return, figure out where to jump. - if unwinding { - // Follow the unwind edge. - let unwind = match stack_pop_info.return_to_block { - StackPopCleanup::Goto { unwind, .. } => unwind, - StackPopCleanup::Root { .. } => { - panic!("encountered StackPopCleanup::Root when unwinding!") - } - }; - // This must be the very last thing that happens, since it can in fact push a new stack frame. - self.unwind_to_block(unwind) - } else { - // Follow the normal return edge. - match stack_pop_info.return_to_block { - StackPopCleanup::Goto { ret, .. } => self.return_to_block(ret), - StackPopCleanup::Root { .. } => { - assert!( - self.stack().is_empty(), - "only the topmost frame can have StackPopCleanup::Root" - ); - Ok(()) - } - } - } - } - - /// In the current stack frame, mark all locals as live that are not arguments and don't have - /// `Storage*` annotations (this includes the return place). 
- pub fn storage_live_for_always_live_locals(&mut self) -> InterpResult<'tcx> { - self.storage_live(mir::RETURN_PLACE)?; - - let body = self.body(); - let always_live = always_storage_live_locals(body); - for local in body.vars_and_temps_iter() { - if always_live.contains(local) { - self.storage_live(local)?; - } - } - Ok(()) - } - - pub fn storage_live_dyn( - &mut self, - local: mir::Local, - meta: MemPlaceMeta, - ) -> InterpResult<'tcx> { - trace!("{:?} is now live", local); - - // We avoid `ty.is_trivially_sized` since that does something expensive for ADTs. - fn is_very_trivially_sized(ty: Ty<'_>) -> bool { - match ty.kind() { - ty::Infer(ty::IntVar(_) | ty::FloatVar(_)) - | ty::Uint(_) - | ty::Int(_) - | ty::Bool - | ty::Float(_) - | ty::FnDef(..) - | ty::FnPtr(_) - | ty::RawPtr(..) - | ty::Char - | ty::Ref(..) - | ty::Coroutine(..) - | ty::CoroutineWitness(..) - | ty::Array(..) - | ty::Closure(..) - | ty::CoroutineClosure(..) - | ty::Never - | ty::Error(_) - | ty::Dynamic(_, _, ty::DynStar) => true, - - ty::Str | ty::Slice(_) | ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => false, - - ty::Tuple(tys) => tys.last().is_none_or(|ty| is_very_trivially_sized(*ty)), - - ty::Pat(ty, ..) => is_very_trivially_sized(*ty), - - // We don't want to do any queries, so there is not much we can do with ADTs. - ty::Adt(..) => false, - - ty::Alias(..) | ty::Param(_) | ty::Placeholder(..) => false, - - ty::Infer(ty::TyVar(_)) => false, - - ty::Bound(..) - | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => { - bug!("`is_very_trivially_sized` applied to unexpected type: {}", ty) - } - } - } - - // This is a hot function, we avoid computing the layout when possible. - // `unsized_` will be `None` for sized types and `Some(layout)` for unsized types. - let unsized_ = if is_very_trivially_sized(self.body().local_decls[local].ty) { - None - } else { - // We need the layout. - let layout = self.layout_of_local(self.frame(), local, None)?; - if layout.is_sized() { None } else { Some(layout) } - }; - - let local_val = LocalValue::Live(if let Some(layout) = unsized_ { - if !meta.has_meta() { - throw_unsup!(UnsizedLocal); - } - // Need to allocate some memory, since `Immediate::Uninit` cannot be unsized. - let dest_place = self.allocate_dyn(layout, MemoryKind::Stack, meta)?; - Operand::Indirect(*dest_place.mplace()) - } else { - assert!(!meta.has_meta()); // we're dropping the metadata - // Just make this an efficient immediate. - // Note that not calling `layout_of` here does have one real consequence: - // if the type is too big, we'll only notice this when the local is actually initialized, - // which is a bit too late -- we should ideally notice this already here, when the memory - // is conceptually allocated. But given how rare that error is and that this is a hot function, - // we accept this downside for now. - Operand::Immediate(Immediate::Uninit) - }); - - // If the local is already live, deallocate its old memory. - let old = mem::replace(&mut self.frame_mut().locals[local].value, local_val); - self.deallocate_local(old)?; - Ok(()) - } - - /// Mark a storage as live, killing the previous content. - #[inline(always)] - pub fn storage_live(&mut self, local: mir::Local) -> InterpResult<'tcx> { - self.storage_live_dyn(local, MemPlaceMeta::None) - } - - pub fn storage_dead(&mut self, local: mir::Local) -> InterpResult<'tcx> { - assert!(local != mir::RETURN_PLACE, "Cannot make return place dead"); - trace!("{:?} is now dead", local); - - // If the local is already dead, this is a NOP. 
- let old = mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead); - self.deallocate_local(old)?; - Ok(()) - } - - #[instrument(skip(self), level = "debug")] - fn deallocate_local(&mut self, local: LocalValue) -> InterpResult<'tcx> { - if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local { - // All locals have a backing allocation, even if the allocation is empty - // due to the local having ZST type. Hence we can `unwrap`. - trace!( - "deallocating local {:?}: {:?}", - local, - // Locals always have a `alloc_id` (they are never the result of a int2ptr). - self.dump_alloc(ptr.provenance.unwrap().get_alloc_id().unwrap()) - ); - self.deallocate_ptr(ptr, None, MemoryKind::Stack)?; - }; - Ok(()) - } - /// Call a query that can return `ErrorHandled`. Should be used for statics and other globals. /// (`mir::Const`/`ty::Const` have `eval` methods that can be used directly instead.) pub fn ctfe_query( @@ -1328,39 +617,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> std::fmt::Debug for PlacePrinter<'a, 'tcx, M> { } write!(fmt, ":")?; - match self.ecx.frame().locals[local].value { - LocalValue::Dead => write!(fmt, " is dead")?, - LocalValue::Live(Operand::Immediate(Immediate::Uninit)) => { - write!(fmt, " is uninitialized")? - } - LocalValue::Live(Operand::Indirect(mplace)) => { - write!( - fmt, - " by {} ref {:?}:", - match mplace.meta { - MemPlaceMeta::Meta(meta) => format!(" meta({meta:?})"), - MemPlaceMeta::None => String::new(), - }, - mplace.ptr, - )?; - allocs.extend(mplace.ptr.provenance.map(Provenance::get_alloc_id)); - } - LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => { - write!(fmt, " {val:?}")?; - if let Scalar::Ptr(ptr, _size) = val { - allocs.push(ptr.provenance.get_alloc_id()); - } - } - LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => { - write!(fmt, " ({val1:?}, {val2:?})")?; - if let Scalar::Ptr(ptr, _size) = val1 { - allocs.push(ptr.provenance.get_alloc_id()); - } - if let Scalar::Ptr(ptr, _size) = val2 { - allocs.push(ptr.provenance.get_alloc_id()); - } - } - } + self.ecx.frame().locals[local].print(&mut allocs, fmt)?; write!(fmt, ": {:?}", self.ecx.dump_allocs(allocs.into_iter().flatten().collect())) } diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs index bdce8253b2e76..7af4e0c285b0b 100644 --- a/compiler/rustc_const_eval/src/interpret/machine.rs +++ b/compiler/rustc_const_eval/src/interpret/machine.rs @@ -37,7 +37,7 @@ pub enum ReturnAction { /// took care of everything. NoJump, - /// Returned by [`InterpCx::pop_stack_frame`] when no cleanup should be done. + /// Returned by [`InterpCx::pop_stack_frame_raw`] when no cleanup should be done. NoCleanup, } diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs index afa2303e38715..511756e3f86c9 100644 --- a/compiler/rustc_const_eval/src/interpret/mod.rs +++ b/compiler/rustc_const_eval/src/interpret/mod.rs @@ -1,5 +1,6 @@ //! 
An interpreter for MIR used in CTFE and by miri

+mod call;
 mod cast;
 mod discriminant;
 mod eval_context;
@@ -11,8 +12,8 @@ mod operand;
 mod operator;
 mod place;
 mod projection;
+mod stack;
 mod step;
-mod terminator;
 mod traits;
 mod util;
 mod validity;
@@ -22,7 +23,8 @@ use eval_context::{from_known_layout, mir_assign_valid_types};
 #[doc(no_inline)]
 pub use rustc_middle::mir::interpret::*; // have all the `interpret` symbols in one place: here
 
-pub use self::eval_context::{format_interp_error, Frame, FrameInfo, InterpCx, StackPopCleanup};
+pub use self::call::FnArg;
+pub use self::eval_context::{format_interp_error, InterpCx};
 pub use self::intern::{
     intern_const_alloc_for_constprop, intern_const_alloc_recursive, HasStaticRootDefId,
     InternKind, InternResult,
@@ -35,7 +37,7 @@ pub use self::operand::{ImmTy, Immediate, OpTy, Readable};
 pub use self::place::{MPlaceTy, MemPlaceMeta, PlaceTy, Writeable};
 use self::place::{MemPlace, Place};
 pub use self::projection::{OffsetMode, Projectable};
-pub use self::terminator::FnArg;
+pub use self::stack::{Frame, FrameInfo, LocalState, StackPopCleanup, StackPopInfo};
 pub(crate) use self::util::create_static_alloc;
 pub use self::validity::{CtfeValidationMode, RefTracking};
 pub use self::visitor::ValueVisitor;
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index d4559e1e8c64e..4ced009ab39b6 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -184,6 +184,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
     #[inline]
     pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
         debug_assert!(layout.abi.is_scalar(), "`ImmTy::from_scalar` on non-scalar layout");
+        debug_assert_eq!(val.size(), layout.size);
         ImmTy { imm: val.into(), layout }
     }
 
diff --git a/compiler/rustc_const_eval/src/interpret/stack.rs b/compiler/rustc_const_eval/src/interpret/stack.rs
new file mode 100644
index 0000000000000..50dbced6a2a6a
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/stack.rs
@@ -0,0 +1,651 @@
+//! Manages the low-level pushing and popping of stack frames and the (de)allocation of local variables.
+//! For handling of argument passing and return values, see the `call` module.
+
+use std::cell::Cell;
+use std::{fmt, mem};
+
+use either::{Either, Left, Right};
+use rustc_hir as hir;
+use rustc_hir::definitions::DefPathData;
+use rustc_index::IndexVec;
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::{bug, mir};
+use rustc_mir_dataflow::storage::always_storage_live_locals;
+use rustc_span::Span;
+use tracing::{info_span, instrument, trace};
+
+use super::{
+    from_known_layout, throw_ub, throw_unsup, AllocId, CtfeProvenance, Immediate, InterpCx,
+    InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, MemoryKind, Operand, Pointer,
+    Provenance, ReturnAction, Scalar,
+};
+use crate::errors;
+
+// The PhantomData exists to prevent this type from being `Send`. If it were sent across a thread
+// boundary and dropped in the other thread, it would exit the span in the other thread.
+struct SpanGuard(tracing::Span, std::marker::PhantomData<*const u8>);
+
+impl SpanGuard {
+    /// By default a `SpanGuard` does nothing.
+    fn new() -> Self {
+        Self(tracing::Span::none(), std::marker::PhantomData)
+    }
+
+    /// If a span is entered, we exit the previous span (if any, normally none) and enter the
+    /// new span. This is mainly so we don't have to use `Option` for the `tracing_span` field of
+    /// `Frame` by creating a dummy span to begin with and then entering it once the frame has
+    /// been pushed.
+    fn enter(&mut self, span: tracing::Span) {
+        // This executes the destructor on the previous instance of `SpanGuard`, ensuring that
+        // we never enter more spans than we exit (and vice versa). If you `mem::forget` the
+        // guard, we can't protect the tracing stack, but that will just lead to weird logging,
+        // no actual problems.
+        *self = Self(span, std::marker::PhantomData);
+        self.0.with_subscriber(|(id, dispatch)| {
+            dispatch.enter(id);
+        });
+    }
+}
+
+impl Drop for SpanGuard {
+    fn drop(&mut self) {
+        self.0.with_subscriber(|(id, dispatch)| {
+            dispatch.exit(id);
+        });
+    }
+}
+
+/// A stack frame.
+pub struct Frame<'tcx, Prov: Provenance = CtfeProvenance, Extra = ()> {
+    ////////////////////////////////////////////////////////////////////////////////
+    // Function and callsite information
+    ////////////////////////////////////////////////////////////////////////////////
+    /// The MIR for the function called on this frame.
+    pub(super) body: &'tcx mir::Body<'tcx>,
+
+    /// The def_id and args of the current function.
+    pub(super) instance: ty::Instance<'tcx>,
+
+    /// Extra data for the machine.
+    pub extra: Extra,
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // Return place and locals
+    ////////////////////////////////////////////////////////////////////////////////
+    /// Work to perform when returning from this function.
+    return_to_block: StackPopCleanup,
+
+    /// The location where the result of the current stack frame should be written to,
+    /// and its layout in the caller.
+    pub return_place: MPlaceTy<'tcx, Prov>,
+
+    /// The list of locals for this stack frame, stored in order as
+    /// `[return_ptr, arguments..., variables..., temporaries...]`.
+    /// The locals are stored as `Option<Value>`s.
+    /// `None` represents a local that is currently dead, while a live local
+    /// can either directly contain `Scalar` or refer to some part of an `Allocation`.
+    ///
+    /// Do *not* access this directly; always go through the machine hook!
+    pub locals: IndexVec<mir::Local, LocalState<'tcx, Prov>>,
+
+    /// The `tracing` span for this frame is stored here.
+    /// When the guard is dropped, the span is exited. This gives us
+    /// a full stack trace on all tracing statements.
+    tracing_span: SpanGuard,
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // Current position within the function
+    ////////////////////////////////////////////////////////////////////////////////
+    /// If this is `Right`, we are not currently executing any particular statement in
+    /// this frame (can happen e.g. during frame initialization, and during unwinding on
+    /// frames without cleanup code).
+    ///
+    /// Needs to be public because ConstProp does unspeakable things to it.
+    pub(super) loc: Either<mir::Location, Span>,
+}
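A minimal stand-alone sketch of the span-guard idea above, using only public `tracing` API (`with_frame_span` and its parameter are illustrative, not interpreter items):

use tracing::{info, info_span};

// Entering a span and exiting it on drop scopes every event in between to the
// frame. `entered()` is the lexically-scoped variant of what `SpanGuard` does;
// `SpanGuard` exists because the interpreter must *store* its guard in the `Frame`.
fn with_frame_span(frame_name: &str) {
    let _guard = info_span!("frame", "{}", frame_name).entered();
    info!("this event carries the frame span as context");
    // `_guard` dropped here: the span is exited.
}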
+
+#[derive(Clone, Copy, Eq, PartialEq, Debug)] // Miri debug-prints these
+pub enum StackPopCleanup {
+    /// Jump to the next block in the caller, or cause UB if None (that's a function
+    /// that may never return).
+    /// `ret` stores the block we jump to on a normal return, while `unwind`
+    /// stores the block used for cleanup during unwinding.
+    Goto { ret: Option<mir::BasicBlock>, unwind: mir::UnwindAction },
+    /// The root frame of the stack: nowhere else to jump to.
+    /// `cleanup` says whether locals are deallocated. Static computation
+    /// wants them leaked to intern what they need (and just throw away
+    /// the entire `ecx` when it is done).
+    Root { cleanup: bool },
+}
+
+/// Return type of [`InterpCx::pop_stack_frame_raw`].
+pub struct StackPopInfo<'tcx, Prov: Provenance> {
+    /// Additional information about the action to be performed when returning from the popped
+    /// stack frame.
+    pub return_action: ReturnAction,
+
+    /// [`return_to_block`](Frame::return_to_block) of the popped stack frame.
+    pub return_to_block: StackPopCleanup,
+
+    /// [`return_place`](Frame::return_place) of the popped stack frame.
+    pub return_place: MPlaceTy<'tcx, Prov>,
+}
+
+/// State of a local variable including a memoized layout.
+#[derive(Clone)]
+pub struct LocalState<'tcx, Prov: Provenance = CtfeProvenance> {
+    value: LocalValue<Prov>,
+    /// Don't modify if `Some`, this is only used to prevent computing the layout twice.
+    /// Avoids computing the layout of locals that are never actually initialized.
+    layout: Cell<Option<TyAndLayout<'tcx>>>,
+}
+
+impl<Prov: Provenance> std::fmt::Debug for LocalState<'_, Prov> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("LocalState")
+            .field("value", &self.value)
+            .field("ty", &self.layout.get().map(|l| l.ty))
+            .finish()
+    }
+}
+
+/// Current value of a local variable.
+///
+/// This does not store the type of the local; the type is given by `body.local_decls` and can never
+/// change, so by not storing it here we avoid having to maintain that as an invariant.
+#[derive(Copy, Clone, Debug)] // Miri debug-prints these
+pub(super) enum LocalValue<Prov: Provenance = CtfeProvenance> {
+    /// This local is not currently alive, and cannot be used at all.
+    Dead,
+    /// A normal, live local.
+    /// Mostly for convenience, we re-use the `Operand` type here.
+    /// This is an optimization over just always having a pointer here;
+    /// we can thus avoid doing an allocation when the local just stores
+    /// immediate values *and* never has its address taken.
+    Live(Operand<Prov>),
+}
+
+impl<'tcx, Prov: Provenance> LocalState<'tcx, Prov> {
+    pub fn make_live_uninit(&mut self) {
+        self.value = LocalValue::Live(Operand::Immediate(Immediate::Uninit));
+    }
+
+    /// This is a hack because Miri needs a way to visit all the provenance in a `LocalState`
+    /// without having a layout or `TyCtxt` available, and we want to keep the `Operand` type
+    /// private.
+    pub fn as_mplace_or_imm(
+        &self,
+    ) -> Option<Either<(Pointer<Option<Prov>>, MemPlaceMeta<Prov>), Immediate<Prov>>> {
+        match self.value {
+            LocalValue::Dead => None,
+            LocalValue::Live(Operand::Indirect(mplace)) => Some(Left((mplace.ptr, mplace.meta))),
+            LocalValue::Live(Operand::Immediate(imm)) => Some(Right(imm)),
+        }
+    }
+
+    /// Read the local's value, or error if the local is not yet live or not live anymore.
+    #[inline(always)]
+    pub(super) fn access(&self) -> InterpResult<'tcx, &Operand<Prov>> {
+        match &self.value {
+            LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
+            LocalValue::Live(val) => Ok(val),
+        }
+    }
+
+    /// Overwrite the local. If the local can be overwritten in place, return a reference
+    /// to do so; otherwise return the `MemPlace` to consult instead.
+    #[inline(always)]
+    pub(super) fn access_mut(&mut self) -> InterpResult<'tcx, &mut Operand<Prov>> {
+        match &mut self.value {
+            LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
+            LocalValue::Live(val) => Ok(val),
+        }
+    }
+}
+
+/// What we store about a frame in an interpreter backtrace.
+#[derive(Clone, Debug)] +pub struct FrameInfo<'tcx> { + pub instance: ty::Instance<'tcx>, + pub span: Span, +} + +// FIXME: only used by miri, should be removed once translatable. +impl<'tcx> fmt::Display for FrameInfo<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + ty::tls::with(|tcx| { + if tcx.def_key(self.instance.def_id()).disambiguated_data.data == DefPathData::Closure { + write!(f, "inside closure") + } else { + // Note: this triggers a `must_produce_diag` state, which means that if we ever + // get here we must emit a diagnostic. We should never display a `FrameInfo` unless + // we actually want to emit a warning or error to the user. + write!(f, "inside `{}`", self.instance) + } + }) + } +} + +impl<'tcx> FrameInfo<'tcx> { + pub fn as_note(&self, tcx: TyCtxt<'tcx>) -> errors::FrameNote { + let span = self.span; + if tcx.def_key(self.instance.def_id()).disambiguated_data.data == DefPathData::Closure { + errors::FrameNote { where_: "closure", span, instance: String::new(), times: 0 } + } else { + let instance = format!("{}", self.instance); + // Note: this triggers a `must_produce_diag` state, which means that if we ever get + // here we must emit a diagnostic. We should never display a `FrameInfo` unless we + // actually want to emit a warning or error to the user. + errors::FrameNote { where_: "instance", span, instance, times: 0 } + } + } +} + +impl<'tcx, Prov: Provenance> Frame<'tcx, Prov> { + pub fn with_extra(self, extra: Extra) -> Frame<'tcx, Prov, Extra> { + Frame { + body: self.body, + instance: self.instance, + return_to_block: self.return_to_block, + return_place: self.return_place, + locals: self.locals, + loc: self.loc, + extra, + tracing_span: self.tracing_span, + } + } +} + +impl<'tcx, Prov: Provenance, Extra> Frame<'tcx, Prov, Extra> { + /// Get the current location within the Frame. + /// + /// If this is `Right`, we are not currently executing any particular statement in + /// this frame (can happen e.g. during frame initialization, and during unwinding on + /// frames without cleanup code). + /// + /// Used by [priroda](https://github.com/oli-obk/priroda). + pub fn current_loc(&self) -> Either { + self.loc + } + + pub fn body(&self) -> &'tcx mir::Body<'tcx> { + self.body + } + + pub fn instance(&self) -> ty::Instance<'tcx> { + self.instance + } + + /// Return the `SourceInfo` of the current instruction. + pub fn current_source_info(&self) -> Option<&mir::SourceInfo> { + self.loc.left().map(|loc| self.body.source_info(loc)) + } + + pub fn current_span(&self) -> Span { + match self.loc { + Left(loc) => self.body.source_info(loc).span, + Right(span) => span, + } + } + + pub fn lint_root(&self, tcx: TyCtxt<'tcx>) -> Option { + // We first try to get a HirId via the current source scope, + // and fall back to `body.source`. + self.current_source_info() + .and_then(|source_info| match &self.body.source_scopes[source_info.scope].local_data { + mir::ClearCrossCrate::Set(data) => Some(data.lint_root), + mir::ClearCrossCrate::Clear => None, + }) + .or_else(|| { + let def_id = self.body.source.def_id().as_local(); + def_id.map(|def_id| tcx.local_def_id_to_hir_id(def_id)) + }) + } + + /// Returns the address of the buffer where the locals are stored. This is used by `Place` as a + /// sanity check to detect bugs where we mix up which stack frame a place refers to. 
+ #[inline(always)] + pub(super) fn locals_addr(&self) -> usize { + self.locals.raw.as_ptr().addr() + } + + #[must_use] + pub fn generate_stacktrace_from_stack(stack: &[Self]) -> Vec> { + let mut frames = Vec::new(); + // This deliberately does *not* honor `requires_caller_location` since it is used for much + // more than just panics. + for frame in stack.iter().rev() { + let span = match frame.loc { + Left(loc) => { + // If the stacktrace passes through MIR-inlined source scopes, add them. + let mir::SourceInfo { mut span, scope } = *frame.body.source_info(loc); + let mut scope_data = &frame.body.source_scopes[scope]; + while let Some((instance, call_span)) = scope_data.inlined { + frames.push(FrameInfo { span, instance }); + span = call_span; + scope_data = &frame.body.source_scopes[scope_data.parent_scope.unwrap()]; + } + span + } + Right(span) => span, + }; + frames.push(FrameInfo { span, instance: frame.instance }); + } + trace!("generate stacktrace: {:#?}", frames); + frames + } +} + +impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { + /// Very low-level helper that pushes a stack frame without initializing + /// the arguments or local variables. + /// + /// The high-level version of this is `init_stack_frame`. + #[instrument(skip(self, body, return_place, return_to_block), level = "debug")] + pub(crate) fn push_stack_frame_raw( + &mut self, + instance: ty::Instance<'tcx>, + body: &'tcx mir::Body<'tcx>, + return_place: &MPlaceTy<'tcx, M::Provenance>, + return_to_block: StackPopCleanup, + ) -> InterpResult<'tcx> { + trace!("body: {:#?}", body); + + // We can push a `Root` frame if and only if the stack is empty. + debug_assert_eq!( + self.stack().is_empty(), + matches!(return_to_block, StackPopCleanup::Root { .. }) + ); + + // First push a stack frame so we have access to `instantiate_from_current_frame` and other + // `self.frame()`-based functions. + let dead_local = LocalState { value: LocalValue::Dead, layout: Cell::new(None) }; + let locals = IndexVec::from_elem(dead_local, &body.local_decls); + let pre_frame = Frame { + body, + loc: Right(body.span), // Span used for errors caused during preamble. + return_to_block, + return_place: return_place.clone(), + locals, + instance, + tracing_span: SpanGuard::new(), + extra: (), + }; + let frame = M::init_frame(self, pre_frame)?; + self.stack_mut().push(frame); + + // Make sure all the constants required by this frame evaluate successfully (post-monomorphization check). + for &const_ in body.required_consts() { + let c = + self.instantiate_from_current_frame_and_normalize_erasing_regions(const_.const_)?; + c.eval(*self.tcx, self.param_env, const_.span).map_err(|err| { + err.emit_note(*self.tcx); + err + })?; + } + + // Finish things up. + M::after_stack_push(self)?; + self.frame_mut().loc = Left(mir::Location::START); + let span = info_span!("frame", "{}", instance); + self.frame_mut().tracing_span.enter(span); + + Ok(()) + } + + /// Low-level helper that pops a stack frame from the stack and returns some information about + /// it. + /// + /// This also deallocates locals, if necessary. + /// + /// [`M::before_stack_pop`] should be called before calling this function. + /// [`M::after_stack_pop`] is called by this function automatically. + /// + /// The high-level version of this is `return_from_current_stack_frame`. 
+ /// + /// [`M::before_stack_pop`]: Machine::before_stack_pop + /// [`M::after_stack_pop`]: Machine::after_stack_pop + pub(super) fn pop_stack_frame_raw( + &mut self, + unwinding: bool, + ) -> InterpResult<'tcx, StackPopInfo<'tcx, M::Provenance>> { + let cleanup = self.cleanup_current_frame_locals()?; + + let frame = + self.stack_mut().pop().expect("tried to pop a stack frame, but there were none"); + + let return_to_block = frame.return_to_block; + let return_place = frame.return_place.clone(); + + let return_action; + if cleanup { + return_action = M::after_stack_pop(self, frame, unwinding)?; + assert_ne!(return_action, ReturnAction::NoCleanup); + } else { + return_action = ReturnAction::NoCleanup; + }; + + Ok(StackPopInfo { return_action, return_to_block, return_place }) + } + + /// A private helper for [`pop_stack_frame_raw`](InterpCx::pop_stack_frame_raw). + /// Returns `true` if cleanup has been done, `false` otherwise. + fn cleanup_current_frame_locals(&mut self) -> InterpResult<'tcx, bool> { + // Cleanup: deallocate locals. + // Usually we want to clean up (deallocate locals), but in a few rare cases we don't. + // We do this while the frame is still on the stack, so errors point to the callee. + let return_to_block = self.frame().return_to_block; + let cleanup = match return_to_block { + StackPopCleanup::Goto { .. } => true, + StackPopCleanup::Root { cleanup, .. } => cleanup, + }; + + if cleanup { + // We need to take the locals out, since we need to mutate while iterating. + let locals = mem::take(&mut self.frame_mut().locals); + for local in &locals { + self.deallocate_local(local.value)?; + } + } + + Ok(cleanup) + } + + /// In the current stack frame, mark all locals as live that are not arguments and don't have + /// `Storage*` annotations (this includes the return place). + pub(crate) fn storage_live_for_always_live_locals(&mut self) -> InterpResult<'tcx> { + self.storage_live(mir::RETURN_PLACE)?; + + let body = self.body(); + let always_live = always_storage_live_locals(body); + for local in body.vars_and_temps_iter() { + if always_live.contains(local) { + self.storage_live(local)?; + } + } + Ok(()) + } + + pub fn storage_live_dyn( + &mut self, + local: mir::Local, + meta: MemPlaceMeta, + ) -> InterpResult<'tcx> { + trace!("{:?} is now live", local); + + // We avoid `ty.is_trivially_sized` since that does something expensive for ADTs. + fn is_very_trivially_sized(ty: Ty<'_>) -> bool { + match ty.kind() { + ty::Infer(ty::IntVar(_) | ty::FloatVar(_)) + | ty::Uint(_) + | ty::Int(_) + | ty::Bool + | ty::Float(_) + | ty::FnDef(..) + | ty::FnPtr(_) + | ty::RawPtr(..) + | ty::Char + | ty::Ref(..) + | ty::Coroutine(..) + | ty::CoroutineWitness(..) + | ty::Array(..) + | ty::Closure(..) + | ty::CoroutineClosure(..) + | ty::Never + | ty::Error(_) + | ty::Dynamic(_, _, ty::DynStar) => true, + + ty::Str | ty::Slice(_) | ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => false, + + ty::Tuple(tys) => tys.last().is_none_or(|ty| is_very_trivially_sized(*ty)), + + ty::Pat(ty, ..) => is_very_trivially_sized(*ty), + + // We don't want to do any queries, so there is not much we can do with ADTs. + ty::Adt(..) => false, + + ty::Alias(..) | ty::Param(_) | ty::Placeholder(..) => false, + + ty::Infer(ty::TyVar(_)) => false, + + ty::Bound(..) + | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => { + bug!("`is_very_trivially_sized` applied to unexpected type: {}", ty) + } + } + } + + // This is a hot function, we avoid computing the layout when possible. 
+        // `unsized_` will be `None` for sized types and `Some(layout)` for unsized types.
+        let unsized_ = if is_very_trivially_sized(self.body().local_decls[local].ty) {
+            None
+        } else {
+            // We need the layout.
+            let layout = self.layout_of_local(self.frame(), local, None)?;
+            if layout.is_sized() { None } else { Some(layout) }
+        };
+
+        let local_val = LocalValue::Live(if let Some(layout) = unsized_ {
+            if !meta.has_meta() {
+                throw_unsup!(UnsizedLocal);
+            }
+            // Need to allocate some memory, since `Immediate::Uninit` cannot be unsized.
+            let dest_place = self.allocate_dyn(layout, MemoryKind::Stack, meta)?;
+            Operand::Indirect(*dest_place.mplace())
+        } else {
+            assert!(!meta.has_meta()); // we're dropping the metadata
+            // Just make this an efficient immediate.
+            // Note that not calling `layout_of` here does have one real consequence:
+            // if the type is too big, we'll only notice this when the local is actually initialized,
+            // which is a bit too late -- we should ideally notice this already here, when the memory
+            // is conceptually allocated. But given how rare that error is and that this is a hot function,
+            // we accept this downside for now.
+            Operand::Immediate(Immediate::Uninit)
+        });
+
+        // If the local is already live, deallocate its old memory.
+        let old = mem::replace(&mut self.frame_mut().locals[local].value, local_val);
+        self.deallocate_local(old)?;
+        Ok(())
+    }
+
+    /// Mark a local as live, killing the previous content.
+    #[inline(always)]
+    pub fn storage_live(&mut self, local: mir::Local) -> InterpResult<'tcx> {
+        self.storage_live_dyn(local, MemPlaceMeta::None)
+    }
+
+    pub fn storage_dead(&mut self, local: mir::Local) -> InterpResult<'tcx> {
+        assert!(local != mir::RETURN_PLACE, "Cannot make return place dead");
+        trace!("{:?} is now dead", local);
+
+        // If the local is already dead, this is a NOP.
+        let old = mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead);
+        self.deallocate_local(old)?;
+        Ok(())
+    }
+
+    fn deallocate_local(&mut self, local: LocalValue<M::Provenance>) -> InterpResult<'tcx> {
+        if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
+            // All locals have a backing allocation, even if the allocation is empty
+            // due to the local having ZST type. Hence we can `unwrap`.
+            trace!(
+                "deallocating local {:?}: {:?}",
+                local,
+                // Locals always have an `alloc_id` (they are never the result of an int2ptr).
+                self.dump_alloc(ptr.provenance.unwrap().get_alloc_id().unwrap())
+            );
+            self.deallocate_ptr(ptr, None, MemoryKind::Stack)?;
+        };
+        Ok(())
+    }
+
+    #[inline(always)]
+    pub(super) fn layout_of_local(
+        &self,
+        frame: &Frame<'tcx, M::Provenance, M::FrameExtra>,
+        local: mir::Local,
+        layout: Option<TyAndLayout<'tcx>>,
+    ) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
+        let state = &frame.locals[local];
+        if let Some(layout) = state.layout.get() {
+            return Ok(layout);
+        }
+
+        let layout = from_known_layout(self.tcx, self.param_env, layout, || {
+            let local_ty = frame.body.local_decls[local].ty;
+            let local_ty =
+                self.instantiate_from_frame_and_normalize_erasing_regions(frame, local_ty)?;
+            self.layout_of(local_ty)
+        })?;
+
+        // Layouts of locals are requested a lot, so we cache them. 
+ state.layout.set(Some(layout)); + Ok(layout) + } +} + +impl<'tcx, Prov: Provenance> LocalState<'tcx, Prov> { + pub(super) fn print( + &self, + allocs: &mut Vec>, + fmt: &mut std::fmt::Formatter<'_>, + ) -> std::fmt::Result { + match self.value { + LocalValue::Dead => write!(fmt, " is dead")?, + LocalValue::Live(Operand::Immediate(Immediate::Uninit)) => { + write!(fmt, " is uninitialized")? + } + LocalValue::Live(Operand::Indirect(mplace)) => { + write!( + fmt, + " by {} ref {:?}:", + match mplace.meta { + MemPlaceMeta::Meta(meta) => format!(" meta({meta:?})"), + MemPlaceMeta::None => String::new(), + }, + mplace.ptr, + )?; + allocs.extend(mplace.ptr.provenance.map(Provenance::get_alloc_id)); + } + LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => { + write!(fmt, " {val:?}")?; + if let Scalar::Ptr(ptr, _size) = val { + allocs.push(ptr.provenance.get_alloc_id()); + } + } + LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => { + write!(fmt, " ({val1:?}, {val2:?})")?; + if let Scalar::Ptr(ptr, _size) = val1 { + allocs.push(ptr.provenance.get_alloc_id()); + } + if let Scalar::Ptr(ptr, _size) = val2 { + allocs.push(ptr.provenance.get_alloc_id()); + } + } + } + + Ok(()) + } +} diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs index 28cf1068f4081..2527eca344620 100644 --- a/compiler/rustc_const_eval/src/interpret/step.rs +++ b/compiler/rustc_const_eval/src/interpret/step.rs @@ -4,15 +4,29 @@ use either::Either; use rustc_index::IndexSlice; -use rustc_middle::{bug, mir}; +use rustc_middle::ty::layout::FnAbiOf; +use rustc_middle::ty::{self, Instance, Ty}; +use rustc_middle::{bug, mir, span_bug}; +use rustc_span::source_map::Spanned; +use rustc_target::abi::call::FnAbi; use rustc_target::abi::{FieldIdx, FIRST_VARIANT}; use tracing::{info, instrument, trace}; use super::{ - ImmTy, Immediate, InterpCx, InterpResult, Machine, MemPlaceMeta, PlaceTy, Projectable, Scalar, + throw_ub, FnArg, FnVal, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemPlaceMeta, + PlaceTy, Projectable, Scalar, }; use crate::util; +struct EvaluatedCalleeAndArgs<'tcx, M: Machine<'tcx>> { + callee: FnVal<'tcx, M::ExtraFnVal>, + args: Vec>, + fn_sig: ty::FnSig<'tcx>, + fn_abi: &'tcx FnAbi<'tcx, Ty<'tcx>>, + /// True if the function is marked as `#[track_caller]` ([`ty::InstanceKind::requires_caller_location`]) + with_caller_location: bool, +} + impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { /// Returns `true` as long as there are more things to do. /// @@ -36,7 +50,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { if let Some(stmt) = basic_block.statements.get(loc.statement_index) { let old_frames = self.frame_idx(); - self.statement(stmt)?; + self.eval_statement(stmt)?; // Make sure we are not updating `statement_index` of the wrong frame. assert_eq!(old_frames, self.frame_idx()); // Advance the program counter. @@ -47,7 +61,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { M::before_terminator(self)?; let terminator = basic_block.terminator(); - self.terminator(terminator)?; + self.eval_terminator(terminator)?; + if !self.stack().is_empty() { + if let Either::Left(loc) = self.frame().loc { + info!("// executing {:?}", loc.block); + } + } Ok(true) } @@ -55,7 +74,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { /// statement counter. /// /// This does NOT move the statement counter forward, the caller has to do that! 
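Since `step` returns `true` while there is more work to do, a driver simply loops on it. `InterpCx::step` is real, but this loop is a hypothetical illustration; the CTFE and Miri drivers each have their own variant of it:

// Hypothetical driver inside rustc_const_eval: execute statements and
// terminators one at a time until the topmost frame has returned.
use crate::interpret::{InterpCx, InterpResult, Machine};

fn run_to_completion<'tcx, M: Machine<'tcx>>(ecx: &mut InterpCx<'tcx, M>) -> InterpResult<'tcx> {
    while ecx.step()? {}
    Ok(())
}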
-    pub fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
+    pub fn eval_statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
         info!("{:?}", stmt);
 
         use rustc_middle::mir::StatementKind::*;
@@ -349,16 +368,222 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         Ok(())
     }
 
-    /// Evaluate the given terminator. Will also adjust the stack frame and statement position accordingly.
-    fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
+    /// Evaluate a single argument of a function call.
+    fn eval_fn_call_argument(
+        &self,
+        op: &mir::Operand<'tcx>,
+    ) -> InterpResult<'tcx, FnArg<'tcx, M::Provenance>> {
+        Ok(match op {
+            mir::Operand::Copy(_) | mir::Operand::Constant(_) => {
+                // Make a regular copy.
+                let op = self.eval_operand(op, None)?;
+                FnArg::Copy(op)
+            }
+            mir::Operand::Move(place) => {
+                // If this place lives in memory, preserve its location.
+                // We call `place_to_op` which will be an `MPlaceTy` whenever there exists
+                // an mplace for this place. (This is in contrast to `PlaceTy::as_mplace_or_local`
+                // which can return a local even if that has an mplace.)
+                let place = self.eval_place(*place)?;
+                let op = self.place_to_op(&place)?;
+
+                match op.as_mplace_or_imm() {
+                    Either::Left(mplace) => FnArg::InPlace(mplace),
+                    Either::Right(_imm) => {
+                        // This argument doesn't live in memory, so there's no place
+                        // to make inaccessible during the call.
+                        // We rely on there not being any stray `PlaceTy` that would let the
+                        // caller directly access this local!
+                        // This is also crucial for tail calls, where we want the `FnArg` to
+                        // stay valid when the old stack frame gets popped.
+                        FnArg::Copy(op)
+                    }
+                }
+            }
+        })
+    }
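The Copy-versus-InPlace split above boils down to whether the argument has a memory location that must be protected during the call. A schematic sketch, with strings standing in for the interpreter's operand types:

// Stand-ins: `mplace` models an argument that lives in memory, `imm` one that
// only exists as an immediate value in the frame.
enum Arg {
    Copy(String),
    InPlace(String),
}

fn classify(mplace: Option<String>, imm: String) -> Arg {
    match mplace {
        // In-memory argument: keep its location so it can be made inaccessible.
        Some(place) => Arg::InPlace(place),
        // Immediate: there is no memory to protect, so a copy suffices.
        None => Arg::Copy(imm),
    }
}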
+
+    /// Shared part of `Call` and `TailCall` implementation — finding and evaluating all the
+    /// necessary information about callee and arguments to make a call.
+    fn eval_callee_and_args(
+        &self,
+        terminator: &mir::Terminator<'tcx>,
+        func: &mir::Operand<'tcx>,
+        args: &[Spanned<mir::Operand<'tcx>>],
+    ) -> InterpResult<'tcx, EvaluatedCalleeAndArgs<'tcx, M>> {
+        let func = self.eval_operand(func, None)?;
+        let args = args
+            .iter()
+            .map(|arg| self.eval_fn_call_argument(&arg.node))
+            .collect::<InterpResult<'tcx, Vec<_>>>()?;
+
+        let fn_sig_binder = func.layout.ty.fn_sig(*self.tcx);
+        let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig_binder);
+        let extra_args = &args[fn_sig.inputs().len()..];
+        let extra_args =
+            self.tcx.mk_type_list_from_iter(extra_args.iter().map(|arg| arg.layout().ty));
+
+        let (callee, fn_abi, with_caller_location) = match *func.layout.ty.kind() {
+            ty::FnPtr(_sig) => {
+                let fn_ptr = self.read_pointer(&func)?;
+                let fn_val = self.get_ptr_fn(fn_ptr)?;
+                (fn_val, self.fn_abi_of_fn_ptr(fn_sig_binder, extra_args)?, false)
+            }
+            ty::FnDef(def_id, args) => {
+                let instance = self.resolve(def_id, args)?;
+                (
+                    FnVal::Instance(instance),
+                    self.fn_abi_of_instance(instance, extra_args)?,
+                    instance.def.requires_caller_location(*self.tcx),
+                )
+            }
+            _ => {
+                span_bug!(terminator.source_info.span, "invalid callee of type {}", func.layout.ty)
+            }
+        };
+
+        Ok(EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location })
+    }
+
+    fn eval_terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
         info!("{:?}", terminator.kind);
 
-        self.eval_terminator(terminator)?;
-        if !self.stack().is_empty() {
-            if let Either::Left(loc) = self.frame().loc {
-                info!("// executing {:?}", loc.block);
+        use rustc_middle::mir::TerminatorKind::*;
+        match terminator.kind {
+            Return => {
+                self.return_from_current_stack_frame(/* unwinding */ false)?
+            }
+
+            Goto { target } => self.go_to_block(target),
+
+            SwitchInt { ref discr, ref targets } => {
+                let discr = self.read_immediate(&self.eval_operand(discr, None)?)?;
+                trace!("SwitchInt({:?})", *discr);
+
+                // Branch to the `otherwise` case by default, if no match is found.
+                let mut target_block = targets.otherwise();
+
+                for (const_int, target) in targets.iter() {
+                    // Compare using MIR BinOp::Eq, to also support pointer values.
+                    // (Avoiding `self.binary_op` as that does some redundant layout computation.)
+                    let res = self.binary_op(
+                        mir::BinOp::Eq,
+                        &discr,
+                        &ImmTy::from_uint(const_int, discr.layout),
+                    )?;
+                    if res.to_scalar().to_bool()? {
+                        target_block = target;
+                        break;
+                    }
+                }
+
+                self.go_to_block(target_block);
+            }
+
+            Call {
+                ref func,
+                ref args,
+                destination,
+                target,
+                unwind,
+                call_source: _,
+                fn_span: _,
+            } => {
+                let old_stack = self.frame_idx();
+                let old_loc = self.frame().loc;
+
+                let EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location } =
+                    self.eval_callee_and_args(terminator, func, args)?;
+
+                let destination = self.force_allocation(&self.eval_place(destination)?)?;
+                self.init_fn_call(
+                    callee,
+                    (fn_sig.abi, fn_abi),
+                    &args,
+                    with_caller_location,
+                    &destination,
+                    target,
+                    if fn_abi.can_unwind { unwind } else { mir::UnwindAction::Unreachable },
+                )?;
+                // Sanity-check that `init_fn_call` either pushed a new frame or
+                // did a jump to another block.
+ if self.frame_idx() == old_stack && self.frame().loc == old_loc { + span_bug!(terminator.source_info.span, "evaluating this call made no progress"); + } + } + + TailCall { ref func, ref args, fn_span: _ } => { + let old_frame_idx = self.frame_idx(); + + let EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location } = + self.eval_callee_and_args(terminator, func, args)?; + + self.init_fn_tail_call(callee, (fn_sig.abi, fn_abi), &args, with_caller_location)?; + + if self.frame_idx() != old_frame_idx { + span_bug!( + terminator.source_info.span, + "evaluating this tail call pushed a new stack frame" + ); + } + } + + Drop { place, target, unwind, replace: _ } => { + let place = self.eval_place(place)?; + let instance = Instance::resolve_drop_in_place(*self.tcx, place.layout.ty); + if let ty::InstanceKind::DropGlue(_, None) = instance.def { + // This is the branch we enter if and only if the dropped type has no drop glue + // whatsoever. This can happen as a result of monomorphizing a drop of a + // generic. In order to make sure that generic and non-generic code behaves + // roughly the same (and in keeping with Mir semantics) we do nothing here. + self.go_to_block(target); + return Ok(()); + } + trace!("TerminatorKind::drop: {:?}, type {}", place, place.layout.ty); + self.init_drop_in_place_call(&place, instance, target, unwind)?; + } + + Assert { ref cond, expected, ref msg, target, unwind } => { + let ignored = + M::ignore_optional_overflow_checks(self) && msg.is_optional_overflow_check(); + let cond_val = self.read_scalar(&self.eval_operand(cond, None)?)?.to_bool()?; + if ignored || expected == cond_val { + self.go_to_block(target); + } else { + M::assert_panic(self, msg, unwind)?; + } + } + + UnwindTerminate(reason) => { + M::unwind_terminate(self, reason)?; + } + + // When we encounter Resume, we've finished unwinding + // cleanup for the current stack frame. We pop it in order + // to continue unwinding the next frame + UnwindResume => { + trace!("unwinding: resuming from cleanup"); + // By definition, a Resume terminator means + // that we're unwinding + self.return_from_current_stack_frame(/* unwinding */ true)?; + return Ok(()); + } + + // It is UB to ever encounter this. + Unreachable => throw_ub!(Unreachable), + + // These should never occur for MIR we actually run. + FalseEdge { .. } | FalseUnwind { .. } | Yield { .. } | CoroutineDrop => span_bug!( + terminator.source_info.span, + "{:#?} should have been eliminated by MIR pass", + terminator.kind + ), + + InlineAsm { template, ref operands, options, ref targets, .. } => { + M::eval_inline_asm(self, template, operands, options, targets)?; } } + Ok(()) } } diff --git a/compiler/rustc_passes/src/check_attr.rs b/compiler/rustc_passes/src/check_attr.rs index a0d0d80b9572e..cf7d724ab63ae 100644 --- a/compiler/rustc_passes/src/check_attr.rs +++ b/compiler/rustc_passes/src/check_attr.rs @@ -250,6 +250,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> { | sym::deny | sym::forbid | sym::cfg + | sym::cfg_attr // need to be fixed | sym::cfi_encoding // FIXME(cfi_encoding) | sym::may_dangle // FIXME(dropck_eyepatch) diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs index f58a991a616da..a60af3f2d7137 100644 --- a/compiler/rustc_session/src/config.rs +++ b/compiler/rustc_session/src/config.rs @@ -838,10 +838,14 @@ pub enum Input { impl Input { pub fn filestem(&self) -> &str { - match *self { - Input::File(ref ifile) => ifile.file_stem().unwrap().to_str().unwrap(), - Input::Str { .. 
} => "rust_out", + if let Input::File(ifile) = self { + // If for some reason getting the file stem as a UTF-8 string fails, + // then fallback to a fixed name. + if let Some(name) = ifile.file_stem().and_then(OsStr::to_str) { + return name; + } } + "rust_out" } pub fn source_name(&self) -> FileName { diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs index 94cf21da4efb8..32fca6733bb55 100644 --- a/compiler/rustc_span/src/symbol.rs +++ b/compiler/rustc_span/src/symbol.rs @@ -276,6 +276,7 @@ symbols! { Path, PathBuf, Pending, + PinCoerceUnsized, Pointer, Poll, ProcMacro, diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs index 38f50955b122e..7de412595993a 100644 --- a/library/alloc/src/boxed.rs +++ b/library/alloc/src/boxed.rs @@ -200,7 +200,7 @@ use core::ops::{ AsyncFn, AsyncFnMut, AsyncFnOnce, CoerceUnsized, Coroutine, CoroutineState, Deref, DerefMut, DerefPure, DispatchFromDyn, Receiver, }; -use core::pin::Pin; +use core::pin::{Pin, PinCoerceUnsized}; use core::ptr::{self, addr_of_mut, NonNull, Unique}; use core::task::{Context, Poll}; use core::{borrow, fmt, slice}; @@ -2726,3 +2726,6 @@ impl core::error::Error for Box { core::error::Error::provide(&**self, request); } } + +#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")] +unsafe impl PinCoerceUnsized for Box {} diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs index 28b08ef561143..3e44adf73f045 100644 --- a/library/alloc/src/lib.rs +++ b/library/alloc/src/lib.rs @@ -138,6 +138,7 @@ #![feature(maybe_uninit_uninit_array_transpose)] #![feature(panic_internals)] #![feature(pattern)] +#![feature(pin_coerce_unsized_trait)] #![feature(ptr_internals)] #![feature(ptr_metadata)] #![feature(ptr_sub_ptr)] diff --git a/library/alloc/src/rc.rs b/library/alloc/src/rc.rs index 13d218e43a7af..bdee06154faec 100644 --- a/library/alloc/src/rc.rs +++ b/library/alloc/src/rc.rs @@ -256,6 +256,7 @@ use core::ops::{CoerceUnsized, Deref, DerefMut, DerefPure, DispatchFromDyn, Rece use core::panic::{RefUnwindSafe, UnwindSafe}; #[cfg(not(no_global_oom_handling))] use core::pin::Pin; +use core::pin::PinCoerceUnsized; use core::ptr::{self, drop_in_place, NonNull}; #[cfg(not(no_global_oom_handling))] use core::slice::from_raw_parts_mut; @@ -2177,6 +2178,12 @@ impl Deref for Rc { } } +#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")] +unsafe impl PinCoerceUnsized for Rc {} + +#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")] +unsafe impl PinCoerceUnsized for Weak {} + #[unstable(feature = "deref_pure_trait", issue = "87121")] unsafe impl DerefPure for Rc {} @@ -3691,6 +3698,9 @@ impl Deref for UniqueRc { } } +#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")] +unsafe impl PinCoerceUnsized for UniqueRc {} + #[unstable(feature = "unique_rc_arc", issue = "112566")] impl DerefMut for UniqueRc { fn deref_mut(&mut self) -> &mut T { diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs index 7dcf344cdc5e0..2d6bb28106c5a 100644 --- a/library/alloc/src/slice.rs +++ b/library/alloc/src/slice.rs @@ -178,15 +178,25 @@ impl [T] { /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) /// worst-case. /// - /// If `T: Ord` does not implement a total order the resulting order is unspecified. All - /// original elements will remain in the slice and any possible modifications via interior - /// mutability are observed in the input. Same is true if `T: Ord` panics. 
+ /// If the implementation of [`Ord`] for `T` does not implement a [total order] the resulting + /// order of elements in the slice is unspecified. All original elements will remain in the + /// slice and any possible modifications via interior mutability are observed in the input. Same + /// is true if the implementation of [`Ord`] for `T` panics. /// /// When applicable, unstable sorting is preferred because it is generally faster than stable /// sorting and it doesn't allocate auxiliary memory. See /// [`sort_unstable`](slice::sort_unstable). The exception are partially sorted slices, which /// may be better served with `slice::sort`. /// + /// Sorting types that only implement [`PartialOrd`] such as [`f32`] and [`f64`] require + /// additional precautions. For example, `f32::NAN != f32::NAN`, which doesn't fulfill the + /// reflexivity requirement of [`Ord`]. By using an alternative comparison function with + /// [`slice::sort_by`] such as [`f32::total_cmp`] or [`f64::total_cmp`] that defines a [total + /// order] users can sort slices containing floating-point values. Alternatively, if all values + /// in the slice are guaranteed to be in a subset for which [`PartialOrd::partial_cmp`] forms a + /// [total order], it's possible to sort the slice with `sort_by(|a, b| + /// a.partial_cmp(b).unwrap())`. + /// /// # Current implementation /// /// The current implementation is based on [driftsort] by Orson Peters and Lukas Bergdoll, which @@ -198,18 +208,21 @@ impl [T] { /// handled without allocation, medium sized slices allocate `self.len()` and beyond that it /// clamps at `self.len() / 2`. /// - /// If `T: Ord` does not implement a total order, the implementation may panic. + /// # Panics + /// + /// May panic if the implementation of [`Ord`] for `T` does not implement a [total order]. /// /// # Examples /// /// ``` - /// let mut v = [-5, 4, 1, -3, 2]; + /// let mut v = [4, -5, 1, -3, 2]; /// /// v.sort(); - /// assert!(v == [-5, -3, 1, 2, 4]); + /// assert_eq!(v, [-5, -3, 1, 2, 4]); /// ``` /// /// [driftsort]: https://github.com/Voultapher/driftsort + /// [total order]: https://en.wikipedia.org/wiki/Total_order #[cfg(not(no_global_oom_handling))] #[rustc_allow_incoherent_impl] #[stable(feature = "rust1", since = "1.0.0")] @@ -221,30 +234,19 @@ impl [T] { stable_sort(self, T::lt); } - /// Sorts the slice with a comparator function, preserving initial order of equal elements. + /// Sorts the slice with a comparison function, preserving initial order of equal elements. /// /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) /// worst-case. /// - /// The comparator function should define a total ordering for the elements in the slice. If the - /// ordering is not total, the order of the elements is unspecified. - /// - /// If the comparator function does not implement a total order the resulting order is - /// unspecified. All original elements will remain in the slice and any possible modifications - /// via interior mutability are observed in the input. Same is true if the comparator function - /// panics. A total order (for all `a`, `b` and `c`): + /// If the comparison function `compare` does not implement a [total order] the resulting order + /// of elements in the slice is unspecified. All original elements will remain in the slice and + /// any possible modifications via interior mutability are observed in the input. Same is true + /// if `compare` panics. 
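Concretely, the workaround these docs describe looks like this (a stand-alone snippet with arbitrary values):

fn main() {
    // `f64` is only `PartialOrd` (NaN breaks reflexivity), so plain `sort` does
    // not apply; `total_cmp` supplies the total order the docs call for.
    let mut v = [5.0_f64, f64::NAN, 1.5, -0.0, 0.0];
    v.sort_by(f64::total_cmp);
    // `total_cmp` orders -0.0 before +0.0 and sorts this NaN after all numbers.
    assert_eq!(format!("{v:?}"), "[-0.0, 0.0, 1.5, 5.0, NaN]");
}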
/// - /// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and - /// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`. - /// - /// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use - /// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`. - /// - /// ``` - /// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0]; - /// floats.sort_unstable_by(|a, b| a.partial_cmp(b).unwrap()); - /// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]); - /// ``` + /// For example `|a, b| (a - b).cmp(a)` is a comparison function that is neither transitive nor + /// reflexive nor total, `a < b < c < a` with `a = 1, b = 2, c = 3`. For more information and + /// examples see the [`Ord`] documentation. /// /// # Current implementation /// @@ -257,21 +259,24 @@ impl [T] { /// handled without allocation, medium sized slices allocate `self.len()` and beyond that it /// clamps at `self.len() / 2`. /// - /// If `T: Ord` does not implement a total order, the implementation may panic. + /// # Panics + /// + /// May panic if `compare` does not implement a [total order]. /// /// # Examples /// /// ``` - /// let mut v = [5, 4, 1, 3, 2]; + /// let mut v = [4, -5, 1, -3, 2]; /// v.sort_by(|a, b| a.cmp(b)); - /// assert!(v == [1, 2, 3, 4, 5]); + /// assert_eq!(v, [-5, -3, 1, 2, 4]); /// /// // reverse sorting /// v.sort_by(|a, b| b.cmp(a)); - /// assert!(v == [5, 4, 3, 2, 1]); + /// assert_eq!(v, [4, 2, 1, -3, -5]); /// ``` /// /// [driftsort]: https://github.com/Voultapher/driftsort + /// [total order]: https://en.wikipedia.org/wiki/Total_order #[cfg(not(no_global_oom_handling))] #[rustc_allow_incoherent_impl] #[stable(feature = "rust1", since = "1.0.0")] @@ -288,9 +293,10 @@ impl [T] { /// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* \* log(*n*)) /// worst-case, where the key function is *O*(*m*). /// - /// If `K: Ord` does not implement a total order the resulting order is unspecified. - /// All original elements will remain in the slice and any possible modifications via interior - /// mutability are observed in the input. Same is true if `K: Ord` panics. + /// If the implementation of [`Ord`] for `K` does not implement a [total order] the resulting + /// order of elements in the slice is unspecified. All original elements will remain in the + /// slice and any possible modifications via interior mutability are observed in the input. Same + /// is true if the implementation of [`Ord`] for `K` panics. /// /// # Current implementation /// @@ -303,18 +309,21 @@ impl [T] { /// handled without allocation, medium sized slices allocate `self.len()` and beyond that it /// clamps at `self.len() / 2`. /// - /// If `K: Ord` does not implement a total order, the implementation may panic. + /// # Panics + /// + /// May panic if the implementation of [`Ord`] for `K` does not implement a [total order]. /// /// # Examples /// /// ``` - /// let mut v = [-5i32, 4, 1, -3, 2]; + /// let mut v = [4i32, -5, 1, -3, 2]; /// /// v.sort_by_key(|k| k.abs()); - /// assert!(v == [1, 2, -3, 4, -5]); + /// assert_eq!(v, [1, 2, -3, 4, -5]); /// ``` /// /// [driftsort]: https://github.com/Voultapher/driftsort + /// [total order]: https://en.wikipedia.org/wiki/Total_order #[cfg(not(no_global_oom_handling))] #[rustc_allow_incoherent_impl] #[stable(feature = "slice_sort_by_key", since = "1.7.0")] @@ -336,9 +345,10 @@ impl [T] { /// storage to remember the results of key evaluation. 
The order of calls to the key function is /// unspecified and may change in future versions of the standard library. /// - /// If `K: Ord` does not implement a total order the resulting order is unspecified. - /// All original elements will remain in the slice and any possible modifications via interior - /// mutability are observed in the input. Same is true if `K: Ord` panics. + /// If the implementation of [`Ord`] for `K` does not implement a [total order] the resulting + /// order of elements in the slice is unspecified. All original elements will remain in the + /// slice and any possible modifications via interior mutability are observed in the input. Same + /// is true if the implementation of [`Ord`] for `K` panics. /// /// For simple key functions (e.g., functions that are property accesses or basic operations), /// [`sort_by_key`](slice::sort_by_key) is likely to be faster. @@ -355,16 +365,22 @@ impl [T] { /// In the worst case, the algorithm allocates temporary storage in a `Vec<(K, usize)>` the /// length of the slice. /// + /// # Panics + /// + /// May panic if the implementation of [`Ord`] for `K` does not implement a [total order]. + /// /// # Examples /// /// ``` - /// let mut v = [-5i32, 4, 32, -3, 2]; + /// let mut v = [4i32, -5, 1, -3, 2, 10]; /// + /// // Strings are sorted by lexicographical order. /// v.sort_by_cached_key(|k| k.to_string()); - /// assert!(v == [-3, -5, 2, 32, 4]); + /// assert_eq!(v, [-3, -5, 1, 10, 2, 4]); /// ``` /// /// [ipnsort]: https://github.com/Voultapher/sort-research-rs/tree/main/ipnsort + /// [total order]: https://en.wikipedia.org/wiki/Total_order #[cfg(not(no_global_oom_handling))] #[rustc_allow_incoherent_impl] #[stable(feature = "slice_sort_by_cached_key", since = "1.34.0")] diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs index 3ad0dae77dbde..2c0d19b0ada09 100644 --- a/library/alloc/src/sync.rs +++ b/library/alloc/src/sync.rs @@ -20,7 +20,7 @@ use core::marker::{PhantomData, Unsize}; use core::mem::{self, align_of_val_raw, ManuallyDrop}; use core::ops::{CoerceUnsized, Deref, DerefPure, DispatchFromDyn, Receiver}; use core::panic::{RefUnwindSafe, UnwindSafe}; -use core::pin::Pin; +use core::pin::{Pin, PinCoerceUnsized}; use core::ptr::{self, NonNull}; #[cfg(not(no_global_oom_handling))] use core::slice::from_raw_parts_mut; @@ -2142,6 +2142,12 @@ impl Deref for Arc { } } +#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")] +unsafe impl PinCoerceUnsized for Arc {} + +#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")] +unsafe impl PinCoerceUnsized for Weak {} + #[unstable(feature = "deref_pure_trait", issue = "87121")] unsafe impl DerefPure for Arc {} diff --git a/library/alloc/tests/arc.rs b/library/alloc/tests/arc.rs index c37a80dca95c8..dc27c578b57ef 100644 --- a/library/alloc/tests/arc.rs +++ b/library/alloc/tests/arc.rs @@ -227,3 +227,17 @@ fn make_mut_unsized() { assert_eq!(*data, [11, 21, 31]); assert_eq!(*other_data, [110, 20, 30]); } + +#[allow(unused)] +mod pin_coerce_unsized { + use alloc::sync::Arc; + use core::pin::Pin; + + pub trait MyTrait {} + impl MyTrait for String {} + + // Pin coercion should work for Arc + pub fn pin_arc(arg: Pin>) -> Pin> { + arg + } +} diff --git a/library/alloc/tests/boxed.rs b/library/alloc/tests/boxed.rs index 4cacee0414d7d..faee64b2f6738 100644 --- a/library/alloc/tests/boxed.rs +++ b/library/alloc/tests/boxed.rs @@ -179,3 +179,40 @@ unsafe impl Allocator for ConstAllocator { self } } + +#[allow(unused)] +mod pin_coerce_unsized { + use 
alloc::boxed::Box; + use core::pin::Pin; + + trait MyTrait { + fn action(&self) -> &str; + } + impl MyTrait for String { + fn action(&self) -> &str { + &*self + } + } + struct MyStruct; + impl MyTrait for MyStruct { + fn action(&self) -> &str { + "MyStruct" + } + } + + // Pin coercion should work for Box + fn pin_box(arg: Pin>) -> Pin> { + arg + } + + #[test] + fn pin_coerce_unsized_box() { + let my_string = "my string"; + let a_string = Box::pin(String::from(my_string)); + let pin_box_str = pin_box(a_string); + assert_eq!(pin_box_str.as_ref().action(), my_string); + let a_struct = Box::pin(MyStruct); + let pin_box_struct = pin_box(a_struct); + assert_eq!(pin_box_struct.as_ref().action(), "MyStruct"); + } +} diff --git a/library/alloc/tests/lib.rs b/library/alloc/tests/lib.rs index 89538f272f069..3d4add6fae452 100644 --- a/library/alloc/tests/lib.rs +++ b/library/alloc/tests/lib.rs @@ -40,6 +40,7 @@ #![feature(drain_keep_rest)] #![feature(local_waker)] #![feature(vec_pop_if)] +#![feature(unique_rc_arc)] #![allow(internal_features)] #![deny(fuzzy_provenance_casts)] #![deny(unsafe_op_in_unsafe_fn)] diff --git a/library/alloc/tests/rc.rs b/library/alloc/tests/rc.rs index 499740e738ab0..29dbdcf225eb5 100644 --- a/library/alloc/tests/rc.rs +++ b/library/alloc/tests/rc.rs @@ -205,3 +205,20 @@ fn weak_may_dangle() { // `val` dropped here while still borrowed // borrow might be used here, when `val` is dropped and runs the `Drop` code for type `std::rc::Weak` } + +#[allow(unused)] +mod pin_coerce_unsized { + use alloc::rc::{Rc, UniqueRc}; + use core::pin::Pin; + + pub trait MyTrait {} + impl MyTrait for String {} + + // Pin coercion should work for Rc + pub fn pin_rc(arg: Pin>) -> Pin> { + arg + } + pub fn pin_unique_rc(arg: Pin>) -> Pin> { + arg + } +} diff --git a/library/core/src/cell.rs b/library/core/src/cell.rs index 0d66c2b52c84e..d860f3415d982 100644 --- a/library/core/src/cell.rs +++ b/library/core/src/cell.rs @@ -255,6 +255,7 @@ use crate::fmt::{self, Debug, Display}; use crate::marker::{PhantomData, Unsize}; use crate::mem; use crate::ops::{CoerceUnsized, Deref, DerefMut, DerefPure, DispatchFromDyn}; +use crate::pin::PinCoerceUnsized; use crate::ptr::{self, NonNull}; mod lazy; @@ -2396,3 +2397,21 @@ fn assert_coerce_unsized( let _: Cell<&dyn Send> = c; let _: RefCell<&dyn Send> = d; } + +#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")] +unsafe impl PinCoerceUnsized for UnsafeCell {} + +#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")] +unsafe impl PinCoerceUnsized for SyncUnsafeCell {} + +#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")] +unsafe impl PinCoerceUnsized for Cell {} + +#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")] +unsafe impl PinCoerceUnsized for RefCell {} + +#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")] +unsafe impl<'b, T: ?Sized> PinCoerceUnsized for Ref<'b, T> {} + +#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")] +unsafe impl<'b, T: ?Sized> PinCoerceUnsized for RefMut<'b, T> {} diff --git a/library/core/src/pin.rs b/library/core/src/pin.rs index d752151d10cc8..0569b8b762433 100644 --- a/library/core/src/pin.rs +++ b/library/core/src/pin.rs @@ -1715,10 +1715,56 @@ impl fmt::Pointer for Pin { // for other reasons, though, so we just need to take care not to allow such // impls to land in std. 
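What these impls preserve is the user-visible coercion below, which compiles on stable today; the new `PinCoerceUnsized` bound only restricts which pointer types may take part:

use std::pin::Pin;

// Unsizing a pinned Box: allowed because Box is a "well-behaved" pointer,
// which is exactly what implementing PinCoerceUnsized asserts.
fn unsize_pinned(p: Pin<Box<[u8; 4]>>) -> Pin<Box<[u8]>> {
    p
}

fn main() {
    let pinned = Box::pin([1u8, 2, 3, 4]);
    assert_eq!(unsize_pinned(pinned).len(), 4);
}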
#[stable(feature = "pin", since = "1.33.0")] -impl CoerceUnsized> for Pin where Ptr: CoerceUnsized {} +impl CoerceUnsized> for Pin +where + Ptr: CoerceUnsized + PinCoerceUnsized, + U: PinCoerceUnsized, +{ +} + +#[stable(feature = "pin", since = "1.33.0")] +impl DispatchFromDyn> for Pin +where + Ptr: DispatchFromDyn + PinCoerceUnsized, + U: PinCoerceUnsized, +{ +} + +#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")] +/// Trait that indicates that this is a pointer or a wrapper for one, where +/// unsizing can be performed on the pointee when it is pinned. +/// +/// # Safety +/// +/// If this type implements `Deref`, then the concrete type returned by `deref` +/// and `deref_mut` must not change without a modification. The following +/// operations are not considered modifications: +/// +/// * Moving the pointer. +/// * Performing unsizing coercions on the pointer. +/// * Performing dynamic dispatch with the pointer. +/// * Calling `deref` or `deref_mut` on the pointer. +/// +/// The concrete type of a trait object is the type that the vtable corresponds +/// to. The concrete type of a slice is an array of the same element type and +/// the length specified in the metadata. The concrete type of a sized type +/// is the type itself. +pub unsafe trait PinCoerceUnsized {} + +#[stable(feature = "pin", since = "1.33.0")] +unsafe impl<'a, T: ?Sized> PinCoerceUnsized for &'a T {} + +#[stable(feature = "pin", since = "1.33.0")] +unsafe impl<'a, T: ?Sized> PinCoerceUnsized for &'a mut T {} + +#[stable(feature = "pin", since = "1.33.0")] +unsafe impl PinCoerceUnsized for Pin {} + +#[stable(feature = "pin", since = "1.33.0")] +unsafe impl PinCoerceUnsized for *const T {} #[stable(feature = "pin", since = "1.33.0")] -impl DispatchFromDyn> for Pin where Ptr: DispatchFromDyn {} +unsafe impl PinCoerceUnsized for *mut T {} /// Constructs a [Pin]<[&mut] T>, by pinning a `value: T` locally. 
/// diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs index 4a716a7503964..44db227b79efd 100644 --- a/library/core/src/ptr/non_null.rs +++ b/library/core/src/ptr/non_null.rs @@ -3,6 +3,7 @@ use crate::marker::Unsize; use crate::mem::{MaybeUninit, SizedTypeProperties}; use crate::num::NonZero; use crate::ops::{CoerceUnsized, DispatchFromDyn}; +use crate::pin::PinCoerceUnsized; use crate::ptr::Unique; use crate::slice::{self, SliceIndex}; use crate::ub_checks::assert_unsafe_precondition; @@ -1724,6 +1725,9 @@ impl<T: ?Sized, U: ?Sized> CoerceUnsized<NonNull<U>> for NonNull<T> where T: Uns #[unstable(feature = "dispatch_from_dyn", issue = "none")] impl<T: ?Sized, U: ?Sized> DispatchFromDyn<NonNull<U>> for NonNull<T> where T: Unsize<U> {} +#[stable(feature = "pin", since = "1.33.0")] +unsafe impl<T: ?Sized> PinCoerceUnsized for NonNull<T> {} + #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> fmt::Debug for NonNull<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { diff --git a/library/core/src/ptr/unique.rs b/library/core/src/ptr/unique.rs index b74d691e45427..4810ebe01f9bb 100644 --- a/library/core/src/ptr/unique.rs +++ b/library/core/src/ptr/unique.rs @@ -1,6 +1,7 @@ use crate::fmt; use crate::marker::{PhantomData, Unsize}; use crate::ops::{CoerceUnsized, DispatchFromDyn}; +use crate::pin::PinCoerceUnsized; use crate::ptr::NonNull; /// A wrapper around a raw non-null `*mut T` that indicates that the possessor @@ -166,6 +167,9 @@ impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsiz #[unstable(feature = "ptr_internals", issue = "none")] impl<T: ?Sized, U: ?Sized> DispatchFromDyn<Unique<U>> for Unique<T> where T: Unsize<U> {} +#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")] +unsafe impl<T: ?Sized> PinCoerceUnsized for Unique<T> {} + #[unstable(feature = "ptr_internals", issue = "none")] impl<T: ?Sized> fmt::Debug for Unique<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs index b1440214d795a..cb93359fb8811 100644 --- a/library/core/src/slice/mod.rs +++ b/library/core/src/slice/mod.rs @@ -28,6 +28,7 @@ pub mod memchr; issue = "none", reason = "exposed from core to be reused in std;" )] +#[doc(hidden)] pub mod sort; mod ascii; @@ -2880,9 +2881,19 @@ impl [T] { /// This sort is unstable (i.e., may reorder equal elements), in-place (i.e., does not /// allocate), and *O*(*n* \* log(*n*)) worst-case. /// - /// If `T: Ord` does not implement a total order the resulting order is unspecified. All - /// original elements will remain in the slice and any possible modifications via interior - /// mutability are observed in the input. Same is true if `T: Ord` panics. + /// If the implementation of [`Ord`] for `T` does not implement a [total order] the resulting + /// order of elements in the slice is unspecified. All original elements will remain in the + /// slice and any possible modifications via interior mutability are observed in the input. Same + /// is true if the implementation of [`Ord`] for `T` panics. + /// + /// Sorting types that only implement [`PartialOrd`] such as [`f32`] and [`f64`] require + /// additional precautions. For example, `f32::NAN != f32::NAN`, which doesn't fulfill the + /// reflexivity requirement of [`Ord`]. By using an alternative comparison function with + /// [`slice::sort_unstable_by`] such as [`f32::total_cmp`] or [`f64::total_cmp`] that defines a + /// [total order] users can sort slices containing floating-point values.
Alternatively, if all + /// values in the slice are guaranteed to be in a subset for which [`PartialOrd::partial_cmp`] + /// forms a [total order], it's possible to sort the slice with `sort_unstable_by(|a, b| + /// a.partial_cmp(b).unwrap())`. /// /// # Current implementation /// @@ -2894,18 +2905,21 @@ impl [T] { /// It is typically faster than stable sorting, except in a few special cases, e.g., when the /// slice is partially sorted. /// - /// If `T: Ord` does not implement a total order, the implementation may panic. + /// # Panics + /// + /// May panic if the implementation of [`Ord`] for `T` does not implement a [total order]. /// /// # Examples /// /// ``` - /// let mut v = [-5, 4, 1, -3, 2]; + /// let mut v = [4, -5, 1, -3, 2]; /// /// v.sort_unstable(); - /// assert!(v == [-5, -3, 1, 2, 4]); + /// assert_eq!(v, [-5, -3, 1, 2, 4]); /// ``` /// /// [ipnsort]: https://github.com/Voultapher/sort-research-rs/tree/main/ipnsort + /// [total order]: https://en.wikipedia.org/wiki/Total_order #[stable(feature = "sort_unstable", since = "1.20.0")] #[inline] pub fn sort_unstable(&mut self) @@ -2915,31 +2929,20 @@ impl [T] { sort::unstable::sort(self, &mut T::lt); } - /// Sorts the slice with a comparator function, **without** preserving the initial order of + /// Sorts the slice with a comparison function, **without** preserving the initial order of /// equal elements. /// /// This sort is unstable (i.e., may reorder equal elements), in-place (i.e., does not /// allocate), and *O*(*n* \* log(*n*)) worst-case. /// - /// The comparator function should define a total ordering for the elements in the slice. If the - /// ordering is not total, the order of the elements is unspecified. - /// - /// If the comparator function does not implement a total order the resulting order is - /// unspecified. All original elements will remain in the slice and any possible modifications - /// via interior mutability are observed in the input. Same is true if the comparator function - /// panics. A total order (for all `a`, `b` and `c`): - /// - /// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and - /// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`. + /// If the comparison function `compare` does not implement a [total order] the resulting order + /// of elements in the slice is unspecified. All original elements will remain in the slice and + /// any possible modifications via interior mutability are observed in the input. Same is true + /// if `compare` panics. /// - /// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use - /// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`. - /// - /// ``` - /// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0]; - /// floats.sort_unstable_by(|a, b| a.partial_cmp(b).unwrap()); - /// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]); - /// ``` + /// For example `|a, b| (a - b).cmp(a)` is a comparison function that is neither transitive nor + /// reflexive nor total, `a < b < c < a` with `a = 1, b = 2, c = 3`. For more information and + /// examples see the [`Ord`] documentation. /// /// # Current implementation /// @@ -2951,21 +2954,24 @@ impl [T] { /// It is typically faster than stable sorting, except in a few special cases, e.g., when the /// slice is partially sorted. /// - /// If `T: Ord` does not implement a total order, the implementation may panic. 
+ /// # Panics + + /// May panic if `compare` does not implement a [total order]. /// /// # Examples /// /// ``` - /// let mut v = [5, 4, 1, 3, 2]; + /// let mut v = [4, -5, 1, -3, 2]; /// v.sort_unstable_by(|a, b| a.cmp(b)); - /// assert!(v == [1, 2, 3, 4, 5]); + /// assert_eq!(v, [-5, -3, 1, 2, 4]); /// /// // reverse sorting /// v.sort_unstable_by(|a, b| b.cmp(a)); - /// assert!(v == [5, 4, 3, 2, 1]); + /// assert_eq!(v, [4, 2, 1, -3, -5]); /// ``` /// /// [ipnsort]: https://github.com/Voultapher/sort-research-rs/tree/main/ipnsort + /// [total order]: https://en.wikipedia.org/wiki/Total_order #[stable(feature = "sort_unstable", since = "1.20.0")] #[inline] pub fn sort_unstable_by<F>(&mut self, mut compare: F) @@ -2981,9 +2987,10 @@ impl [T] { /// This sort is unstable (i.e., may reorder equal elements), in-place (i.e., does not /// allocate), and *O*(*n* \* log(*n*)) worst-case. /// - /// If `K: Ord` does not implement a total order the resulting order is unspecified. - /// All original elements will remain in the slice and any possible modifications via interior - /// mutability are observed in the input. Same is true if `K: Ord` panics. + /// If the implementation of [`Ord`] for `K` does not implement a [total order] the resulting + /// order of elements in the slice is unspecified. All original elements will remain in the + /// slice and any possible modifications via interior mutability are observed in the input. Same + /// is true if the implementation of [`Ord`] for `K` panics. /// /// # Current implementation /// @@ -2995,18 +3002,21 @@ impl [T] { /// It is typically faster than stable sorting, except in a few special cases, e.g., when the /// slice is partially sorted. /// - /// If `K: Ord` does not implement a total order, the implementation may panic. + /// # Panics + /// + /// May panic if the implementation of [`Ord`] for `K` does not implement a [total order]. /// /// # Examples /// /// ``` - /// let mut v = [-5i32, 4, 1, -3, 2]; + /// let mut v = [4i32, -5, 1, -3, 2]; /// + /// v.sort_unstable_by_key(|k| k.abs()); - /// assert!(v == [1, 2, -3, 4, -5]); + /// assert_eq!(v, [1, 2, -3, 4, -5]); /// ``` /// /// [ipnsort]: https://github.com/Voultapher/sort-research-rs/tree/main/ipnsort + /// [total order]: https://en.wikipedia.org/wiki/Total_order #[stable(feature = "sort_unstable", since = "1.20.0")] #[inline] pub fn sort_unstable_by_key<K, F>(&mut self, mut f: F) @@ -3038,15 +3048,14 @@ impl [T] { /// Median of Medians using Tukey's Ninther for pivot selection, which guarantees linear runtime /// for all inputs. /// - /// It is typically faster than stable sorting, except in a few special cases, e.g., when the - /// slice is nearly fully sorted, where `slice::sort` may be faster. - /// /// [`sort_unstable`]: slice::sort_unstable /// /// # Panics /// /// Panics when `index >= len()`, meaning it always panics on empty slices. /// + /// May panic if the implementation of [`Ord`] for `T` does not implement a [total order]. + /// /// # Examples /// /// ``` @@ -3069,6 +3078,7 @@ impl [T] { /// ``` /// /// [ipnsort]: https://github.com/Voultapher/sort-research-rs/tree/main/ipnsort + /// [total order]: https://en.wikipedia.org/wiki/Total_order #[stable(feature = "slice_select_nth_unstable", since = "1.49.0")] #[inline] pub fn select_nth_unstable(&mut self, index: usize) -> (&mut [T], &mut T, &mut [T]) @@ -3099,15 +3109,14 @@ impl [T] { /// Median of Medians using Tukey's Ninther for pivot selection, which guarantees linear runtime /// for all inputs.
/// - /// It is typically faster than stable sorting, except in a few special cases, e.g., when the - /// slice is nearly fully sorted, where `slice::sort` may be faster. - /// /// [`sort_unstable`]: slice::sort_unstable /// /// # Panics /// /// Panics when `index >= len()`, meaning it always panics on empty slices. /// + /// May panic if `compare` does not implement a [total order]. + /// /// # Examples /// /// ``` @@ -3130,6 +3139,7 @@ impl [T] { /// ``` /// /// [ipnsort]: https://github.com/Voultapher/sort-research-rs/tree/main/ipnsort + /// [total order]: https://en.wikipedia.org/wiki/Total_order #[stable(feature = "slice_select_nth_unstable", since = "1.49.0")] #[inline] pub fn select_nth_unstable_by<F>( @@ -3164,15 +3174,14 @@ impl [T] { /// Median of Medians using Tukey's Ninther for pivot selection, which guarantees linear runtime /// for all inputs. /// - /// It is typically faster than stable sorting, except in a few special cases, e.g., when the - /// slice is nearly fully sorted, where `slice::sort` may be faster. - /// /// [`sort_unstable`]: slice::sort_unstable /// /// # Panics /// /// Panics when `index >= len()`, meaning it always panics on empty slices. /// + /// May panic if the implementation of [`Ord`] for `K` does not implement a [total order]. + /// /// # Examples /// /// ``` @@ -3195,6 +3204,7 @@ impl [T] { /// ``` /// /// [ipnsort]: https://github.com/Voultapher/sort-research-rs/tree/main/ipnsort + /// [total order]: https://en.wikipedia.org/wiki/Total_order #[stable(feature = "slice_select_nth_unstable", since = "1.49.0")] #[inline] pub fn select_nth_unstable_by_key<K, F>( diff --git a/library/core/src/slice/sort/shared/smallsort.rs b/library/core/src/slice/sort/shared/smallsort.rs index 5064c5a0ae55a..db0c5c72822c0 100644 --- a/library/core/src/slice/sort/shared/smallsort.rs +++ b/library/core/src/slice/sort/shared/smallsort.rs @@ -831,9 +831,9 @@ unsafe fn bidirectional_merge<T: FreezeMarker, F: FnMut(&T, &T) -> bool>( right = right.add((!left_nonempty) as usize); } - // We now should have consumed the full input exactly once. This can - // only fail if the comparison operator fails to be Ord, in which case - // we will panic and never access the inconsistent state in dst. + // We now should have consumed the full input exactly once. This can only fail if the + // user-provided comparison function fails to implement a strict weak ordering. In that case + // we panic and never access the inconsistent state in dst. if left != left_end || right != right_end { panic_on_ord_violation(); } @@ -842,7 +842,21 @@ unsafe fn bidirectional_merge<T: FreezeMarker, F: FnMut(&T, &T) -> bool>( #[inline(never)] fn panic_on_ord_violation() -> ! { - panic!("Ord violation"); + // This is indicative of a logic bug in the user-provided comparison function or Ord + // implementation. They are expected to implement a total order as explained in the Ord + // documentation. + // + // By panicking we inform the user that they have a logic bug in their program. If a strict + // weak ordering is not given, the concept of comparison-based sorting cannot yield a sorted + // result. E.g.: a < b < c < a + // + // The Ord documentation requires users to implement a total order. Arguably that's + // unnecessarily strict in the context of sorting. Issues only arise if the weaker requirement + // of a strict weak ordering is violated. + // + // The panic message talks about a total order because that's what the Ord documentation talks + // about and requires, so as to not confuse users.
+ panic!("user-provided comparison function does not correctly implement a total order"); } #[must_use] diff --git a/library/core/src/slice/sort/unstable/mod.rs b/library/core/src/slice/sort/unstable/mod.rs index ed735e1ebfbc0..932e01f4401e5 100644 --- a/library/core/src/slice/sort/unstable/mod.rs +++ b/library/core/src/slice/sort/unstable/mod.rs @@ -8,7 +8,7 @@ use crate::slice::sort::shared::smallsort::insertion_sort_shift_left; pub(crate) mod heapsort; pub(crate) mod quicksort; -/// Unstable sort called ipnsort by Lukas Bergdoll. +/// Unstable sort called ipnsort by Lukas Bergdoll and Orson Peters. /// Design document: /// /// diff --git a/library/core/tests/pin.rs b/library/core/tests/pin.rs index 6f617c8d0c297..7a6af46a74323 100644 --- a/library/core/tests/pin.rs +++ b/library/core/tests/pin.rs @@ -29,3 +29,49 @@ fn pin_const() { pin_mut_const(); } + +#[allow(unused)] +mod pin_coerce_unsized { + use core::cell::{Cell, RefCell, UnsafeCell}; + use core::pin::Pin; + use core::ptr::NonNull; + + pub trait MyTrait {} + impl MyTrait for String {} + + // These Pins should continue to compile. + // Do note that these instances of Pin types cannot be used + // meaningfully because all methods require a Deref/DerefMut + // bounds on the pointer type and Cell, RefCell and UnsafeCell + // do not implement Deref/DerefMut. + + pub fn cell(arg: Pin>>) -> Pin>> { + arg + } + pub fn ref_cell(arg: Pin>>) -> Pin>> { + arg + } + pub fn unsafe_cell(arg: Pin>>) -> Pin>> { + arg + } + + // These sensible Pin coercions are possible. + pub fn pin_mut_ref(arg: Pin<&mut String>) -> Pin<&mut dyn MyTrait> { + arg + } + pub fn pin_ref(arg: Pin<&String>) -> Pin<&dyn MyTrait> { + arg + } + pub fn pin_ptr(arg: Pin<*const String>) -> Pin<*const dyn MyTrait> { + arg + } + pub fn pin_ptr_mut(arg: Pin<*mut String>) -> Pin<*mut dyn MyTrait> { + arg + } + pub fn pin_non_null(arg: Pin>) -> Pin> { + arg + } + pub fn nesting_pins(arg: Pin>) -> Pin> { + arg + } +} diff --git a/library/std/src/io/buffered/bufreader.rs b/library/std/src/io/buffered/bufreader.rs index f11dd50c5e2b7..0b12e5777c840 100644 --- a/library/std/src/io/buffered/bufreader.rs +++ b/library/std/src/io/buffered/bufreader.rs @@ -94,6 +94,40 @@ impl BufReader { pub fn with_capacity(capacity: usize, inner: R) -> BufReader { BufReader { inner, buf: Buffer::with_capacity(capacity) } } + + /// Attempt to look ahead `n` bytes. + /// + /// `n` must be less than `capacity`. 
+ /// + /// ## Examples + /// + /// ```rust + /// #![feature(bufreader_peek)] + /// use std::io::{Read, BufReader}; + /// + /// let mut bytes = &b"oh, hello"[..]; + /// let mut rdr = BufReader::with_capacity(6, &mut bytes); + /// assert_eq!(rdr.peek(2).unwrap(), b"oh"); + /// let mut buf = [0; 4]; + /// rdr.read(&mut buf[..]).unwrap(); + /// assert_eq!(&buf, b"oh, "); + /// assert_eq!(rdr.peek(2).unwrap(), b"he"); + /// let mut s = String::new(); + /// rdr.read_to_string(&mut s).unwrap(); + /// assert_eq!(&s, "hello"); + /// ``` + #[unstable(feature = "bufreader_peek", issue = "128405")] + pub fn peek(&mut self, n: usize) -> io::Result<&[u8]> { + assert!(n <= self.capacity()); + while n > self.buf.buffer().len() { + if self.buf.pos() > 0 { + self.buf.backshift(); + } + self.buf.read_more(&mut self.inner)?; + debug_assert_eq!(self.buf.pos(), 0); + } + Ok(&self.buf.buffer()[..n]) + } } impl BufReader { diff --git a/library/std/src/io/buffered/bufreader/buffer.rs b/library/std/src/io/buffered/bufreader/buffer.rs index 796137c0123e7..ccd67fafb45b4 100644 --- a/library/std/src/io/buffered/bufreader/buffer.rs +++ b/library/std/src/io/buffered/bufreader/buffer.rs @@ -97,6 +97,27 @@ impl Buffer { self.pos = self.pos.saturating_sub(amt); } + /// Read more bytes into the buffer without discarding any of its contents + pub fn read_more(&mut self, mut reader: impl Read) -> io::Result<()> { + let mut buf = BorrowedBuf::from(&mut self.buf[self.pos..]); + let old_init = self.initialized - self.pos; + unsafe { + buf.set_init(old_init); + } + reader.read_buf(buf.unfilled())?; + self.filled += buf.len(); + self.initialized += buf.init_len() - old_init; + Ok(()) + } + + /// Remove bytes that have already been read from the buffer. + pub fn backshift(&mut self) { + self.buf.copy_within(self.pos.., 0); + self.initialized -= self.pos; + self.filled -= self.pos; + self.pos = 0; + } + #[inline] pub fn fill_buf(&mut self, mut reader: impl Read) -> io::Result<&[u8]> { // If we've reached the end of our internal buffer then we need to fetch diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs index 05e33d47bac39..67405d788bd9b 100644 --- a/library/std/src/lib.rs +++ b/library/std/src/lib.rs @@ -339,6 +339,7 @@ #![feature(maybe_uninit_write_slice)] #![feature(panic_can_unwind)] #![feature(panic_internals)] +#![feature(pin_coerce_unsized_trait)] #![feature(pointer_is_aligned_to)] #![feature(portable_simd)] #![feature(prelude_2024)] diff --git a/library/std/src/os/vxworks/mod.rs b/library/std/src/os/vxworks/mod.rs index 0a7ac641dd3e1..b09aa72f72693 100644 --- a/library/std/src/os/vxworks/mod.rs +++ b/library/std/src/os/vxworks/mod.rs @@ -1,6 +1,7 @@ //! 
VxWorks-specific definitions #![stable(feature = "raw_ext", since = "1.1.0")] +#![forbid(unsafe_op_in_unsafe_fn)] pub mod fs; pub mod raw; diff --git a/library/std/src/sys/pal/sgx/abi/usercalls/alloc.rs b/library/std/src/sys/pal/sgx/abi/usercalls/alloc.rs index 298095257396a..5069ab82ccc90 100644 --- a/library/std/src/sys/pal/sgx/abi/usercalls/alloc.rs +++ b/library/std/src/sys/pal/sgx/abi/usercalls/alloc.rs @@ -8,6 +8,7 @@ use crate::cell::UnsafeCell; use crate::convert::TryInto; use crate::mem::{self, ManuallyDrop}; use crate::ops::{CoerceUnsized, Deref, DerefMut, Index, IndexMut}; +use crate::pin::PinCoerceUnsized; use crate::ptr::{self, NonNull}; use crate::slice::SliceIndex; use crate::{cmp, intrinsics, slice}; @@ -751,6 +752,9 @@ where #[unstable(feature = "sgx_platform", issue = "56975")] impl<T: Unsize<U>, U> CoerceUnsized<UserRef<U>> for UserRef<T> {} +#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")] +unsafe impl<T: ?Sized> PinCoerceUnsized for UserRef<T> {} + #[unstable(feature = "sgx_platform", issue = "56975")] impl<T, I> Index<I> for UserRef<[T]> where diff --git a/library/std/src/sys/pal/unix/process/process_vxworks.rs b/library/std/src/sys/pal/unix/process/process_vxworks.rs index 6a9d8fab1d412..0477b3d9a70da 100644 --- a/library/std/src/sys/pal/unix/process/process_vxworks.rs +++ b/library/std/src/sys/pal/unix/process/process_vxworks.rs @@ -1,3 +1,4 @@ +#![forbid(unsafe_op_in_unsafe_fn)] use libc::{self, c_char, c_int, RTP_ID}; use crate::io::{self, ErrorKind}; diff --git a/src/tools/miri/src/concurrency/thread.rs b/src/tools/miri/src/concurrency/thread.rs index a53dd7eac1e9b..f72591f0c4bdf 100644 --- a/src/tools/miri/src/concurrency/thread.rs +++ b/src/tools/miri/src/concurrency/thread.rs @@ -256,7 +256,7 @@ pub struct Thread<'tcx> { /// which then forwards it to 'Resume'. However this argument is implicit in MIR, /// so we have to store it out-of-band. When there are multiple active unwinds, /// the innermost one is always caught first, so we can store them as a stack. - pub(crate) panic_payloads: Vec<Scalar>, + pub(crate) panic_payloads: Vec<ImmTy<'tcx>>, /// Last OS error location in memory. It is a 32-bit integer. pub(crate) last_error: Option<MPlaceTy<'tcx>>, @@ -377,10 +377,6 @@ impl VisitProvenance for Frame<'_, Provenance, FrameExtra<'_>> { return_place, locals, extra, - body: _, - instance: _, - return_to_block: _, - loc: _, // There are some private fields we cannot access; they contain no tags. .. } = self; @@ -952,7 +948,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { this.call_function( instance, start_abi, - &[*func_arg], + &[func_arg], Some(&ret_place), StackPopCleanup::Root { cleanup: true }, )?; diff --git a/src/tools/miri/src/eval.rs b/src/tools/miri/src/eval.rs index 53e877517089c..1173da4697564 100644 --- a/src/tools/miri/src/eval.rs +++ b/src/tools/miri/src/eval.rs @@ -307,7 +307,8 @@ pub fn create_ecx<'tcx>( // First argument is constructed later, because it's skipped if the entry function uses #[start]. // Second argument (argc): length of `config.args`. - let argc = Scalar::from_target_usize(u64::try_from(config.args.len()).unwrap(), &ecx); + let argc = + ImmTy::from_int(i64::try_from(config.args.len()).unwrap(), ecx.machine.layouts.isize); // Third argument (`argv`): created from `config.args`. let argv = { // Put each argument in memory, collect pointers. @@ -334,13 +335,11 @@ pub fn create_ecx<'tcx>( ecx.write_immediate(arg, &place)?; } ecx.mark_immutable(&argvs_place); - // A pointer to that place is the 3rd argument for main.
- let argv = argvs_place.to_ref(&ecx); // Store `argc` and `argv` for macOS `_NSGetArg{c,v}`. { let argc_place = ecx.allocate(ecx.machine.layouts.isize, MiriMemoryKind::Machine.into())?; - ecx.write_scalar(argc, &argc_place)?; + ecx.write_immediate(*argc, &argc_place)?; ecx.mark_immutable(&argc_place); ecx.machine.argc = Some(argc_place.ptr()); @@ -348,7 +347,7 @@ pub fn create_ecx<'tcx>( ecx.layout_of(Ty::new_imm_ptr(tcx, tcx.types.unit))?, MiriMemoryKind::Machine.into(), )?; - ecx.write_immediate(argv, &argv_place)?; + ecx.write_pointer(argvs_place.ptr(), &argv_place)?; ecx.mark_immutable(&argv_place); ecx.machine.argv = Some(argv_place.ptr()); } @@ -369,7 +368,7 @@ pub fn create_ecx<'tcx>( } ecx.mark_immutable(&cmd_place); } - argv + ecx.mplace_to_ref(&argvs_place)? }; // Return place (in static memory so that it does not count as leak). @@ -405,10 +404,14 @@ pub fn create_ecx<'tcx>( start_instance, Abi::Rust, &[ - Scalar::from_pointer(main_ptr, &ecx).into(), - argc.into(), + ImmTy::from_scalar( + Scalar::from_pointer(main_ptr, &ecx), + // FIXME use a proper fn ptr type + ecx.machine.layouts.const_raw_ptr, + ), + argc, argv, - Scalar::from_u8(sigpipe).into(), + ImmTy::from_uint(sigpipe, ecx.machine.layouts.u8), ], Some(&ret_place), StackPopCleanup::Root { cleanup: true }, @@ -418,7 +421,7 @@ pub fn create_ecx<'tcx>( ecx.call_function( entry_instance, Abi::Rust, - &[argc.into(), argv], + &[argc, argv], Some(&ret_place), StackPopCleanup::Root { cleanup: true }, )?; diff --git a/src/tools/miri/src/helpers.rs b/src/tools/miri/src/helpers.rs index 8bc8188f05314..1bdf9f06dcdb0 100644 --- a/src/tools/miri/src/helpers.rs +++ b/src/tools/miri/src/helpers.rs @@ -12,13 +12,14 @@ use rustc_apfloat::Float; use rustc_hir::{ def::{DefKind, Namespace}, def_id::{CrateNum, DefId, CRATE_DEF_INDEX, LOCAL_CRATE}, + Safety, }; use rustc_index::IndexVec; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; use rustc_middle::middle::dependency_format::Linkage; use rustc_middle::middle::exported_symbols::ExportedSymbol; use rustc_middle::mir; -use rustc_middle::ty::layout::MaybeResult; +use rustc_middle::ty::layout::{FnAbiOf, MaybeResult}; use rustc_middle::ty::{ self, layout::{LayoutOf, TyAndLayout}, @@ -492,48 +493,38 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { &mut self, f: ty::Instance<'tcx>, caller_abi: Abi, - args: &[Immediate], + args: &[ImmTy<'tcx>], dest: Option<&MPlaceTy<'tcx>>, stack_pop: StackPopCleanup, ) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - let param_env = ty::ParamEnv::reveal_all(); // in Miri this is always the param_env we use... and this.param_env is private. - let callee_abi = f.ty(*this.tcx, param_env).fn_sig(*this.tcx).abi(); - if callee_abi != caller_abi { - throw_ub_format!( - "calling a function with ABI {} using caller ABI {}", - callee_abi.name(), - caller_abi.name() - ) - } - // Push frame. + // Get MIR. let mir = this.load_mir(f.def, None)?; let dest = match dest { Some(dest) => dest.clone(), None => MPlaceTy::fake_alloc_zst(this.layout_of(mir.return_ty())?), }; - this.push_stack_frame(f, mir, &dest, stack_pop)?; - - // Initialize arguments. - let mut callee_args = this.frame().body.args_iter(); - for arg in args { - let local = callee_args - .next() - .ok_or_else(|| err_ub_format!("callee has fewer arguments than expected"))?; - // Make the local live, and insert the initial value. 
- this.storage_live(local)?; - let callee_arg = this.local_to_place(local)?; - this.write_immediate(*arg, &callee_arg)?; - } - if callee_args.next().is_some() { - throw_ub_format!("callee has more arguments than expected"); - } - - // Initialize remaining locals. - this.storage_live_for_always_live_locals()?; - Ok(()) + // Construct a function pointer type representing the caller perspective. + let sig = this.tcx.mk_fn_sig( + args.iter().map(|a| a.layout.ty), + dest.layout.ty, + /*c_variadic*/ false, + Safety::Safe, + caller_abi, + ); + let caller_fn_abi = this.fn_abi_of_fn_ptr(ty::Binder::dummy(sig), ty::List::empty())?; + + this.init_stack_frame( + f, + mir, + caller_fn_abi, + &args.iter().map(|a| FnArg::Copy(a.clone().into())).collect::<Vec<_>>(), + /*with_caller_location*/ false, + &dest, + stack_pop, + ) } /// Visits the memory covered by `place`, sensitive to freezing: the 2nd parameter @@ -1114,12 +1105,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { // Make an attempt to get at the instance of the function this is inlined from. let instance: Option<_> = try { let scope = frame.current_source_info()?.scope; - let inlined_parent = frame.body.source_scopes[scope].inlined_parent_scope?; - let source = &frame.body.source_scopes[inlined_parent]; + let inlined_parent = frame.body().source_scopes[scope].inlined_parent_scope?; + let source = &frame.body().source_scopes[inlined_parent]; source.inlined.expect("inlined_parent_scope points to scope without inline info").0 }; // Fall back to the instance of the function itself. - let instance = instance.unwrap_or(frame.instance); + let instance = instance.unwrap_or(frame.instance()); // Now check the crate it is in. We could try to be clever here and e.g. check if this is // the same crate as `start_fn`, but that would not work for running std tests in Miri, so // we'd need some more hacks anyway. So we just check the name of the crate. If someone @@ -1359,9 +1350,9 @@ impl<'tcx> MiriMachine<'tcx> { /// This is the source of truth for the `is_user_relevant` flag in our `FrameExtra`. pub fn is_user_relevant(&self, frame: &Frame<'tcx, Provenance>) -> bool { - let def_id = frame.instance.def_id(); + let def_id = frame.instance().def_id(); (def_id.is_local() || self.local_crates.contains(&def_id.krate)) - && !frame.instance.def.requires_caller_location(self.tcx) + && !frame.instance().def.requires_caller_location(self.tcx) } } diff --git a/src/tools/miri/src/machine.rs b/src/tools/miri/src/machine.rs index 3e45a3a7e1a12..94598e7d2e3cc 100644 --- a/src/tools/miri/src/machine.rs +++ b/src/tools/miri/src/machine.rs @@ -1352,7 +1352,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> { ) -> InterpResult<'tcx, Frame<'tcx, Provenance, FrameExtra<'tcx>>> { // Start recording our event before doing anything else let timing = if let Some(profiler) = ecx.machine.profiler.as_ref() { - let fn_name = frame.instance.to_string(); + let fn_name = frame.instance().to_string(); let entry = ecx.machine.string_cache.entry(fn_name.clone()); let name = entry.or_insert_with(|| profiler.alloc_string(&*fn_name)); @@ -1443,7 +1443,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> { // tracing-tree can automatically annotate scope changes, but it gets very confused by our // concurrency and what it prints is just plain wrong. So we print our own information // instead.
(Cc https://github.com/rust-lang/miri/issues/2266) - info!("Leaving {}", ecx.frame().instance); + info!("Leaving {}", ecx.frame().instance()); Ok(()) } @@ -1473,7 +1473,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> { // Needs to be done after dropping frame to show up on the right nesting level. // (Cc https://github.com/rust-lang/miri/issues/2266) if !ecx.active_thread_stack().is_empty() { - info!("Continuing in {}", ecx.frame().instance); + info!("Continuing in {}", ecx.frame().instance()); } res } @@ -1486,7 +1486,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> { let Some(Provenance::Concrete { alloc_id, .. }) = mplace.ptr().provenance else { panic!("after_local_allocated should only be called on fresh allocations"); }; - let local_decl = &ecx.frame().body.local_decls[local]; + let local_decl = &ecx.frame().body().local_decls[local]; let span = local_decl.source_info.span; ecx.machine.allocation_spans.borrow_mut().insert(alloc_id, (span, None)); Ok(()) diff --git a/src/tools/miri/src/shims/backtrace.rs b/src/tools/miri/src/shims/backtrace.rs index 42babb4c78dc5..9bb6777a9b047 100644 --- a/src/tools/miri/src/shims/backtrace.rs +++ b/src/tools/miri/src/shims/backtrace.rs @@ -46,8 +46,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { let mut data = Vec::new(); for frame in this.active_thread_stack().iter().rev() { // Match behavior of debuginfo (`FunctionCx::adjusted_span_and_dbg_scope`). - let span = hygiene::walk_chain_collapsed(frame.current_span(), frame.body.span); - data.push((frame.instance, span.lo())); + let span = hygiene::walk_chain_collapsed(frame.current_span(), frame.body().span); + data.push((frame.instance(), span.lo())); } let ptrs: Vec<_> = data diff --git a/src/tools/miri/src/shims/panic.rs b/src/tools/miri/src/shims/panic.rs index 306dce5edcd3d..ab705ddccab6e 100644 --- a/src/tools/miri/src/shims/panic.rs +++ b/src/tools/miri/src/shims/panic.rs @@ -25,7 +25,7 @@ pub struct CatchUnwindData<'tcx> { /// The `catch_fn` callback to call in case of a panic. catch_fn: Pointer, /// The `data` argument for that callback. - data: Scalar, + data: ImmTy<'tcx>, /// The return place from the original call to `try`. dest: MPlaceTy<'tcx>, /// The return block from the original call to `try`. @@ -48,9 +48,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { fn handle_miri_start_unwind(&mut self, payload: &OpTy<'tcx>) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - trace!("miri_start_unwind: {:?}", this.frame().instance); + trace!("miri_start_unwind: {:?}", this.frame().instance()); - let payload = this.read_scalar(payload)?; + let payload = this.read_immediate(payload)?; let thread = this.active_thread_mut(); thread.panic_payloads.push(payload); @@ -80,7 +80,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { // Get all the arguments. let [try_fn, data, catch_fn] = check_arg_count(args)?; let try_fn = this.read_pointer(try_fn)?; - let data = this.read_scalar(data)?; + let data = this.read_immediate(data)?; let catch_fn = this.read_pointer(catch_fn)?; // Now we make a function call, and pass `data` as first and only argument. @@ -89,7 +89,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { this.call_function( f_instance, Abi::Rust, - &[data.into()], + &[data.clone()], None, // Directly return to caller. StackPopCleanup::Goto { ret, unwind: mir::UnwindAction::Continue }, @@ -124,7 +124,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { // and we are unwinding, so we should catch that. 
trace!( "unwinding: found catch_panic frame during unwinding: {:?}", - this.frame().instance + this.frame().instance() ); // We set the return value of `try` to 1, since there was a panic. @@ -140,7 +140,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { this.call_function( f_instance, Abi::Rust, - &[catch_unwind.data.into(), payload.into()], + &[catch_unwind.data, payload], None, // Directly return to caller of `try`. StackPopCleanup::Goto { @@ -169,7 +169,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { this.call_function( panic, Abi::Rust, - &[msg.to_ref(this)], + &[this.mplace_to_ref(&msg)?], None, StackPopCleanup::Goto { ret: None, unwind }, ) @@ -188,7 +188,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { this.call_function( panic, Abi::Rust, - &[msg.to_ref(this)], + &[this.mplace_to_ref(&msg)?], None, StackPopCleanup::Goto { ret: None, unwind: mir::UnwindAction::Unreachable }, ) @@ -207,9 +207,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { // Forward to `panic_bounds_check` lang item. // First arg: index. - let index = this.read_scalar(&this.eval_operand(index, None)?)?; + let index = this.read_immediate(&this.eval_operand(index, None)?)?; // Second arg: len. - let len = this.read_scalar(&this.eval_operand(len, None)?)?; + let len = this.read_immediate(&this.eval_operand(len, None)?)?; // Call the lang item. let panic_bounds_check = this.tcx.lang_items().panic_bounds_check_fn().unwrap(); @@ -217,7 +217,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { this.call_function( panic_bounds_check, Abi::Rust, - &[index.into(), len.into()], + &[index, len], None, StackPopCleanup::Goto { ret: None, unwind }, )?; @@ -226,9 +226,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { // Forward to `panic_misaligned_pointer_dereference` lang item. // First arg: required. - let required = this.read_scalar(&this.eval_operand(required, None)?)?; + let required = this.read_immediate(&this.eval_operand(required, None)?)?; // Second arg: found. - let found = this.read_scalar(&this.eval_operand(found, None)?)?; + let found = this.read_immediate(&this.eval_operand(found, None)?)?; // Call the lang item. let panic_misaligned_pointer_dereference = @@ -238,7 +238,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { this.call_function( panic_misaligned_pointer_dereference, Abi::Rust, - &[required.into(), found.into()], + &[required, found], None, StackPopCleanup::Goto { ret: None, unwind }, )?; diff --git a/src/tools/miri/src/shims/tls.rs b/src/tools/miri/src/shims/tls.rs index 8707446878955..52d83cd72997e 100644 --- a/src/tools/miri/src/shims/tls.rs +++ b/src/tools/miri/src/shims/tls.rs @@ -315,6 +315,8 @@ trait EvalContextPrivExt<'tcx>: crate::MiriInterpCxExt<'tcx> { // FIXME: Technically, the reason should be `DLL_PROCESS_DETACH` when the main thread exits // but std treats both the same. let reason = this.eval_windows("c", "DLL_THREAD_DETACH"); + let null_ptr = + ImmTy::from_scalar(Scalar::null_ptr(this), this.machine.layouts.const_raw_ptr); // The signature of this function is `unsafe extern "system" fn(h: c::LPVOID, dwReason: c::DWORD, pv: c::LPVOID)`. 
// FIXME: `h` should be a handle to the current module and what `pv` should be is unknown @@ -322,7 +324,7 @@ trait EvalContextPrivExt<'tcx>: crate::MiriInterpCxExt<'tcx> { this.call_function( thread_callback, Abi::System { unwind: false }, - &[Scalar::null_ptr(this).into(), reason.into(), Scalar::null_ptr(this).into()], + &[null_ptr.clone(), ImmTy::from_scalar(reason, this.machine.layouts.u32), null_ptr], None, StackPopCleanup::Root { cleanup: true }, )?; @@ -343,7 +345,7 @@ trait EvalContextPrivExt<'tcx>: crate::MiriInterpCxExt<'tcx> { this.call_function( instance, Abi::C { unwind: false }, - &[data.into()], + &[ImmTy::from_scalar(data, this.machine.layouts.mut_raw_ptr)], None, StackPopCleanup::Root { cleanup: true }, )?; @@ -380,7 +382,7 @@ trait EvalContextPrivExt<'tcx>: crate::MiriInterpCxExt<'tcx> { this.call_function( instance, Abi::C { unwind: false }, - &[ptr.into()], + &[ImmTy::from_scalar(ptr, this.machine.layouts.mut_raw_ptr)], None, StackPopCleanup::Root { cleanup: true }, )?; diff --git a/src/tools/miri/src/shims/unix/thread.rs b/src/tools/miri/src/shims/unix/thread.rs index 83bb95c797d97..56e8270aa62ac 100644 --- a/src/tools/miri/src/shims/unix/thread.rs +++ b/src/tools/miri/src/shims/unix/thread.rs @@ -1,5 +1,4 @@ use crate::*; -use rustc_middle::ty::layout::LayoutOf; use rustc_target::spec::abi::Abi; impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {} @@ -24,7 +23,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { start_routine, Abi::C { unwind: false }, func_arg, - this.layout_of(this.tcx.types.usize)?, + this.machine.layouts.mut_raw_ptr, )?; Ok(()) diff --git a/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_create_too_few_args.rs b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_create_too_few_args.rs index 96dd99e884428..39b1c3007cb00 100644 --- a/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_create_too_few_args.rs +++ b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_create_too_few_args.rs @@ -1,11 +1,12 @@ //@ignore-target-windows: No pthreads on Windows +//~^ERROR: calling a function with more arguments than it expected //! The thread function must have exactly one argument. 
use std::{mem, ptr}; extern "C" fn thread_start() -> *mut libc::c_void { - panic!() //~ ERROR: callee has fewer arguments than expected + panic!() } fn main() { diff --git a/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_create_too_few_args.stderr b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_create_too_few_args.stderr index ca6a05ac7dd43..aa67420c7538a 100644 --- a/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_create_too_few_args.stderr +++ b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_create_too_few_args.stderr @@ -1,14 +1,10 @@ -error: Undefined Behavior: callee has fewer arguments than expected - --> $DIR/libc_pthread_create_too_few_args.rs:LL:CC - | -LL | panic!() - | ^^^^^^^^ callee has fewer arguments than expected +error: Undefined Behavior: calling a function with more arguments than it expected | + = note: calling a function with more arguments than it expected + = note: (no span available) = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information = note: BACKTRACE on thread `unnamed-ID`: - = note: inside `thread_start` at RUSTLIB/core/src/panic.rs:LL:CC - = note: this error originates in the macro `$crate::panic::panic_2021` which comes from the expansion of the macro `panic` (in Nightly builds, run with -Z macro-backtrace for more info) error: aborting due to 1 previous error diff --git a/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_create_too_many_args.rs b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_create_too_many_args.rs index d8fbc68d344cd..fc2ab71dff735 100644 --- a/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_create_too_many_args.rs +++ b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_create_too_many_args.rs @@ -1,11 +1,12 @@ //@ignore-target-windows: No pthreads on Windows +//~^ERROR: calling a function with fewer arguments than it requires //! The thread function must have exactly one argument. 
use std::{mem, ptr}; extern "C" fn thread_start(_null: *mut libc::c_void, _x: i32) -> *mut libc::c_void { - panic!() //~ ERROR: callee has more arguments than expected + panic!() } fn main() { diff --git a/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_create_too_many_args.stderr b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_create_too_many_args.stderr index 6ab48a7666673..4de947b1694bf 100644 --- a/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_create_too_many_args.stderr +++ b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_create_too_many_args.stderr @@ -1,14 +1,10 @@ -error: Undefined Behavior: callee has more arguments than expected - --> $DIR/libc_pthread_create_too_many_args.rs:LL:CC - | -LL | panic!() - | ^^^^^^^^ callee has more arguments than expected +error: Undefined Behavior: calling a function with fewer arguments than it requires | + = note: calling a function with fewer arguments than it requires + = note: (no span available) = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information = note: BACKTRACE on thread `unnamed-ID`: - = note: inside `thread_start` at RUSTLIB/core/src/panic.rs:LL:CC - = note: this error originates in the macro `$crate::panic::panic_2021` which comes from the expansion of the macro `panic` (in Nightly builds, run with -Z macro-backtrace for more info) error: aborting due to 1 previous error diff --git a/src/tools/miri/tests/fail/function_calls/check_callback_abi.rs b/src/tools/miri/tests/fail/function_calls/check_callback_abi.rs index fd667fbe454c1..6a7a26710d16b 100644 --- a/src/tools/miri/tests/fail/function_calls/check_callback_abi.rs +++ b/src/tools/miri/tests/fail/function_calls/check_callback_abi.rs @@ -9,7 +9,7 @@ fn main() { // Make sure we check the ABI when Miri itself invokes a function // as part of a shim implementation. 
std::intrinsics::catch_unwind( - //~^ ERROR: calling a function with ABI C using caller ABI Rust + //~^ ERROR: calling a function with calling convention C using calling convention Rust std::mem::transmute::(try_fn), std::ptr::null_mut(), |_, _| unreachable!(), diff --git a/src/tools/miri/tests/fail/function_calls/check_callback_abi.stderr b/src/tools/miri/tests/fail/function_calls/check_callback_abi.stderr index 501f17c86d69c..890fed09e4819 100644 --- a/src/tools/miri/tests/fail/function_calls/check_callback_abi.stderr +++ b/src/tools/miri/tests/fail/function_calls/check_callback_abi.stderr @@ -1,4 +1,4 @@ -error: Undefined Behavior: calling a function with ABI C using caller ABI Rust +error: Undefined Behavior: calling a function with calling convention C using calling convention Rust --> $DIR/check_callback_abi.rs:LL:CC | LL | / std::intrinsics::catch_unwind( @@ -7,7 +7,7 @@ LL | | std::mem::transmute::(try_fn), LL | | std::ptr::null_mut(), LL | | |_, _| unreachable!(), LL | | ); - | |_________^ calling a function with ABI C using caller ABI Rust + | |_________^ calling a function with calling convention C using calling convention Rust | = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information diff --git a/src/tools/miri/tests/pass/alloc-access-tracking.rs b/src/tools/miri/tests/pass/alloc-access-tracking.rs index a226783155b42..40b8e23a33c47 100644 --- a/src/tools/miri/tests/pass/alloc-access-tracking.rs +++ b/src/tools/miri/tests/pass/alloc-access-tracking.rs @@ -1,7 +1,7 @@ #![feature(start)] #![no_std] -//@compile-flags: -Zmiri-track-alloc-id=20 -Zmiri-track-alloc-accesses -Cpanic=abort -//@normalize-stderr-test: "id 20" -> "id $$ALLOC" +//@compile-flags: -Zmiri-track-alloc-id=21 -Zmiri-track-alloc-accesses -Cpanic=abort +//@normalize-stderr-test: "id 21" -> "id $$ALLOC" //@only-target-linux: alloc IDs differ between OSes (due to extern static allocations) extern "Rust" { diff --git a/tests/run-make/dos-device-input/rmake.rs b/tests/run-make/dos-device-input/rmake.rs new file mode 100644 index 0000000000000..dee3b86f09591 --- /dev/null +++ b/tests/run-make/dos-device-input/rmake.rs @@ -0,0 +1,13 @@ +//@ only-windows +// Reason: dos devices are a Windows thing + +use std::path::Path; + +use run_make_support::{rustc, static_lib_name}; + +fn main() { + rustc().input(r"\\.\NUL").crate_type("staticlib").run(); + rustc().input(r"\\?\NUL").crate_type("staticlib").run(); + + assert!(Path::new(&static_lib_name("rust_out")).exists()); +} diff --git a/tests/ui/attributes/check-cfg_attr-ice.rs b/tests/ui/attributes/check-cfg_attr-ice.rs new file mode 100644 index 0000000000000..5bf8baab14140 --- /dev/null +++ b/tests/ui/attributes/check-cfg_attr-ice.rs @@ -0,0 +1,68 @@ +//! I missed a `cfg_attr` match in #128581, it should have had the same treatment as `cfg`. If +//! an invalid attribute starting with `cfg_attr` is passed, then it would trigger an ICE because +//! it was not considered "checked" (e.g. `#[cfg_attr::skip]` or `#[cfg_attr::no_such_thing]`). +//! +//! This test is not exhaustive, there's too many possible positions to check, instead it just does +//! a basic smoke test in a few select positions to make sure we don't ICE for e.g. +//! `#[cfg_attr::no_such_thing]`. +//! +//! 
issue: rust-lang/rust#128716 #![crate_type = "lib"] + +#[cfg_attr::no_such_thing] +//~^ ERROR failed to resolve +mod we_are_no_strangers_to_love {} + +#[cfg_attr::no_such_thing] +//~^ ERROR failed to resolve +struct YouKnowTheRules { + #[cfg_attr::no_such_thing] + //~^ ERROR failed to resolve + and_so_do_i: u8, +} + +#[cfg_attr::no_such_thing] +//~^ ERROR failed to resolve +fn a_full_commitment() { + #[cfg_attr::no_such_thing] + //~^ ERROR failed to resolve + let is_what_i_am_thinking_of = (); +} + +#[cfg_attr::no_such_thing] +//~^ ERROR failed to resolve +union AnyOtherGuy { + owo: () +} +struct This; + +#[cfg_attr(FALSE, doc = "you wouldn't get this")] +impl From<AnyOtherGuy> for This { + #[cfg_attr::no_such_thing] + //~^ ERROR failed to resolve + fn from(#[cfg_attr::no_such_thing] any_other_guy: AnyOtherGuy) -> This { + //~^ ERROR failed to resolve + #[cfg_attr::no_such_thing] + //~^ ERROR attributes on expressions are experimental + //~| ERROR failed to resolve + unreachable!() + } +} + +#[cfg_attr::no_such_thing] +//~^ ERROR failed to resolve +enum NeverGonna { + #[cfg_attr::no_such_thing] + //~^ ERROR failed to resolve + GiveYouUp(#[cfg_attr::no_such_thing] u8), + //~^ ERROR failed to resolve + LetYouDown { + #![cfg_attr::no_such_thing] + //~^ ERROR an inner attribute is not permitted in this context + never_gonna: (), + round_around: (), + #[cfg_attr::no_such_thing] + //~^ ERROR failed to resolve + and_desert_you: (), + }, +} diff --git a/tests/ui/attributes/check-cfg_attr-ice.stderr b/tests/ui/attributes/check-cfg_attr-ice.stderr new file mode 100644 index 0000000000000..dbdf32597f7b2 --- /dev/null +++ b/tests/ui/attributes/check-cfg_attr-ice.stderr @@ -0,0 +1,101 @@ +error: an inner attribute is not permitted in this context + --> $DIR/check-cfg_attr-ice.rs:60:9 + | +LL | #![cfg_attr::no_such_thing] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: inner attributes, like `#![no_std]`, annotate the item enclosing them, and are usually found at the beginning of source files + = note: outer attributes, like `#[test]`, annotate the item following them + +error[E0658]: attributes on expressions are experimental + --> $DIR/check-cfg_attr-ice.rs:45:9 + | +LL | #[cfg_attr::no_such_thing] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: see issue #15701 for more information + = help: add `#![feature(stmt_expr_attributes)]` to the crate attributes to enable + = note: this compiler was built on YYYY-MM-DD; consider upgrading it if it is out of date + +error[E0433]: failed to resolve: use of undeclared crate or module `cfg_attr` + --> $DIR/check-cfg_attr-ice.rs:52:3 + | +LL | #[cfg_attr::no_such_thing] + | ^^^^^^^^ use of undeclared crate or module `cfg_attr` + +error[E0433]: failed to resolve: use of undeclared crate or module `cfg_attr` + --> $DIR/check-cfg_attr-ice.rs:55:7 + | +LL | #[cfg_attr::no_such_thing] + | ^^^^^^^^ use of undeclared crate or module `cfg_attr` + +error[E0433]: failed to resolve: use of undeclared crate or module `cfg_attr` + --> $DIR/check-cfg_attr-ice.rs:57:17 + | +LL | GiveYouUp(#[cfg_attr::no_such_thing] u8), + | ^^^^^^^^ use of undeclared crate or module `cfg_attr` + +error[E0433]: failed to resolve: use of undeclared crate or module `cfg_attr` + --> $DIR/check-cfg_attr-ice.rs:64:11 + | +LL | #[cfg_attr::no_such_thing] + | ^^^^^^^^ use of undeclared crate or module `cfg_attr` + +error[E0433]: failed to resolve: use of undeclared crate or module `cfg_attr` + --> $DIR/check-cfg_attr-ice.rs:41:7 + | +LL | #[cfg_attr::no_such_thing] + | ^^^^^^^^ use of undeclared crate or module
`cfg_attr` + +error[E0433]: failed to resolve: use of undeclared crate or module `cfg_attr` + --> $DIR/check-cfg_attr-ice.rs:43:15 + | +LL | fn from(#[cfg_attr::no_such_thing] any_other_guy: AnyOtherGuy) -> This { + | ^^^^^^^^ use of undeclared crate or module `cfg_attr` + +error[E0433]: failed to resolve: use of undeclared crate or module `cfg_attr` + --> $DIR/check-cfg_attr-ice.rs:45:11 + | +LL | #[cfg_attr::no_such_thing] + | ^^^^^^^^ use of undeclared crate or module `cfg_attr` + +error[E0433]: failed to resolve: use of undeclared crate or module `cfg_attr` + --> $DIR/check-cfg_attr-ice.rs:32:3 + | +LL | #[cfg_attr::no_such_thing] + | ^^^^^^^^ use of undeclared crate or module `cfg_attr` + +error[E0433]: failed to resolve: use of undeclared crate or module `cfg_attr` + --> $DIR/check-cfg_attr-ice.rs:24:3 + | +LL | #[cfg_attr::no_such_thing] + | ^^^^^^^^ use of undeclared crate or module `cfg_attr` + +error[E0433]: failed to resolve: use of undeclared crate or module `cfg_attr` + --> $DIR/check-cfg_attr-ice.rs:27:7 + | +LL | #[cfg_attr::no_such_thing] + | ^^^^^^^^ use of undeclared crate or module `cfg_attr` + +error[E0433]: failed to resolve: use of undeclared crate or module `cfg_attr` + --> $DIR/check-cfg_attr-ice.rs:16:3 + | +LL | #[cfg_attr::no_such_thing] + | ^^^^^^^^ use of undeclared crate or module `cfg_attr` + +error[E0433]: failed to resolve: use of undeclared crate or module `cfg_attr` + --> $DIR/check-cfg_attr-ice.rs:19:7 + | +LL | #[cfg_attr::no_such_thing] + | ^^^^^^^^ use of undeclared crate or module `cfg_attr` + +error[E0433]: failed to resolve: use of undeclared crate or module `cfg_attr` + --> $DIR/check-cfg_attr-ice.rs:12:3 + | +LL | #[cfg_attr::no_such_thing] + | ^^^^^^^^ use of undeclared crate or module `cfg_attr` + +error: aborting due to 15 previous errors + +Some errors have detailed explanations: E0433, E0658. +For more information about an error, try `rustc --explain E0433`. diff --git a/triagebot.toml b/triagebot.toml index 2795f93728470..33108f743cb94 100644 --- a/triagebot.toml +++ b/triagebot.toml @@ -778,6 +778,14 @@ If this was unintentional then you should revert the changes before this PR is m Otherwise, you can ignore this comment. """ +[mentions."library/Cargo.lock"] +message = """ +These commits modify the `library/Cargo.lock` file. Unintentional changes to `library/Cargo.lock` can be introduced when switching branches and rebasing PRs. + +If this was unintentional then you should revert the changes before this PR is merged. +Otherwise, you can ignore this comment. +""" + [mentions."src/tools/x"] message = "`src/tools/x` was changed. Bump version of Cargo.toml in `src/tools/x` so tidy will suggest installing the new version."
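A short, hedged sketch of the user-visible behavior the slice-sort changes above document, reusing the non-total comparison function named in the new `sort_unstable_by` docs. It is illustrative only: whether the panic actually fires for a given input is unspecified; the documented guarantee is only "panic or some unspecified permutation of the original elements, never UB".

    fn main() {
        let mut v: Vec<i32> = (0..500).rev().collect();
        let result = std::panic::catch_unwind(move || {
            // `|a, b| (a - b).cmp(a)` is neither reflexive nor transitive,
            // so it does not implement a total order.
            v.sort_unstable_by(|a, b| (a - b).cmp(a));
        });
        // May print `true`, with the panic message "user-provided comparison
        // function does not correctly implement a total order".
        println!("sort panicked: {}", result.is_err());
    }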