diff --git a/configure b/configure index 18dc99dd6c34c..a48ff6a76109c 100755 --- a/configure +++ b/configure @@ -733,8 +733,6 @@ if [ -n "$CFG_ENABLE_DEBUG_ASSERTIONS" ]; then putvar CFG_ENABLE_DEBUG_ASSERTION if [ -n "$CFG_ENABLE_DEBUGINFO" ]; then putvar CFG_ENABLE_DEBUGINFO; fi if [ -n "$CFG_ENABLE_DEBUG_JEMALLOC" ]; then putvar CFG_ENABLE_DEBUG_JEMALLOC; fi -if [ -n "$CFG_DISABLE_ORBIT" ]; then putvar CFG_DISABLE_ORBIT; fi - step_msg "looking for build programs" probe_need CFG_CURL curl diff --git a/mk/main.mk b/mk/main.mk index 1725143325c61..428d9d16182ab 100644 --- a/mk/main.mk +++ b/mk/main.mk @@ -162,12 +162,6 @@ ifdef CFG_ENABLE_DEBUGINFO CFG_RUSTC_FLAGS += -g endif -ifdef CFG_DISABLE_ORBIT - $(info cfg: HOLD HOLD HOLD (CFG_DISABLE_ORBIT)) - RUSTFLAGS_STAGE1 += -Z orbit=off - RUSTFLAGS_STAGE2 += -Z orbit=off -endif - ifdef SAVE_TEMPS CFG_RUSTC_FLAGS += -C save-temps endif diff --git a/src/liballoc/arc.rs b/src/liballoc/arc.rs index 64b780413f884..9c9f1e7b9de07 100644 --- a/src/liballoc/arc.rs +++ b/src/liballoc/arc.rs @@ -121,7 +121,7 @@ const MAX_REFCOUNT: usize = (isize::MAX) as usize; /// } /// ``` -#[unsafe_no_drop_flag] +#[cfg_attr(stage0, unsafe_no_drop_flag)] #[stable(feature = "rust1", since = "1.0.0")] pub struct Arc<T: ?Sized> { ptr: Shared<ArcInner<T>>, @@ -147,7 +147,7 @@ impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {} /// nodes behind strong `Arc<T>` pointers, and then storing the parent pointers /// as `Weak<T>` pointers. -#[unsafe_no_drop_flag] +#[cfg_attr(stage0, unsafe_no_drop_flag)] #[stable(feature = "arc_weak", since = "1.4.0")] pub struct Weak<T: ?Sized> { ptr: Shared<ArcInner<T>>, @@ -559,15 +559,6 @@ impl<T: ?Sized> Drop for Arc<T> { #[unsafe_destructor_blind_to_params] #[inline] fn drop(&mut self) { - // This structure has #[unsafe_no_drop_flag], so this drop glue may run - // more than once (but it is guaranteed to be zeroed after the first if - // it's run more than once) - let thin = *self.ptr as *const (); - - if thin as usize == mem::POST_DROP_USIZE { - return; - } - // Because `fetch_sub` is already atomic, we do not need to synchronize // with other threads unless we are going to delete the object. This // same logic applies to the below `fetch_sub` to the `weak` count. @@ -755,12 +746,6 @@ impl<T: ?Sized> Drop for Weak<T> { /// ``` fn drop(&mut self) { let ptr = *self.ptr; - let thin = ptr as *const (); - - // see comments above for why this check is here - if thin as usize == mem::POST_DROP_USIZE { - return; - } // If we find out that we were the last weak pointer, then it's time to // deallocate the data entirely. See the discussion in Arc::drop() about diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs index 90037f813cda6..d9fd2d92710dc 100644 --- a/src/liballoc/lib.rs +++ b/src/liballoc/lib.rs @@ -88,7 +88,7 @@ #![feature(staged_api)] #![feature(unboxed_closures)] #![feature(unique)] -#![feature(unsafe_no_drop_flag, filling_drop)] +#![cfg_attr(stage0, feature(unsafe_no_drop_flag))] #![feature(unsize)] #![cfg_attr(not(test), feature(fused, raw, fn_traits, placement_new_protocol))]
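The recurring `#[unsafe_no_drop_flag]` → `#[cfg_attr(stage0, unsafe_no_drop_flag)]` rewrite keeps the attribute visible only to the stage0 bootstrap compiler, which still emits drop flags. A minimal sketch of the `cfg_attr` mechanism, substituting a stable attribute for the feature-gated one:

    // `#[cfg_attr(pred, attr)]` applies `attr` only when `pred` holds,
    // e.g. when the compiler is invoked with `--cfg stage0` during bootstrap.
    #[cfg_attr(stage0, derive(Debug))] // == #[derive(Debug)] iff --cfg stage0 is set
    pub struct Example {
        value: u32,
    }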
diff --git a/src/liballoc/raw_vec.rs b/src/liballoc/raw_vec.rs index cdb70ce57708a..23542215fa890 100644 --- a/src/liballoc/raw_vec.rs +++ b/src/liballoc/raw_vec.rs @@ -44,7 +44,7 @@ use core::cmp; /// `shrink_to_fit`, and `from_box` will actually set RawVec's private capacity /// field. This allows zero-sized types to not be special-cased by consumers of /// this type. -#[unsafe_no_drop_flag] +#[cfg_attr(stage0, unsafe_no_drop_flag)] pub struct RawVec<T> { ptr: Unique<T>, cap: usize, @@ -546,13 +546,6 @@ impl<T> RawVec<T> { mem::forget(self); output } - - /// This is a stupid name in the hopes that someone will find this in the - /// not too distant future and remove it with the rest of - /// #[unsafe_no_drop_flag] - pub fn unsafe_no_drop_flag_needs_drop(&self) -> bool { - self.cap != mem::POST_DROP_USIZE - } } impl<T> Drop for RawVec<T> { @@ -560,7 +553,7 @@ /// Frees the memory owned by the RawVec *without* trying to Drop its contents. fn drop(&mut self) { let elem_size = mem::size_of::<T>(); - if elem_size != 0 && self.cap != 0 && self.unsafe_no_drop_flag_needs_drop() { + if elem_size != 0 && self.cap != 0 { let align = mem::align_of::<T>(); let num_bytes = elem_size * self.cap; diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs index 3a158240c3a26..8e43e9eec1608 100644 --- a/src/liballoc/rc.rs +++ b/src/liballoc/rc.rs @@ -182,7 +182,7 @@ struct RcBox<T: ?Sized> { /// A reference-counted pointer type over an immutable value. /// /// See the [module level documentation](./index.html) for more details. -#[unsafe_no_drop_flag] +#[cfg_attr(stage0, unsafe_no_drop_flag)] #[stable(feature = "rust1", since = "1.0.0")] pub struct Rc<T: ?Sized> { ptr: Shared<RcBox<T>>, @@ -466,21 +466,18 @@ impl<T: ?Sized> Drop for Rc<T> { fn drop(&mut self) { unsafe { let ptr = *self.ptr; - let thin = ptr as *const (); - if thin as usize != mem::POST_DROP_USIZE { - self.dec_strong(); - if self.strong() == 0 { - // destroy the contained object - ptr::drop_in_place(&mut (*ptr).value); + self.dec_strong(); + if self.strong() == 0 { + // destroy the contained object + ptr::drop_in_place(&mut (*ptr).value); - // remove the implicit "strong weak" pointer now that we've - // destroyed the contents. - self.dec_weak(); + // remove the implicit "strong weak" pointer now that we've + // destroyed the contents. + self.dec_weak(); - if self.weak() == 0 { - deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr)) - } + if self.weak() == 0 { + deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr)) } } } @@ -724,7 +721,7 @@ impl<T> From<T> for Rc<T> { /// dropped. /// /// See the [module level documentation](./index.html) for more. -#[unsafe_no_drop_flag] +#[cfg_attr(stage0, unsafe_no_drop_flag)] #[stable(feature = "rc_weak", since = "1.4.0")] pub struct Weak<T: ?Sized> { ptr: Shared<RcBox<T>>, @@ -825,15 +822,12 @@ impl<T: ?Sized> Drop for Weak<T> { fn drop(&mut self) { unsafe { let ptr = *self.ptr; - let thin = ptr as *const (); - if thin as usize != mem::POST_DROP_USIZE { - self.dec_weak(); - // the weak count starts at 1, and will only go to zero if all - // the strong pointers have disappeared. - if self.weak() == 0 { - deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr)) - } + self.dec_weak(); + // the weak count starts at 1, and will only go to zero if all + // the strong pointers have disappeared. + if self.weak() == 0 { + deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr)) } } }
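The rewritten `Drop` impls preserve the invariant stated in the comment: the strong pointers collectively own one implicit weak reference, so the allocation is freed only when both counts reach zero. A small demonstration against the public `Rc` API:

    use std::rc::Rc;

    fn main() {
        let a = Rc::new(5);
        let w = Rc::downgrade(&a);
        assert_eq!(Rc::strong_count(&a), 1);
        assert_eq!(Rc::weak_count(&a), 1); // the implicit "strong weak" is not counted here
        drop(a);                           // value dropped; allocation kept alive for `w`
        assert!(w.upgrade().is_none());
    }   // last weak pointer gone: allocation deallocated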
+ if self.weak() == 0 { + deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr)) } } } diff --git a/src/libcollections/lib.rs b/src/libcollections/lib.rs index 2781059c1d543..c5a921693475a 100644 --- a/src/libcollections/lib.rs +++ b/src/libcollections/lib.rs @@ -52,7 +52,7 @@ #![feature(step_by)] #![feature(unicode)] #![feature(unique)] -#![feature(unsafe_no_drop_flag)] +#![cfg_attr(stage0, feature(unsafe_no_drop_flag))] #![cfg_attr(test, feature(rand, test))] #![no_std] diff --git a/src/libcollections/vec.rs b/src/libcollections/vec.rs index a866cdeb7ec3e..876314613f523 100644 --- a/src/libcollections/vec.rs +++ b/src/libcollections/vec.rs @@ -268,7 +268,7 @@ use super::range::RangeArgument; /// Vec does not currently guarantee the order in which elements are dropped /// (the order has changed in the past, and may change again). /// -#[unsafe_no_drop_flag] +#[cfg_attr(stage0, unsafe_no_drop_flag)] #[stable(feature = "rust1", since = "1.0.0")] pub struct Vec<T> { buf: RawVec<T>, @@ -1600,11 +1600,9 @@ impl<T: Ord> Ord for Vec<T> { impl<T> Drop for Vec<T> { #[unsafe_destructor_blind_to_params] fn drop(&mut self) { - if self.buf.unsafe_no_drop_flag_needs_drop() { - unsafe { - // use drop for [T] - ptr::drop_in_place(&mut self[..]); - } + unsafe { + // use drop for [T] + ptr::drop_in_place(&mut self[..]); } // RawVec handles deallocation } diff --git a/src/libcore/intrinsics.rs b/src/libcore/intrinsics.rs index c645608dda790..3c2c5abcb2821 100644 --- a/src/libcore/intrinsics.rs +++ b/src/libcore/intrinsics.rs @@ -244,19 +244,6 @@ extern "rust-intrinsic" { /// crate it is invoked in. pub fn type_id<T: ?Sized + 'static>() -> u64; - /// Creates a value initialized so that its drop flag, - /// if any, says that it has been dropped. - /// - /// `init_dropped` is unsafe because it returns a datum with all - /// of its bytes set to the drop flag, which generally does not - /// correspond to a valid value. - /// - /// This intrinsic is likely to be deprecated in the future when - /// Rust moves to non-zeroing dynamic drop (and thus removes the - /// embedded drop flags that are being established by this - /// intrinsic). - pub fn init_dropped<T>() -> T; - /// Creates a value initialized to zero. /// /// `init` is unsafe because it returns a zeroed-out datum, diff --git a/src/libcore/mem.rs b/src/libcore/mem.rs index 5c2179ccf33a1..6ebbe97d064a2 100644 --- a/src/libcore/mem.rs +++ b/src/libcore/mem.rs @@ -241,27 +241,6 @@ pub unsafe fn zeroed<T>() -> T { intrinsics::init() } -/// Creates a value initialized to an unspecified series of bytes. -/// -/// The byte sequence usually indicates that the value at the memory -/// in question has been dropped. Thus, *if* T carries a drop flag, -/// any associated destructor will not be run when the value falls out -/// of scope. -/// -/// Some code at one time used the `zeroed` function above to -/// accomplish this goal. -/// -/// This function is expected to be deprecated with the transition -/// to non-zeroing drop. -#[inline] -#[unstable(feature = "filling_drop", issue = "5016")] -pub unsafe fn dropped<T>() -> T { - #[inline(always)] - unsafe fn dropped_impl<T>() -> T { intrinsics::init_dropped() } - - dropped_impl() -} - /// Bypasses Rust's normal memory-initialization checks by pretending to /// produce a value of type T, while doing nothing at all. /// @@ -518,56 +497,6 @@ pub fn replace<T>(dest: &mut T, mut src: T) -> T { #[stable(feature = "rust1", since = "1.0.0")] pub fn drop<T>(_x: T) { }
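The `repeat_u8_as_*` macros deleted just below tile the one-byte drop marker across wider integers. Spelling out the arithmetic for the 0x1d marker as a self-checking sketch:

    // Each step shifts the narrower pattern left by its own width and
    // ORs in another copy, doubling the repeated-byte pattern.
    fn main() {
        let b = 0x1du8;
        let w = (b as u16) << 8 | b as u16;
        assert_eq!(w, 0x1d1d);
        let d = (w as u32) << 16 | w as u32;
        assert_eq!(d, 0x1d1d_1d1d);
        let q = (d as u64) << 32 | d as u64;
        assert_eq!(q, 0x1d1d_1d1d_1d1d_1d1d);
    }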
-macro_rules! repeat_u8_as_u16 { - ($name:expr) => { (($name as u16) << 8 | - ($name as u16)) } -} -macro_rules! repeat_u8_as_u32 { - ($name:expr) => { (($name as u32) << 24 | - ($name as u32) << 16 | - ($name as u32) << 8 | - ($name as u32)) } -} -macro_rules! repeat_u8_as_u64 { - ($name:expr) => { ((repeat_u8_as_u32!($name) as u64) << 32 | - (repeat_u8_as_u32!($name) as u64)) } -} - -// NOTE: Keep synchronized with values used in librustc_trans::trans::adt. -// -// In particular, the POST_DROP_U8 marker must never equal the -// DTOR_NEEDED_U8 marker. -// -// For a while pnkfelix was using 0xc1 here. -// But having the sign bit set is a pain, so 0x1d is probably better. -// -// And of course, 0x00 brings back the old world of zero'ing on drop. -#[unstable(feature = "filling_drop", issue = "5016")] -#[allow(missing_docs)] -pub const POST_DROP_U8: u8 = 0x1d; -#[unstable(feature = "filling_drop", issue = "5016")] -#[allow(missing_docs)] -pub const POST_DROP_U16: u16 = repeat_u8_as_u16!(POST_DROP_U8); -#[unstable(feature = "filling_drop", issue = "5016")] -#[allow(missing_docs)] -pub const POST_DROP_U32: u32 = repeat_u8_as_u32!(POST_DROP_U8); -#[unstable(feature = "filling_drop", issue = "5016")] -#[allow(missing_docs)] -pub const POST_DROP_U64: u64 = repeat_u8_as_u64!(POST_DROP_U8); - -#[cfg(target_pointer_width = "16")] -#[unstable(feature = "filling_drop", issue = "5016")] -#[allow(missing_docs)] -pub const POST_DROP_USIZE: usize = POST_DROP_U16 as usize; -#[cfg(target_pointer_width = "32")] -#[unstable(feature = "filling_drop", issue = "5016")] -#[allow(missing_docs)] -pub const POST_DROP_USIZE: usize = POST_DROP_U32 as usize; -#[cfg(target_pointer_width = "64")] -#[unstable(feature = "filling_drop", issue = "5016")] -#[allow(missing_docs)] -pub const POST_DROP_USIZE: usize = POST_DROP_U64 as usize; - /// Interprets `src` as `&U`, and then reads `src` without moving the contained /// value. /// diff --git a/src/libcore/ptr.rs b/src/libcore/ptr.rs index 8cb485872b3f3..dd76843793332 100644 --- a/src/libcore/ptr.rs +++ b/src/libcore/ptr.rs @@ -140,21 +140,6 @@ pub unsafe fn read<T>(src: *const T) -> T { tmp } -#[allow(missing_docs)] -#[inline(always)] -#[unstable(feature = "filling_drop", - reason = "may play a larger role in std::ptr future extensions", - issue = "5016")] -pub unsafe fn read_and_drop<T>(dest: *mut T) -> T { - // Copy the data out from `dest`: - let tmp = read(&*dest); - - // Now mark `dest` as dropped: - write_bytes(dest, mem::POST_DROP_U8, 1); - - tmp -} - /// Overwrites a memory location with the given value without reading or /// dropping the old value.
/// diff --git a/src/librustc/hir/map/mod.rs b/src/librustc/hir/map/mod.rs index 04031fabc5866..5e14bb51ce867 100644 --- a/src/librustc/hir/map/mod.rs +++ b/src/librustc/hir/map/mod.rs @@ -315,8 +315,7 @@ impl<'ast> Map<'ast> { RootInlinedParent(parent) => match *parent { InlinedItem::Item(def_id, _) | InlinedItem::TraitItem(def_id, _) | - InlinedItem::ImplItem(def_id, _) | - InlinedItem::Foreign(def_id, _) => + InlinedItem::ImplItem(def_id, _) => return DepNode::MetaData(def_id) }, @@ -940,8 +939,6 @@ pub fn map_decoded_item<'ast, F: FoldOps>(map: &Map<'ast>, II::ImplItem(fld.fold_ops.new_def_id(d), ii.map(|ii| fld.fold_impl_item(ii))) } - II::Foreign(d, i) => II::Foreign(fld.fold_ops.new_def_id(d), - i.map(|i| fld.fold_foreign_item(i))) }; let ii = map.forest.inlined_items.alloc(ii); diff --git a/src/librustc/middle/cstore.rs b/src/librustc/middle/cstore.rs index abb22783ddc84..92e1b0681cc7e 100644 --- a/src/librustc/middle/cstore.rs +++ b/src/librustc/middle/cstore.rs @@ -96,8 +96,7 @@ pub enum DefLike { pub enum InlinedItem { Item(DefId /* def-id in source crate */, P<hir::Item>), TraitItem(DefId /* impl id */, P<hir::TraitItem>), - ImplItem(DefId /* impl id */, P<hir::ImplItem>), - Foreign(DefId /* extern item */, P<hir::ForeignItem>), + ImplItem(DefId /* impl id */, P<hir::ImplItem>) } /// A borrowed version of `hir::InlinedItem`. @@ -105,8 +104,7 @@ pub enum InlinedItem { pub enum InlinedItemRef<'a> { Item(DefId, &'a hir::Item), TraitItem(DefId, &'a hir::TraitItem), - ImplItem(DefId, &'a hir::ImplItem), - Foreign(DefId, &'a hir::ForeignItem) + ImplItem(DefId, &'a hir::ImplItem) } /// Item definitions in the currently-compiled crate would have the CrateNum @@ -286,7 +284,6 @@ impl InlinedItem { { match *self { InlinedItem::Item(_, ref i) => visitor.visit_item(&i), - InlinedItem::Foreign(_, ref i) => visitor.visit_foreign_item(&i), InlinedItem::TraitItem(_, ref ti) => visitor.visit_trait_item(ti), InlinedItem::ImplItem(_, ref ii) => visitor.visit_impl_item(ii), } diff --git a/src/librustc/session/config.rs b/src/librustc/session/config.rs index e988ddcd97b15..a991a1a9ba4b5 100644 --- a/src/librustc/session/config.rs +++ b/src/librustc/session/config.rs @@ -605,8 +605,6 @@ macro_rules! options { pub const parse_bool: Option<&'static str> = None; pub const parse_opt_bool: Option<&'static str> = Some("one of: `y`, `yes`, `on`, `n`, `no`, or `off`"); - pub const parse_all_bool: Option<&'static str> = - Some("one of: `y`, `yes`, `on`, `n`, `no`, or `off`"); pub const parse_string: Option<&'static str> = Some("a string"); pub const parse_opt_string: Option<&'static str> = Some("a string"); pub const parse_list: Option<&'static str> = Some("a space-separated list of strings"); @@ -656,25 +654,6 @@ macro_rules! options { } } - fn parse_all_bool(slot: &mut bool, v: Option<&str>) -> bool { - match v { - Some(s) => { - match s { - "n" | "no" | "off" => { - *slot = false; - } - "y" | "yes" | "on" => { - *slot = true; - } - _ => { return false; } - } - - true - }, - None => { *slot = true; true } - } - } - fn parse_opt_string(slot: &mut Option<String>, v: Option<&str>) -> bool { match v { Some(s) => { *slot = Some(s.to_string()); true },
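With `parse_all_bool` gone, the surviving `parse_opt_bool` covers the same spellings. A minimal standalone sketch of the tri-state behaviour (an illustrative reimplementation, not the exact rustc source):

    fn parse_opt_bool(slot: &mut Option<bool>, v: Option<&str>) -> bool {
        match v {
            Some("y") | Some("yes") | Some("on") => { *slot = Some(true); true }
            Some("n") | Some("no") | Some("off") => { *slot = Some(false); true }
            Some(_) => false,                     // unrecognized value: usage error
            None => { *slot = Some(true); true }  // bare `-Z flag` means "on"
        }
    }

    fn main() {
        let mut flag = None;
        assert!(parse_opt_bool(&mut flag, Some("off")));
        assert_eq!(flag, Some(false));
    }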
@@ -910,8 +889,6 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "adds unstable command line options to rustc interface"), force_overflow_checks: Option<bool> = (None, parse_opt_bool, [TRACKED], "force overflow checks on or off"), - force_dropflag_checks: Option<bool> = (None, parse_opt_bool, [TRACKED], - "force drop flag checks on or off"), trace_macros: bool = (false, parse_bool, [UNTRACKED], "for every macro invocation, print its name and arguments"), enable_nonzeroing_move_hints: bool = (false, parse_bool, [TRACKED], @@ -930,8 +907,6 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "dump MIR state at various points in translation"), dump_mir_dir: Option<String> = (None, parse_opt_string, [UNTRACKED], "the directory the MIR is dumped into"), - orbit: bool = (true, parse_all_bool, [UNTRACKED], - "get MIR where it belongs - everywhere; most importantly, in orbit"), } pub fn default_lib_output() -> CrateType { @@ -1324,15 +1299,7 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches) }) }); - let mut debugging_opts = build_debugging_options(matches, error_format); - - // Incremental compilation only works reliably when translation is done via - // MIR, so let's enable -Z orbit if necessary (see #34973). - if debugging_opts.incremental.is_some() && !debugging_opts.orbit { - early_warn(error_format, "Automatically enabling `-Z orbit` because \ `-Z incremental` was specified"); - debugging_opts.orbit = true; - } + let debugging_opts = build_debugging_options(matches, error_format); let mir_opt_level = debugging_opts.mir_opt_level.unwrap_or(1); @@ -2424,8 +2391,6 @@ mod tests { assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); opts.debugging_opts.dump_mir_dir = Some(String::from("abc")); assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); - opts.debugging_opts.orbit = false; - assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); // Make sure changing a [TRACKED] option changes the hash opts = reference.clone(); @@ -2460,10 +2425,6 @@ mod tests { opts.debugging_opts.force_overflow_checks = Some(true); assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); - opts = reference.clone(); - opts.debugging_opts.force_dropflag_checks = Some(true); - assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); - opts = reference.clone(); opts.debugging_opts.enable_nonzeroing_move_hints = true; assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash());
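The layout changes in the next file delete the extra `I8` drop-flag field and let the two-variant nullable-pointer check fire for types with destructors as well. The optimization being preserved is observable from safe code (a quick sketch):

    use std::mem::size_of;

    fn main() {
        // None is encoded as the null pointer, so no separate
        // discriminant (and, after this patch, no drop flag) is stored.
        assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
    }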
diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 195cece6bc4e0..1ede8545e08e8 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -891,17 +891,6 @@ impl<'a, 'gcx, 'tcx> Layout { let mut st = Struct::new(dl, packed); st.extend(dl, fields, ty)?; - // FIXME(16758) don't add a drop flag to unsized structs, as it - // won't actually be in the location we say it is because it'll be after - // the unsized field. Several other pieces of code assume that the unsized - // field is definitely the last one. - if def.dtor_kind().has_drop_flag() && - ty.is_sized(tcx, &infcx.parameter_environment, DUMMY_SP) { - st.extend(dl, Some(Ok(&Scalar { - value: Int(I8), - non_zero: false - })).into_iter(), ty)?; - } Univariant { variant: st, non_zero: Some(def.did) == tcx.lang_items.non_zero() @@ -911,24 +900,18 @@ impl<'a, 'gcx, 'tcx> Layout { let hint = *tcx.lookup_repr_hints(def.did).get(0) .unwrap_or(&attr::ReprAny); - let dtor = def.dtor_kind().has_drop_flag(); - let drop_flag = if dtor { - Some(Scalar { value: Int(I8), non_zero: false }) - } else { - None - }; - if def.variants.is_empty() { // Uninhabitable; represent as unit // (Typechecking will reject discriminant-sizing attrs.) assert_eq!(hint, attr::ReprAny); - let mut st = Struct::new(dl, false); - st.extend(dl, drop_flag.iter().map(Ok), ty)?; - return success(Univariant { variant: st, non_zero: false }); + return success(Univariant { + variant: Struct::new(dl, false), + non_zero: false + }); } - if !dtor && def.variants.iter().all(|v| v.fields.is_empty()) { + if def.variants.iter().all(|v| v.fields.is_empty()) { // All bodies empty -> intlike let (mut min, mut max) = (i64::MAX, i64::MIN); for v in &def.variants { @@ -964,7 +947,7 @@ impl<'a, 'gcx, 'tcx> Layout { field.ty(tcx, substs).layout(infcx) }); let mut st = Struct::new(dl, false); - st.extend(dl, fields.chain(drop_flag.iter().map(Ok)), ty)?; + st.extend(dl, fields, ty)?; return success(Univariant { variant: st, non_zero: false }); } @@ -973,7 +956,7 @@ impl<'a, 'gcx, 'tcx> Layout { v.fields.iter().map(|field| field.ty(tcx, substs)).collect::<Vec<_>>() }).collect::<Vec<_>>(); - if !dtor && variants.len() == 2 && hint == attr::ReprAny { + if variants.len() == 2 && hint == attr::ReprAny { // Nullable pointer optimization for discr in 0..2 { let other_fields = variants[1 - discr].iter().map(|ty| { @@ -1045,8 +1028,7 @@ impl<'a, 'gcx, 'tcx> Layout { Ok(field) }); let mut st = Struct::new(dl, false); - st.extend(dl, discr.iter().map(Ok).chain(fields) - .chain(drop_flag.iter().map(Ok)), ty)?; + st.extend(dl, discr.iter().map(Ok).chain(fields), ty)?; size = cmp::max(size, st.min_size()); align = align.max(st.align); Ok(st) @@ -1277,11 +1259,6 @@ impl<'a, 'gcx, 'tcx> SizeSkeleton<'gcx> { return Err(err); } - // If there's a drop flag, it can't be just a pointer. - if def.dtor_kind().has_drop_flag() { - return Err(err); - } - // Get a zero-sized variant or a pointer newtype. let zero_or_ptr_variant = |i: usize| { let fields = def.variants[i].fields.iter().map(|field| { diff --git a/src/librustc/ty/mod.rs b/src/librustc/ty/mod.rs index 1f747ddfb2959..6c82157c8ca7c 100644 --- a/src/librustc/ty/mod.rs +++ b/src/librustc/ty/mod.rs @@ -122,23 +122,16 @@ pub struct CrateAnalysis<'a> { #[derive(Copy, Clone)] pub enum DtorKind { NoDtor, - TraitDtor(bool) + TraitDtor } impl DtorKind { pub fn is_present(&self) -> bool { match *self { - TraitDtor(..) => true, + TraitDtor => true, _ => false } } - - pub fn has_drop_flag(&self) -> bool { - match self { - &NoDtor => false, - &TraitDtor(flag) => flag - } - } } #[derive(Clone, Copy, PartialEq, Eq, Debug)] bitflags!
{ const IS_PHANTOM_DATA = 1 << 3, const IS_SIMD = 1 << 4, const IS_FUNDAMENTAL = 1 << 5, - const IS_NO_DROP_FLAG = 1 << 6, } } @@ -1558,9 +1550,6 @@ impl<'a, 'gcx, 'tcx, 'container> AdtDefData<'gcx, 'container> { if attr::contains_name(&attrs, "fundamental") { flags = flags | AdtFlags::IS_FUNDAMENTAL; } - if attr::contains_name(&attrs, "unsafe_no_drop_flag") { - flags = flags | AdtFlags::IS_NO_DROP_FLAG; - } if tcx.lookup_simd(did) { flags = flags | AdtFlags::IS_SIMD; } @@ -1627,10 +1616,7 @@ impl<'a, 'gcx, 'tcx, 'container> AdtDefData<'gcx, 'container> { /// Returns whether this type has a destructor. pub fn has_dtor(&self) -> bool { - match self.dtor_kind() { - NoDtor => false, - TraitDtor(..) => true - } + self.dtor_kind().is_present() } /// Asserts this is a struct and returns the struct's unique @@ -1710,9 +1696,7 @@ impl<'a, 'gcx, 'tcx, 'container> AdtDefData<'gcx, 'container> { pub fn dtor_kind(&self) -> DtorKind { match self.destructor.get() { - Some(_) => { - TraitDtor(!self.flags.get().intersects(AdtFlags::IS_NO_DROP_FLAG)) - } + Some(_) => TraitDtor, None => NoDtor, } } diff --git a/src/librustc_lint/builtin.rs b/src/librustc_lint/builtin.rs index 61d927239828b..b4a2648b5dca7 100644 --- a/src/librustc_lint/builtin.rs +++ b/src/librustc_lint/builtin.rs @@ -45,7 +45,7 @@ use std::collections::HashSet; use syntax::{ast}; use syntax::attr::{self, AttrMetaMethods, AttributeMethods}; -use syntax_pos::{self, Span}; +use syntax_pos::Span; use rustc::hir::{self, PatKind}; use rustc::hir::intravisit::FnKind; @@ -1154,56 +1154,3 @@ impl LateLintPass for UnstableFeatures { } } } - -/// Lints for attempts to impl Drop on types that have `#[repr(C)]` -/// attribute (see issue #24585). -#[derive(Copy, Clone)] -pub struct DropWithReprExtern; - -declare_lint! 
{ - DROP_WITH_REPR_EXTERN, - Warn, - "use of #[repr(C)] on a type that implements Drop" -} - -impl LintPass for DropWithReprExtern { - fn get_lints(&self) -> LintArray { - lint_array!(DROP_WITH_REPR_EXTERN) - } -} - -impl LateLintPass for DropWithReprExtern { - fn check_crate(&mut self, ctx: &LateContext, _: &hir::Crate) { - let drop_trait = match ctx.tcx.lang_items.drop_trait() { - Some(id) => ctx.tcx.lookup_trait_def(id), None => { return } - }; - drop_trait.for_each_impl(ctx.tcx, |drop_impl_did| { - if !drop_impl_did.is_local() { - return; - } - let dtor_self_type = ctx.tcx.lookup_item_type(drop_impl_did).ty; - - match dtor_self_type.sty { - ty::TyEnum(self_type_def, _) | - ty::TyStruct(self_type_def, _) => { - let self_type_did = self_type_def.did; - let hints = ctx.tcx.lookup_repr_hints(self_type_did); - if hints.iter().any(|attr| *attr == attr::ReprExtern) && - self_type_def.dtor_kind().has_drop_flag() { - let drop_impl_span = ctx.tcx.map.def_id_span(drop_impl_did, - syntax_pos::DUMMY_SP); - let self_defn_span = ctx.tcx.map.def_id_span(self_type_did, - syntax_pos::DUMMY_SP); - ctx.span_lint_note(DROP_WITH_REPR_EXTERN, - drop_impl_span, - "implementing Drop adds hidden state to types, \ - possibly conflicting with `#[repr(C)]`", - self_defn_span, - "the `#[repr(C)]` attribute is attached here"); - } - } - _ => {} - } - }) - } -} diff --git a/src/librustc_lint/lib.rs b/src/librustc_lint/lib.rs index cb0036eb5b034..1a4330f58c3cd 100644 --- a/src/librustc_lint/lib.rs +++ b/src/librustc_lint/lib.rs @@ -127,7 +127,6 @@ pub fn register_builtins(store: &mut lint::LintStore, sess: Option<&Session>) { UnconditionalRecursion, InvalidNoMangleItems, PluginAsLibrary, - DropWithReprExtern, MutableTransmutes, ); @@ -218,4 +217,5 @@ pub fn register_builtins(store: &mut lint::LintStore, sess: Option<&Session>) { // This was renamed to raw_pointer_derive, which was then removed, // so it is also considered removed store.register_removed("raw_pointer_deriving", "using derive with raw pointers is ok"); + store.register_removed("drop_with_repr_extern", "drop flags have been removed"); } diff --git a/src/librustc_metadata/astencode.rs b/src/librustc_metadata/astencode.rs index 1ef48e6d6565f..ad52d346857ff 100644 --- a/src/librustc_metadata/astencode.rs +++ b/src/librustc_metadata/astencode.rs @@ -79,7 +79,6 @@ pub fn encode_inlined_item(ecx: &e::EncodeContext, ii: InlinedItemRef) { let id = match ii { InlinedItemRef::Item(_, i) => i.id, - InlinedItemRef::Foreign(_, i) => i.id, InlinedItemRef::TraitItem(_, ti) => ti.id, InlinedItemRef::ImplItem(_, ii) => ii.id, }; @@ -147,7 +146,6 @@ pub fn decode_inlined_item<'a, 'tcx>(cdata: &cstore::CrateMetadata, dcx); let name = match *ii { InlinedItem::Item(_, ref i) => i.name, - InlinedItem::Foreign(_, ref i) => i.name, InlinedItem::TraitItem(_, ref ti) => ti.name, InlinedItem::ImplItem(_, ref ii) => ii.name }; @@ -357,9 +355,6 @@ fn simplify_ast(ii: InlinedItemRef) -> (InlinedItem, IdRange) { InlinedItemRef::ImplItem(d, ii) => { InlinedItem::ImplItem(d, P(fold::noop_fold_impl_item(ii.clone(), &mut fld))) } - InlinedItemRef::Foreign(d, i) => { - InlinedItem::Foreign(d, P(fold::noop_fold_foreign_item(i.clone(), &mut fld))) - } }; (ii, fld.id_range) @@ -1208,8 +1203,7 @@ fn copy_item_types(dcx: &DecodeContext, ii: &InlinedItem, orig_did: DefId) { let item_node_id = match ii { &InlinedItem::Item(_, ref i) => i.id, &InlinedItem::TraitItem(_, ref ti) => ti.id, - &InlinedItem::ImplItem(_, ref ii) => ii.id, - &InlinedItem::Foreign(_, ref fi) => fi.id + 
&InlinedItem::ImplItem(_, ref ii) => ii.id }; copy_item_type(dcx, item_node_id, orig_did); diff --git a/src/librustc_metadata/csearch.rs b/src/librustc_metadata/csearch.rs index f6d698eb969d8..94426dcbf1d8d 100644 --- a/src/librustc_metadata/csearch.rs +++ b/src/librustc_metadata/csearch.rs @@ -562,11 +562,6 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore { let inlined_root_node_id = find_inlined_item_root(item.id); cache_inlined_item(def_id, item.id, inlined_root_node_id); } - decoder::FoundAst::Found(&InlinedItem::Foreign(d, ref item)) => { - assert_eq!(d, def_id); - let inlined_root_node_id = find_inlined_item_root(item.id); - cache_inlined_item(def_id, item.id, inlined_root_node_id); - } decoder::FoundAst::FoundParent(parent_did, item) => { let inlined_root_node_id = find_inlined_item_root(item.id); cache_inlined_item(parent_did, item.id, inlined_root_node_id); diff --git a/src/librustc_metadata/encoder.rs b/src/librustc_metadata/encoder.rs index 420dfbc58bf19..9a668b69b2eeb 100644 --- a/src/librustc_metadata/encoder.rs +++ b/src/librustc_metadata/encoder.rs @@ -40,7 +40,6 @@ use std::io::prelude::*; use std::io::{Cursor, SeekFrom}; use std::rc::Rc; use std::u32; -use syntax::abi::Abi; use syntax::ast::{self, NodeId, Name, CRATE_NODE_ID, CrateNum}; use syntax::attr::{self,AttrMetaMethods,AttributeMethods}; use errors::Handler; @@ -626,11 +625,6 @@ impl<'a, 'tcx, 'encoder> ItemContentBuilder<'a, 'tcx, 'encoder> { if body.is_some() { encode_item_sort(self.rbml_w, 'p'); - encode_inlined_item(ecx, - self.rbml_w, - InlinedItemRef::TraitItem( - trait_def_id, - trait_item)); self.encode_mir(trait_item.id); } else { encode_item_sort(self.rbml_w, 'r'); @@ -728,12 +722,14 @@ impl<'a, 'tcx, 'encoder> ItemContentBuilder<'a, 'tcx, 'encoder> { let types = generics.parent_types as usize + generics.types.len(); let needs_inline = types > 0 || is_default_impl || attr::requests_inline(&impl_item.attrs); - if needs_inline || sig.constness == hir::Constness::Const { + if sig.constness == hir::Constness::Const { encode_inlined_item( ecx, self.rbml_w, InlinedItemRef::ImplItem(ecx.tcx.map.local_def_id(parent_id), impl_item)); + } + if needs_inline || sig.constness == hir::Constness::Const { self.encode_mir(impl_item.id); } encode_constness(self.rbml_w, sig.constness); @@ -934,8 +930,10 @@ impl<'a, 'tcx, 'encoder> ItemContentBuilder<'a, 'tcx, 'encoder> { encode_name(self.rbml_w, item.name); encode_attributes(self.rbml_w, &item.attrs); let needs_inline = tps_len > 0 || attr::requests_inline(&item.attrs); - if needs_inline || constness == hir::Constness::Const { + if constness == hir::Constness::Const { encode_inlined_item(ecx, self.rbml_w, InlinedItemRef::Item(def_id, item)); + } + if needs_inline || constness == hir::Constness::Const { self.encode_mir(item.id); } encode_constness(self.rbml_w, constness); @@ -982,8 +980,6 @@ impl<'a, 'tcx, 'encoder> ItemContentBuilder<'a, 'tcx, 'encoder> { for v in &enum_definition.variants { encode_variant_id(self.rbml_w, ecx.tcx.map.local_def_id(v.node.data.id())); } - encode_inlined_item(ecx, self.rbml_w, InlinedItemRef::Item(def_id, item)); - self.encode_mir(item.id); // Encode inherent implementations for self enumeration. 
encode_inherent_implementations(ecx, self.rbml_w, def_id); @@ -1019,9 +1015,6 @@ impl<'a, 'tcx, 'encoder> ItemContentBuilder<'a, 'tcx, 'encoder> { needs to know*/ self.encode_struct_fields(variant); - encode_inlined_item(ecx, self.rbml_w, InlinedItemRef::Item(def_id, item)); - self.encode_mir(item.id); - // Encode inherent implementations for self structure. encode_inherent_implementations(ecx, self.rbml_w, def_id); @@ -1265,7 +1258,6 @@ impl<'a, 'tcx, 'encoder> ItemContentBuilder<'a, 'tcx, 'encoder> { let ecx = self.ecx(); debug!("writing foreign item {}", ecx.tcx.node_path_str(nitem.id)); - let abi = ecx.tcx.map.get_foreign_abi(nitem.id); encode_def_id_and_key(ecx, self.rbml_w, def_id); let parent_id = ecx.tcx.map.get_parent(nitem.id); @@ -1276,12 +1268,6 @@ impl<'a, 'tcx, 'encoder> ItemContentBuilder<'a, 'tcx, 'encoder> { encode_family(self.rbml_w, FN_FAMILY); self.encode_bounds_and_type_for_item(nitem.id); encode_name(self.rbml_w, nitem.name); - if abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic { - encode_inlined_item(ecx, - self.rbml_w, - InlinedItemRef::Foreign(def_id, nitem)); - self.encode_mir(nitem.id); - } encode_attributes(self.rbml_w, &nitem.attrs); let stab = ecx.tcx.lookup_stability(ecx.tcx.map.local_def_id(nitem.id)); let depr = ecx.tcx.lookup_deprecation(ecx.tcx.map.local_def_id(nitem.id)); diff --git a/src/librustc_trans/_match.rs b/src/librustc_trans/_match.rs deleted file mode 100644 index d6866b27f98a5..0000000000000 --- a/src/librustc_trans/_match.rs +++ /dev/null @@ -1,2012 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or -// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license -// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! # Compilation of match statements -//! -//! I will endeavor to explain the code as best I can. I have only a loose -//! understanding of some parts of it. -//! -//! ## Matching -//! -//! The basic state of the code is maintained in an array `m` of `Match` -//! objects. Each `Match` describes some list of patterns, all of which must -//! match against the current list of values. If those patterns match, then -//! the arm listed in the match is the correct arm. A given arm may have -//! multiple corresponding match entries, one for each alternative that -//! remains. As we proceed these sets of matches are adjusted by the various -//! `enter_XXX()` functions, each of which adjusts the set of options given -//! some information about the value which has been matched. -//! -//! So, initially, there is one value and N matches, each of which has one -//! constituent pattern. N here is usually the number of arms but may be -//! greater, if some arms have multiple alternatives. For example, here: -//! -//! enum Foo { A, B(int), C(usize, usize) } -//! match foo { -//! A => ..., -//! B(x) => ..., -//! C(1, 2) => ..., -//! C(_) => ... -//! } -//! -//! The value would be `foo`. There would be four matches, each of which -//! contains one pattern (and, in one case, a guard). We could collect the -//! various options and then compile the code for the case where `foo` is an -//! `A`, a `B`, and a `C`. When we generate the code for `C`, we would (1) -//! drop the two matches that do not match a `C` and (2) expand the other two -//! into two patterns each. In the first case, the two patterns would be `1`
-//! and `2`, and in the second case the `_` pattern would be expanded into -//! `_` and `_`. The two values are of course the arguments to `C`. -//! -//! Here is a quick guide to the various functions: -//! -//! - `compile_submatch()`: The main workhorse. It takes a list of values and -//! a list of matches and finds the various possibilities that could occur. -//! -//! - `enter_XXX()`: modifies the list of matches based on some information -//! about the value that has been matched. For example, -//! `enter_rec_or_struct()` adjusts the values given that a record or struct -//! has been matched. This is an infallible pattern, so *all* of the matches -//! must be either wildcards or record/struct patterns. `enter_opt()` -//! handles the fallible cases, and it is correspondingly more complex. -//! -//! ## Bindings -//! -//! We store information about the bound variables for each arm as part of the -//! per-arm `ArmData` struct. There is a mapping from identifiers to -//! `BindingInfo` structs. These structs contain the mode/id/type of the -//! binding, but they also contain an LLVM value which points at an alloca -//! called `llmatch`. For by value bindings that are Copy, we also create -//! an extra alloca that we copy the matched value to so that any changes -//! we do to our copy are not reflected in the original and vice-versa. -//! We don't do this if it's a move since the original value can't be used, -//! thus allowing us to cheat in not creating an extra alloca. -//! -//! The `llmatch` binding always stores a pointer into the value being matched -//! which points at the data for the binding. If the value being matched has -//! type `T`, then `llmatch` will point at an alloca of type `T*` (and hence -//! `llmatch` has type `T**`). So, if you have a pattern like: -//! -//! let a: A = ...; -//! let b: B = ...; -//! match (a, b) { (ref c, d) => { ... } } -//! -//! For `c` and `d`, we would generate allocas of type `C*` and `D*` -//! respectively. These are called the `llmatch`. As we match, when we come -//! up against an identifier, we store the current pointer into the -//! corresponding alloca. -//! -//! Once a pattern is completely matched, and assuming that there is no guard -//! pattern, we will branch to a block that leads to the body itself. For any -//! by-value bindings, this block will first load the ptr from `llmatch` (the -//! one of type `D*`) and then load a second time to get the actual value (the -//! one of type `D`). For by ref bindings, the value of the local variable is -//! simply the first alloca. -//! -//! So, for the example above, we would generate a setup kind of like this: -//! -//! +-------+ -//! | Entry | -//! +-------+ -//! | -//! +--------------------------------------------+ -//! | llmatch_c = (addr of first half of tuple) | -//! | llmatch_d = (addr of second half of tuple) | -//! +--------------------------------------------+ -//! | -//! +--------------------------------------+ -//! | *llbinding_d = **llmatch_d | -//! +--------------------------------------+ -//! -//! If there is a guard, the situation is slightly different, because we must -//! execute the guard code. Moreover, we need to do so once for each of the -//! alternatives that lead to the arm, because if the guard fails, they may -//! have different points from which to continue the search. Therefore, in that -//! case, we generate code that looks more like: -//! -//! +-------+ -//! | Entry | -//! +-------+ -//! | -//! +-------------------------------------------+
-//! | llmatch_c = (addr of first half of tuple) | -//! | llmatch_d = (addr of second half of tuple) | -//! +-------------------------------------------+ -//! | -//! +-------------------------------------------------+ -//! | *llbinding_d = **llmatch_d | -//! | check condition | -//! | if false { goto next case } | -//! | if true { goto body } | -//! +-------------------------------------------------+ -//! -//! The handling for the cleanups is a bit... sensitive. Basically, the body -//! is the one that invokes `add_clean()` for each binding. During the guard -//! evaluation, we add temporary cleanups and revoke them after the guard is -//! evaluated (it could fail, after all). Note that guards and moves are -//! just plain incompatible. -//! -//! Some relevant helper functions that manage bindings: -//! - `create_bindings_map()` -//! - `insert_lllocals()` -//! -//! -//! ## Notes on vector pattern matching. -//! -//! Vector pattern matching is surprisingly tricky. The problem is that -//! the structure of the vector isn't fully known, and slice matches -//! can be done on subparts of it. -//! -//! The way that vector pattern matches are dealt with, then, is as -//! follows. First, we make the actual condition associated with a -//! vector pattern simply a vector length comparison. So the pattern -//! [1, .. x] gets the condition "vec len >= 1", and the pattern -//! [.. x] gets the condition "vec len >= 0". The problem here is that -//! having the condition "vec len >= 1" hold clearly does not mean that -//! only a pattern that has exactly that condition will match. This -//! means that it may well be the case that a condition holds, but none -//! of the patterns matching that condition match; to deal with this, -//! when doing vector length matches, we have match failures proceed to -//! the next condition to check. -//! -//! There are a couple more subtleties to deal with. While the "actual" -//! condition associated with vector length tests is simply a test on -//! the vector length, the actual vec_len Opt entry contains more -//! information used to restrict which matches are associated with it. -//! So that all matches in a submatch are matching against the same -//! values from inside the vector, they are split up by how many -//! elements they match at the front and at the back of the vector. In -//! order to make sure that arms are properly checked in order, even -//! with the overmatching conditions, each vec_len Opt entry is -//! associated with a range of matches. -//! Consider the following: -//! -//! match &[1, 2, 3] { -//! [1, 1, .. _] => 0, -//! [1, 2, 2, .. _] => 1, -//! [1, 2, 3, .. _] => 2, -//! [1, 2, .. _] => 3, -//! _ => 4 -//! } -//! The proper arm to match is arm 2, but arms 0 and 3 both have the -//! condition "len >= 2". If arm 3 was lumped in with arm 0, then the -//! wrong branch would be taken. Instead, vec_len Opts are associated -//! with a contiguous range of matches that have the same "shape". -//! This is sort of ugly and requires a bunch of special handling of -//! vec_len options.
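A compilable version of the `Foo` walkthrough from the module comment above (its pre-1.0 `int` replaced by `i32`), showing the arm set whose `C` cases force further specialization:

    enum Foo { A, B(i32), C(usize, usize) }

    fn classify(foo: &Foo) -> u32 {
        match *foo {
            Foo::A => 0,
            Foo::B(_) => 1,
            Foo::C(1, 2) => 2, // both sub-patterns must still be tested
            Foo::C(..) => 3,   // taken when the (1, 2) match fails
        }
    }

    fn main() {
        assert_eq!(classify(&Foo::C(1, 3)), 3);
    }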
- -pub use self::BranchKind::*; -pub use self::OptResult::*; -pub use self::TransBindingMode::*; -use self::Opt::*; -use self::FailureHandler::*; - -use llvm::{ValueRef, BasicBlockRef}; -use rustc_const_eval::check_match::{self, Constructor, StaticInliner}; -use rustc_const_eval::{compare_lit_exprs, eval_const_expr, fatal_const_eval_err}; -use rustc::hir::def::{Def, DefMap}; -use rustc::hir::def_id::DefId; -use middle::expr_use_visitor as euv; -use middle::lang_items::StrEqFnLangItem; -use middle::mem_categorization as mc; -use middle::mem_categorization::Categorization; -use rustc::hir::pat_util::*; -use rustc::ty::subst::Substs; -use adt; -use base::*; -use build::{AddCase, And, Br, CondBr, GEPi, InBoundsGEP, Load, PointerCast}; -use build::{Not, Store, Sub, add_comment}; -use build; -use callee::{Callee, ArgVals}; -use cleanup::{self, CleanupMethods, DropHintMethods}; -use common::*; -use consts; -use datum::*; -use debuginfo::{self, DebugLoc, ToDebugLoc}; -use expr::{self, Dest}; -use monomorphize; -use tvec; -use type_of; -use Disr; -use value::Value; -use rustc::ty::{self, Ty, TyCtxt}; -use rustc::traits::Reveal; -use session::config::NoDebugInfo; -use util::common::indenter; -use util::nodemap::FnvHashMap; -use util::ppaux; - -use std; -use std::cell::RefCell; -use std::cmp::Ordering; -use std::fmt; -use std::rc::Rc; -use rustc::hir::{self, PatKind}; -use syntax::ast::{self, DUMMY_NODE_ID, NodeId}; -use syntax_pos::Span; -use rustc::hir::fold::Folder; -use syntax::ptr::P; - -#[derive(Copy, Clone, Debug)] -struct ConstantExpr<'a>(&'a hir::Expr); - -impl<'a> ConstantExpr<'a> { - fn eq<'b, 'tcx>(self, other: ConstantExpr<'a>, tcx: TyCtxt<'b, 'tcx, 'tcx>) -> bool { - match compare_lit_exprs(tcx, self.0.span, self.0, other.0) { - Ok(result) => result == Ordering::Equal, - Err(_) => bug!("compare_lit_exprs: type mismatch"), - } - } -} - -// An option identifying a branch (either a literal, an enum variant or a range) -#[derive(Debug)] -enum Opt<'a, 'tcx> { - ConstantValue(ConstantExpr<'a>, DebugLoc), - ConstantRange(ConstantExpr<'a>, ConstantExpr<'a>, DebugLoc), - Variant(Disr, Rc<adt::Repr<'tcx>>, DefId, DebugLoc), - SliceLengthEqual(usize, DebugLoc), - SliceLengthGreaterOrEqual(/* prefix length */ usize, - /* suffix length */ usize, - DebugLoc), -} - -impl<'a, 'b, 'tcx> Opt<'a, 'tcx> { - fn eq(&self, other: &Opt<'a, 'tcx>, tcx: TyCtxt<'b, 'tcx, 'tcx>) -> bool { - match (self, other) { - (&ConstantValue(a, _), &ConstantValue(b, _)) => a.eq(b, tcx), - (&ConstantRange(a1, a2, _), &ConstantRange(b1, b2, _)) => { - a1.eq(b1, tcx) && a2.eq(b2, tcx) - } - (&Variant(a_disr, ref a_repr, a_def, _), - &Variant(b_disr, ref b_repr, b_def, _)) => { - a_disr == b_disr && *a_repr == *b_repr && a_def == b_def - } - (&SliceLengthEqual(a, _), &SliceLengthEqual(b, _)) => a == b, - (&SliceLengthGreaterOrEqual(a1, a2, _), - &SliceLengthGreaterOrEqual(b1, b2, _)) => { - a1 == b1 && a2 == b2 - } - _ => false - } - } - - fn trans<'blk>(&self, mut bcx: Block<'blk, 'tcx>) -> OptResult<'blk, 'tcx> { - use consts::TrueConst::Yes; - let _icx = push_ctxt("match::trans_opt"); - let ccx = bcx.ccx(); - match *self { - ConstantValue(ConstantExpr(lit_expr), _) => { - let lit_ty = bcx.tcx().node_id_to_type(lit_expr.id); - let expr = consts::const_expr(ccx, &lit_expr, bcx.fcx.param_substs, None, Yes); - let llval = match expr { - Ok((llval, _)) => llval, - Err(err) => { - fatal_const_eval_err(bcx.tcx(), err.as_inner(), lit_expr.span, "pattern"); - } - }; - let lit_datum = immediate_rvalue(llval, lit_ty);
- let lit_datum = unpack_datum!(bcx, lit_datum.to_appropriate_datum(bcx)); - SingleResult(Result::new(bcx, lit_datum.val)) - } - ConstantRange(ConstantExpr(ref l1), ConstantExpr(ref l2), _) => { - let l1 = match consts::const_expr(ccx, &l1, bcx.fcx.param_substs, None, Yes) { - Ok((l1, _)) => l1, - Err(err) => fatal_const_eval_err(bcx.tcx(), err.as_inner(), l1.span, "pattern"), - }; - let l2 = match consts::const_expr(ccx, &l2, bcx.fcx.param_substs, None, Yes) { - Ok((l2, _)) => l2, - Err(err) => fatal_const_eval_err(bcx.tcx(), err.as_inner(), l2.span, "pattern"), - }; - RangeResult(Result::new(bcx, l1), Result::new(bcx, l2)) - } - Variant(disr_val, ref repr, _, _) => { - SingleResult(Result::new(bcx, adt::trans_case(bcx, &repr, disr_val))) - } - SliceLengthEqual(length, _) => { - SingleResult(Result::new(bcx, C_uint(ccx, length))) - } - SliceLengthGreaterOrEqual(prefix, suffix, _) => { - LowerBound(Result::new(bcx, C_uint(ccx, prefix + suffix))) - } - } - } - - fn debug_loc(&self) -> DebugLoc { - match *self { - ConstantValue(_,debug_loc) | - ConstantRange(_, _, debug_loc) | - Variant(_, _, _, debug_loc) | - SliceLengthEqual(_, debug_loc) | - SliceLengthGreaterOrEqual(_, _, debug_loc) => debug_loc - } - } -} - -#[derive(Copy, Clone, PartialEq)] -pub enum BranchKind { - NoBranch, - Single, - Switch, - Compare, - CompareSliceLength -} - -pub enum OptResult<'blk, 'tcx: 'blk> { - SingleResult(Result<'blk, 'tcx>), - RangeResult(Result<'blk, 'tcx>, Result<'blk, 'tcx>), - LowerBound(Result<'blk, 'tcx>) -} - -#[derive(Clone, Copy, PartialEq)] -pub enum TransBindingMode { - /// By-value binding for a copy type: copies from matched data - /// into a fresh LLVM alloca. - TrByCopy(/* llbinding */ ValueRef), - - /// By-value binding for a non-copy type where we copy into a - /// fresh LLVM alloca; this most accurately reflects the language - /// semantics (e.g. it properly handles overwrites of the matched - /// input), but potentially injects an unwanted copy. - TrByMoveIntoCopy(/* llbinding */ ValueRef), - - /// Binding a non-copy type by reference under the hood; this is - /// a codegen optimization to avoid unnecessary memory traffic. - TrByMoveRef, - - /// By-ref binding exposed in the original source input. - TrByRef, -} - -impl TransBindingMode { - /// if binding by making a fresh copy; returns the alloca that it - /// will copy into; otherwise None. - fn alloca_if_copy(&self) -> Option<ValueRef> { - match *self { - TrByCopy(llbinding) | TrByMoveIntoCopy(llbinding) => Some(llbinding), - TrByMoveRef | TrByRef => None, - } - } -} - -/// Information about a pattern binding: -/// - `llmatch` is a pointer to a stack slot. The stack slot contains a -/// pointer into the value being matched. Hence, llmatch has type `T**` -/// where `T` is the value being matched. -/// - `trmode` is the trans binding mode -/// - `id` is the node id of the binding -/// - `ty` is the Rust type of the binding -#[derive(Clone, Copy)] -pub struct BindingInfo<'tcx> { - pub llmatch: ValueRef, - pub trmode: TransBindingMode, - pub id: ast::NodeId, - pub span: Span, - pub ty: Ty<'tcx>, -} - -type BindingsMap<'tcx> = FnvHashMap<ast::Name, BindingInfo<'tcx>>; - -struct ArmData<'p, 'blk, 'tcx: 'blk> { - bodycx: Block<'blk, 'tcx>, - arm: &'p hir::Arm, - bindings_map: BindingsMap<'tcx> -} - -/// Info about Match. -/// If all `pats` are matched then arm `data` will be executed. -/// As we proceed `bound_ptrs` are filled with pointers to values to be bound, -/// these pointers are stored in llmatch variables just before executing `data` arm.
-struct Match<'a, 'p: 'a, 'blk: 'a, 'tcx: 'blk> { - pats: Vec<&'p hir::Pat>, - data: &'a ArmData<'p, 'blk, 'tcx>, - bound_ptrs: Vec<(ast::Name, ValueRef)>, - // Thread along renamings done by the check_match::StaticInliner, so we can - // map back to original NodeIds - pat_renaming_map: Option<&'a FnvHashMap<(NodeId, Span), NodeId>> -} - -impl<'a, 'p, 'blk, 'tcx> fmt::Debug for Match<'a, 'p, 'blk, 'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - if ppaux::verbose() { - // for many programs, this just takes too long to serialize - write!(f, "{:?}", self.pats) - } else { - write!(f, "{} pats", self.pats.len()) - } - } -} - -fn has_nested_bindings(m: &[Match], col: usize) -> bool { - for br in m { - if let PatKind::Binding(_, _, Some(..)) = br.pats[col].node { - return true - } - } - false -} - -// As noted in `fn match_datum`, we should eventually pass around a -// `Datum<Lvalue>` for the `val`; but until we get to that point, this -// `MatchInput` struct will serve -- it has everything `Datum<Lvalue>` -// does except for the type field. -#[derive(Copy, Clone)] -pub struct MatchInput { val: ValueRef, lval: Lvalue } - -impl<'tcx> Datum<'tcx, Lvalue> { - pub fn match_input(&self) -> MatchInput { - MatchInput { - val: self.val, - lval: self.kind, - } - } -} - -impl fmt::Debug for MatchInput { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt(&Value(self.val), f) - } -} - -impl MatchInput { - fn from_val(val: ValueRef) -> MatchInput { - MatchInput { - val: val, - lval: Lvalue::new("MatchInput::from_val"), - } - } - - fn to_datum<'tcx>(self, ty: Ty<'tcx>) -> Datum<'tcx, Lvalue> { - Datum::new(self.val, ty, self.lval) - } -} - -fn expand_nested_bindings<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - m: &[Match<'a, 'p, 'blk, 'tcx>], - col: usize, - val: MatchInput) - -> Vec<Match<'a, 'p, 'blk, 'tcx>> { - debug!("expand_nested_bindings(bcx={}, m={:?}, col={}, val={:?})", - bcx.to_str(), m, col, val); - let _indenter = indenter(); - - m.iter().map(|br| { - let mut bound_ptrs = br.bound_ptrs.clone(); - let mut pat = br.pats[col]; - loop { - pat = match pat.node { - PatKind::Binding(_, ref path, Some(ref inner)) => { - bound_ptrs.push((path.node, val.val)); - &inner - }, - _ => break - } - } - - let mut pats = br.pats.clone(); - pats[col] = pat; - Match { - pats: pats, - data: &br.data, - bound_ptrs: bound_ptrs, - pat_renaming_map: br.pat_renaming_map, - } - }).collect() -} - -fn enter_match<'a, 'b, 'p, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, - m: &[Match<'a, 'p, 'blk, 'tcx>], - col: usize, - val: MatchInput, - mut e: F) - -> Vec<Match<'a, 'p, 'blk, 'tcx>> where - F: FnMut(&[(&'p hir::Pat, Option<Ty<'tcx>>)]) - -> Option<Vec<(&'p hir::Pat, Option<Ty<'tcx>>)>>, -{ - debug!("enter_match(bcx={}, m={:?}, col={}, val={:?})", - bcx.to_str(), m, col, val); - let _indenter = indenter(); - - m.iter().filter_map(|br| { - let pats : Vec<_> = br.pats.iter().map(|p| (*p, None)).collect(); - e(&pats).map(|pats| { - let this = br.pats[col]; - let mut bound_ptrs = br.bound_ptrs.clone(); - match this.node { - PatKind::Binding(_, ref path, None) => { - bound_ptrs.push((path.node, val.val)); - } - PatKind::Vec(ref before, Some(ref slice), ref after) => { - if let PatKind::Binding(_, ref path, None) = slice.node { - let subslice_val = bind_subslice_pat( - bcx, this.id, val, - before.len(), after.len()); - bound_ptrs.push((path.node, subslice_val)); - } - } - _ => {} - } - Match { - pats: pats.into_iter().map(|p| p.0).collect(), - data: br.data, - bound_ptrs: bound_ptrs, - pat_renaming_map: br.pat_renaming_map, - } - }) - }).collect() -} - -fn enter_default<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- m: &[Match<'a, 'p, 'blk, 'tcx>], - col: usize, - val: MatchInput) - -> Vec<Match<'a, 'p, 'blk, 'tcx>> { - debug!("enter_default(bcx={}, m={:?}, col={}, val={:?})", - bcx.to_str(), m, col, val); - let _indenter = indenter(); - - // Collect all of the matches that can match against anything. - enter_match(bcx, m, col, val, |pats| { - match pats[col].0.node { - PatKind::Binding(..) | PatKind::Wild => { - let mut r = pats[..col].to_vec(); - r.extend_from_slice(&pats[col + 1..]); - Some(r) - } - _ => None - } - }) -} - -// nmatsakis: what does enter_opt do? -// in trans/match -// trans/match.rs is like stumbling around in a dark cave -// pcwalton: the enter family of functions adjust the set of -// patterns as needed -// yeah, at some point I kind of achieved some level of -// understanding -// anyhow, they adjust the patterns given that something of that -// kind has been found -// pcwalton: ok, right, so enter_XXX() adjusts the patterns, as I -// said -// enter_match() kind of embodies the generic code -// it is provided with a function that tests each pattern to see -// if it might possibly apply and so forth -// so, if you have a pattern like {a: _, b: _, _} and one like _ -// then _ would be expanded to (_, _) -// one spot for each of the sub-patterns -// enter_opt() is one of the more complex; it covers the fallible -// cases -// enter_rec_or_struct() or enter_tuple() are simpler, since they -// are infallible patterns -// so all patterns must either be records (resp. tuples) or -// wildcards - -/// The above is now outdated in that enter_match() now takes a function that -/// takes the complete row of patterns rather than just the first one. -/// Also, most of the enter_() family functions have been unified with -/// the check_match specialization step. -fn enter_opt<'a, 'p, 'blk, 'tcx>( - bcx: Block<'blk, 'tcx>, - _: ast::NodeId, - m: &[Match<'a, 'p, 'blk, 'tcx>], - opt: &Opt, - col: usize, - variant_size: usize, - val: MatchInput) - -> Vec<Match<'a, 'p, 'blk, 'tcx>> { - debug!("enter_opt(bcx={}, m={:?}, opt={:?}, col={}, val={:?})", - bcx.to_str(), m, *opt, col, val); - let _indenter = indenter(); - - let ctor = match opt { - &ConstantValue(ConstantExpr(expr), _) => Constructor::ConstantValue( - eval_const_expr(bcx.tcx(), &expr) - ), - &ConstantRange(ConstantExpr(lo), ConstantExpr(hi), _) => Constructor::ConstantRange( - eval_const_expr(bcx.tcx(), &lo), - eval_const_expr(bcx.tcx(), &hi) - ), - &SliceLengthEqual(n, _) => - Constructor::Slice(n), - &SliceLengthGreaterOrEqual(before, after, _) => - Constructor::SliceWithSubslice(before, after), - &Variant(_, _, def_id, _) => - Constructor::Variant(def_id) - }; - - let param_env = bcx.tcx().empty_parameter_environment(); - let mcx = check_match::MatchCheckCtxt { - tcx: bcx.tcx(), - param_env: param_env, - }; - enter_match(bcx, m, col, val, |pats| - check_match::specialize(&mcx, &pats[..], &ctor, col, variant_size) - ) -} - -// Returns the options in one column of matches. An option is something that -// needs to be conditionally matched at runtime; for example, the discriminant -// on a set of enum variants or a literal.
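Concretely, for the match below the single pattern column yields two `Opt`s, a `ConstantValue` and a `ConstantRange`; the wildcard contributes no option and is instead swept up by `enter_default` (an illustrative example, not from the original source):

    fn bucket(x: i32) -> &'static str {
        match x {
            0 => "zero",      // ConstantValue(0)
            1...9 => "small", // ConstantRange(1, 9); `...` was the inclusive-range syntax of the era
            _ => "other",     // wildcard: no Opt, handled by the default case
        }
    }

    fn main() {
        assert_eq!(bucket(4), "small");
    }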
-fn get_branches<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - m: &[Match<'a, 'p, 'blk, 'tcx>], - col: usize) - -> Vec<Opt<'a, 'tcx>> { - let tcx = bcx.tcx(); - - let mut found: Vec<Opt> = vec![]; - for br in m { - let cur = br.pats[col]; - let debug_loc = match br.pat_renaming_map { - Some(pat_renaming_map) => { - match pat_renaming_map.get(&(cur.id, cur.span)) { - Some(&id) => DebugLoc::At(id, cur.span), - None => DebugLoc::At(cur.id, cur.span), - } - } - None => DebugLoc::None - }; - - let opt = match cur.node { - PatKind::Lit(ref l) => { - ConstantValue(ConstantExpr(&l), debug_loc) - } - PatKind::Path(..) | PatKind::TupleStruct(..) | PatKind::Struct(..) => { - match tcx.expect_def(cur.id) { - Def::Variant(enum_id, var_id) => { - let variant = tcx.lookup_adt_def(enum_id).variant_with_id(var_id); - Variant(Disr::from(variant.disr_val), - adt::represent_node(bcx, cur.id), - var_id, - debug_loc) - } - _ => continue - } - } - PatKind::Range(ref l1, ref l2) => { - ConstantRange(ConstantExpr(&l1), ConstantExpr(&l2), debug_loc) - } - PatKind::Vec(ref before, None, ref after) => { - SliceLengthEqual(before.len() + after.len(), debug_loc) - } - PatKind::Vec(ref before, Some(_), ref after) => { - SliceLengthGreaterOrEqual(before.len(), after.len(), debug_loc) - } - _ => continue - }; - - if !found.iter().any(|x| x.eq(&opt, tcx)) { - found.push(opt); - } - } - found -} - -struct ExtractedBlock<'blk, 'tcx: 'blk> { - vals: Vec<ValueRef>, - bcx: Block<'blk, 'tcx>, -} - -fn extract_variant_args<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - repr: &adt::Repr<'tcx>, - disr_val: Disr, - val: MatchInput) - -> ExtractedBlock<'blk, 'tcx> { - let _icx = push_ctxt("match::extract_variant_args"); - // Assume enums are always sized for now. - let val = adt::MaybeSizedValue::sized(val.val); - let args = (0..adt::num_args(repr, disr_val)).map(|i| { - adt::trans_field_ptr(bcx, repr, val, disr_val, i) - }).collect(); - - ExtractedBlock { vals: args, bcx: bcx } -} - -/// Helper for converting from the ValueRef that we pass around in the match code, which is always -/// an lvalue, into a Datum. Eventually we should just pass around a Datum and be done with it.
-fn match_datum<'tcx>(val: MatchInput, left_ty: Ty<'tcx>) -> Datum<'tcx, Lvalue> { - val.to_datum(left_ty) -} - -fn bind_subslice_pat(bcx: Block, - pat_id: ast::NodeId, - val: MatchInput, - offset_left: usize, - offset_right: usize) -> ValueRef { - let _icx = push_ctxt("match::bind_subslice_pat"); - let vec_ty = node_id_type(bcx, pat_id); - let vec_ty_contents = match vec_ty.sty { - ty::TyBox(ty) => ty, - ty::TyRef(_, mt) | ty::TyRawPtr(mt) => mt.ty, - _ => vec_ty - }; - let unit_ty = vec_ty_contents.sequence_element_type(bcx.tcx()); - let vec_datum = match_datum(val, vec_ty); - let (base, len) = vec_datum.get_vec_base_and_len(bcx); - - let slice_begin = InBoundsGEP(bcx, base, &[C_uint(bcx.ccx(), offset_left)]); - let diff = offset_left + offset_right; - if let ty::TyArray(ty, n) = vec_ty_contents.sty { - let array_ty = bcx.tcx().mk_array(ty, n-diff); - let llty_array = type_of::type_of(bcx.ccx(), array_ty); - return PointerCast(bcx, slice_begin, llty_array.ptr_to()); - } - - let slice_len_offset = C_uint(bcx.ccx(), diff); - let slice_len = Sub(bcx, len, slice_len_offset, DebugLoc::None); - let slice_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReErased), - bcx.tcx().mk_slice(unit_ty)); - let scratch = rvalue_scratch_datum(bcx, slice_ty, ""); - Store(bcx, slice_begin, expr::get_dataptr(bcx, scratch.val)); - Store(bcx, slice_len, expr::get_meta(bcx, scratch.val)); - scratch.val -} - -fn extract_vec_elems<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - left_ty: Ty<'tcx>, - before: usize, - after: usize, - val: MatchInput) - -> ExtractedBlock<'blk, 'tcx> { - let _icx = push_ctxt("match::extract_vec_elems"); - let vec_datum = match_datum(val, left_ty); - let (base, len) = vec_datum.get_vec_base_and_len(bcx); - let mut elems = vec![]; - elems.extend((0..before).map(|i| GEPi(bcx, base, &[i]))); - elems.extend((0..after).rev().map(|i| { - InBoundsGEP(bcx, base, &[ - Sub(bcx, len, C_uint(bcx.ccx(), i + 1), DebugLoc::None) - ]) - })); - ExtractedBlock { vals: elems, bcx: bcx } -} - -// Macro for deciding whether any of the remaining matches fit a given kind of -// pattern. Note that, because the macro is well-typed, either ALL of the -// matches should fit that sort of pattern or NONE (however, some of the -// matches may be wildcards like _ or identifiers). -macro_rules! any_pat { - ($m:expr, $col:expr, $pattern:pat) => ( - ($m).iter().any(|br| { - match br.pats[$col].node { - $pattern => true, - _ => false - } - }) - ) -} - -fn any_uniq_pat(m: &[Match], col: usize) -> bool { - any_pat!(m, col, PatKind::Box(_)) -} - -fn any_region_pat(m: &[Match], col: usize) -> bool { - any_pat!(m, col, PatKind::Ref(..)) -} - -fn any_irrefutable_adt_pat(tcx: TyCtxt, m: &[Match], col: usize) -> bool { - m.iter().any(|br| { - let pat = br.pats[col]; - match pat.node { - PatKind::Tuple(..) => true, - PatKind::Struct(..) | PatKind::TupleStruct(..) | PatKind::Path(..) => { - match tcx.expect_def(pat.id) { - Def::Struct(..) | Def::TyAlias(..) | Def::AssociatedTy(..) => true, - _ => false, - } - } - _ => false - } - }) -} - -/// What to do when the pattern match fails. 
-enum FailureHandler { - Infallible, - JumpToBasicBlock(BasicBlockRef), - Unreachable -} - -impl FailureHandler { - fn is_fallible(&self) -> bool { - match *self { - Infallible => false, - _ => true - } - } - - fn is_infallible(&self) -> bool { - !self.is_fallible() - } - - fn handle_fail(&self, bcx: Block) { - match *self { - Infallible => - bug!("attempted to panic in a non-panicking panic handler!"), - JumpToBasicBlock(basic_block) => - Br(bcx, basic_block, DebugLoc::None), - Unreachable => - build::Unreachable(bcx) - } - } -} - -fn pick_column_to_specialize(def_map: &RefCell<DefMap>, m: &[Match]) -> Option<usize> { - fn pat_score(def_map: &RefCell<DefMap>, pat: &hir::Pat) -> usize { - match pat.node { - PatKind::Binding(_, _, Some(ref inner)) => pat_score(def_map, &inner), - _ if pat_is_refutable(&def_map.borrow(), pat) => 1, - _ => 0 - } - } - - let column_score = |m: &[Match], col: usize| -> usize { - let total_score = m.iter() - .map(|row| row.pats[col]) - .map(|pat| pat_score(def_map, pat)) - .sum(); - - // Irrefutable columns always go first, they'd only be duplicated in the branches. - if total_score == 0 { - std::usize::MAX - } else { - total_score - } - }; - - let column_contains_any_nonwild_patterns = |&col: &usize| -> bool { - m.iter().any(|row| match row.pats[col].node { - PatKind::Wild => false, - _ => true - }) - }; - - (0..m[0].pats.len()) - .filter(column_contains_any_nonwild_patterns) - .map(|col| (col, column_score(m, col))) - .max_by_key(|&(_, score)| score) - .map(|(col, _)| col) -} - -// Compiles a comparison between two things. -fn compare_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>, - lhs: ValueRef, - rhs: ValueRef, - rhs_t: Ty<'tcx>, - debug_loc: DebugLoc) - -> Result<'blk, 'tcx> { - fn compare_str<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - lhs_data: ValueRef, - lhs_len: ValueRef, - rhs_data: ValueRef, - rhs_len: ValueRef, - rhs_t: Ty<'tcx>, - debug_loc: DebugLoc) - -> Result<'blk, 'tcx> { - let did = langcall(bcx.tcx(), - None, - &format!("comparison of `{}`", rhs_t), - StrEqFnLangItem); - let args = [lhs_data, lhs_len, rhs_data, rhs_len]; - Callee::def(bcx.ccx(), did, Substs::empty(bcx.tcx())) - .call(bcx, debug_loc, ArgVals(&args), None) - } - - let _icx = push_ctxt("compare_values"); - if rhs_t.is_scalar() { - let cmp = compare_scalar_types(cx, lhs, rhs, rhs_t, hir::BiEq, debug_loc); - return Result::new(cx, cmp); - } - - match rhs_t.sty { - ty::TyRef(_, mt) => match mt.ty.sty { - ty::TyStr => { - let lhs_data = Load(cx, expr::get_dataptr(cx, lhs)); - let lhs_len = Load(cx, expr::get_meta(cx, lhs)); - let rhs_data = Load(cx, expr::get_dataptr(cx, rhs)); - let rhs_len = Load(cx, expr::get_meta(cx, rhs)); - compare_str(cx, lhs_data, lhs_len, rhs_data, rhs_len, rhs_t, debug_loc) - } - ty::TyArray(ty, _) | ty::TySlice(ty) => match ty.sty { - ty::TyUint(ast::UintTy::U8) => { - // NOTE: cast &[u8] and &[u8; N] to &str and abuse the str_eq lang item, - // which calls memcmp().
- let pat_len = val_ty(rhs).element_type().array_length(); - let ty_str_slice = cx.tcx().mk_static_str(); - - let rhs_data = GEPi(cx, rhs, &[0, 0]); - let rhs_len = C_uint(cx.ccx(), pat_len); - - let lhs_data; - let lhs_len; - if val_ty(lhs) == val_ty(rhs) { - // Both the discriminant and the pattern are thin pointers - lhs_data = GEPi(cx, lhs, &[0, 0]); - lhs_len = C_uint(cx.ccx(), pat_len); - } else { - // The discriminant is a fat pointer - let llty_str_slice = type_of::type_of(cx.ccx(), ty_str_slice).ptr_to(); - let lhs_str = PointerCast(cx, lhs, llty_str_slice); - lhs_data = Load(cx, expr::get_dataptr(cx, lhs_str)); - lhs_len = Load(cx, expr::get_meta(cx, lhs_str)); - } - - compare_str(cx, lhs_data, lhs_len, rhs_data, rhs_len, rhs_t, debug_loc) - }, - _ => bug!("only byte strings supported in compare_values"), - }, - _ => bug!("only string and byte strings supported in compare_values"), - }, - _ => bug!("only scalars, byte strings, and strings supported in compare_values"), - } -} - -/// For each binding in `data.bindings_map`, adds an appropriate entry into the `fcx.lllocals` map -fn insert_lllocals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - bindings_map: &BindingsMap<'tcx>, - cs: Option<cleanup::ScopeId>) - -> Block<'blk, 'tcx> { - for (&name, &binding_info) in bindings_map { - let (llval, aliases_other_state) = match binding_info.trmode { - // By value mut binding for a copy type: load from the ptr - // into the matched value and copy to our alloca - TrByCopy(llbinding) | - TrByMoveIntoCopy(llbinding) => { - let llval = Load(bcx, binding_info.llmatch); - let lvalue = match binding_info.trmode { - TrByCopy(..) => - Lvalue::new("_match::insert_lllocals"), - TrByMoveIntoCopy(..) => { - // match_input moves from the input into a - // separate stack slot. - // - // E.g. consider moving the value `D(A)` out - // of the tuple `(D(A), D(B))` and into the - // local variable `x` via the pattern `(x,_)`, - // leaving the remainder of the tuple `(_, - // D(B))` still to be dropped in the future. - // - // Thus, here we must zero the place that we - // are moving *from*, because we do not yet - // track drop flags for a fragmented parent - // match input expression. - // - // Longer term we will be able to map the move - // into `(x, _)` up to the parent path that - // owns the whole tuple, and mark the - // corresponding stack-local drop-flag - // tracking the first component of the tuple. - let hint_kind = HintKind::ZeroAndMaintain; - Lvalue::new_with_hint("_match::insert_lllocals (match_input)", - bcx, binding_info.id, hint_kind) - } - _ => bug!(), - }; - let datum = Datum::new(llval, binding_info.ty, lvalue); - call_lifetime_start(bcx, llbinding); - bcx = datum.store_to(bcx, llbinding); - if let Some(cs) = cs { - bcx.fcx.schedule_lifetime_end(cs, llbinding); - } - - (llbinding, false) - }, - - // By value move bindings: load from the ptr into the matched value - TrByMoveRef => (Load(bcx, binding_info.llmatch), true), - - // By ref binding: use the ptr into the matched value - TrByRef => (binding_info.llmatch, true), - }; - - - // A local that aliases some other state must be zeroed, since - // the other state (e.g. some parent data that we matched - // into) will still have its subcomponents (such as this - // local) destructed at the end of the parent's scope. Longer - // term, we will properly map such parents to the set of - // unique drop flags for its fragments.
- let hint_kind = if aliases_other_state { - HintKind::ZeroAndMaintain - } else { - HintKind::DontZeroJustUse - }; - let lvalue = Lvalue::new_with_hint("_match::insert_lllocals (local)", - bcx, - binding_info.id, - hint_kind); - let datum = Datum::new(llval, binding_info.ty, lvalue); - if let Some(cs) = cs { - let opt_datum = lvalue.dropflag_hint(bcx); - bcx.fcx.schedule_lifetime_end(cs, binding_info.llmatch); - bcx.fcx.schedule_drop_and_fill_mem(cs, llval, binding_info.ty, opt_datum); - } - - debug!("binding {} to {:?}", binding_info.id, Value(llval)); - bcx.fcx.lllocals.borrow_mut().insert(binding_info.id, datum); - debuginfo::create_match_binding_metadata(bcx, name, binding_info); - } - bcx -} - -fn compile_guard<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - guard_expr: &hir::Expr, - data: &ArmData<'p, 'blk, 'tcx>, - m: &[Match<'a, 'p, 'blk, 'tcx>], - vals: &[MatchInput], - chk: &FailureHandler, - has_genuine_default: bool) - -> Block<'blk, 'tcx> { - debug!("compile_guard(bcx={}, guard_expr={:?}, m={:?}, vals={:?})", - bcx.to_str(), guard_expr, m, vals); - let _indenter = indenter(); - - let mut bcx = insert_lllocals(bcx, &data.bindings_map, None); - - let val = unpack_datum!(bcx, expr::trans(bcx, guard_expr)); - let val = val.to_llbool(bcx); - - for (_, &binding_info) in &data.bindings_map { - if let Some(llbinding) = binding_info.trmode.alloca_if_copy() { - call_lifetime_end(bcx, llbinding) - } - } - - for (_, &binding_info) in &data.bindings_map { - bcx.fcx.lllocals.borrow_mut().remove(&binding_info.id); - } - - with_cond(bcx, Not(bcx, val, guard_expr.debug_loc()), |bcx| { - for (_, &binding_info) in &data.bindings_map { - call_lifetime_end(bcx, binding_info.llmatch); - } - match chk { - // If the default arm is the only one left, move on to the next - // condition explicitly rather than (possibly) falling back to - // the default arm. 
- &JumpToBasicBlock(_) if m.len() == 1 && has_genuine_default => { - chk.handle_fail(bcx); - } - _ => { - compile_submatch(bcx, m, vals, chk, has_genuine_default); - } - }; - bcx - }) -} - -fn compile_submatch<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - m: &[Match<'a, 'p, 'blk, 'tcx>], - vals: &[MatchInput], - chk: &FailureHandler, - has_genuine_default: bool) { - debug!("compile_submatch(bcx={}, m={:?}, vals=[{:?}])", - bcx.to_str(), m, vals); - let _indenter = indenter(); - let _icx = push_ctxt("match::compile_submatch"); - let mut bcx = bcx; - if m.is_empty() { - if chk.is_fallible() { - chk.handle_fail(bcx); - } - return; - } - - let tcx = bcx.tcx(); - let def_map = &tcx.def_map; - match pick_column_to_specialize(def_map, m) { - Some(col) => { - let val = vals[col]; - if has_nested_bindings(m, col) { - let expanded = expand_nested_bindings(bcx, m, col, val); - compile_submatch_continue(bcx, - &expanded[..], - vals, - chk, - col, - val, - has_genuine_default) - } else { - compile_submatch_continue(bcx, m, vals, chk, col, val, has_genuine_default) - } - } - None => { - let data = &m[0].data; - for &(ref name, ref value_ptr) in &m[0].bound_ptrs { - let binfo = *data.bindings_map.get(name).unwrap(); - call_lifetime_start(bcx, binfo.llmatch); - if binfo.trmode == TrByRef && type_is_fat_ptr(bcx.tcx(), binfo.ty) { - expr::copy_fat_ptr(bcx, *value_ptr, binfo.llmatch); - } - else { - Store(bcx, *value_ptr, binfo.llmatch); - } - } - match data.arm.guard { - Some(ref guard_expr) => { - bcx = compile_guard(bcx, - &guard_expr, - m[0].data, - &m[1..m.len()], - vals, - chk, - has_genuine_default); - } - _ => () - } - Br(bcx, data.bodycx.llbb, DebugLoc::None); - } - } -} - -fn compile_submatch_continue<'a, 'p, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - m: &[Match<'a, 'p, 'blk, 'tcx>], - vals: &[MatchInput], - chk: &FailureHandler, - col: usize, - val: MatchInput, - has_genuine_default: bool) { - let fcx = bcx.fcx; - let tcx = bcx.tcx(); - - let mut vals_left = vals[0..col].to_vec(); - vals_left.extend_from_slice(&vals[col + 1..]); - let ccx = bcx.fcx.ccx; - - // Find a real id (we're adding placeholder wildcard patterns, but - // each column is guaranteed to have at least one real pattern) - let pat_id = m.iter().map(|br| br.pats[col].id) - .find(|&id| id != DUMMY_NODE_ID) - .unwrap_or(DUMMY_NODE_ID); - - let left_ty = if pat_id == DUMMY_NODE_ID { - tcx.mk_nil() - } else { - node_id_type(bcx, pat_id) - }; - - let mcx = check_match::MatchCheckCtxt { - tcx: bcx.tcx(), - param_env: bcx.tcx().empty_parameter_environment(), - }; - let adt_vals = if any_irrefutable_adt_pat(bcx.tcx(), m, col) { - let repr = adt::represent_type(bcx.ccx(), left_ty); - let arg_count = adt::num_args(&repr, Disr(0)); - let (arg_count, struct_val) = if type_is_sized(bcx.tcx(), left_ty) { - (arg_count, val.val) - } else { - // For an unsized ADT (i.e. DST struct), we need to treat - // the last field specially: instead of simply passing a - // ValueRef pointing to that field, as with all the others, - // we skip it and instead construct a 'fat ptr' below. 
- (arg_count - 1, Load(bcx, expr::get_dataptr(bcx, val.val))) - }; - let mut field_vals: Vec<ValueRef> = (0..arg_count).map(|ix| - // By definition, these are all sized - adt::trans_field_ptr(bcx, &repr, adt::MaybeSizedValue::sized(struct_val), Disr(0), ix) - ).collect(); - - match left_ty.sty { - ty::TyStruct(def, substs) if !type_is_sized(bcx.tcx(), left_ty) => { - // The last field is technically unsized but - // since we can only ever match that field behind - // a reference we construct a fat ptr here. - let unsized_ty = def.struct_variant().fields.last().map(|field| { - monomorphize::field_ty(bcx.tcx(), substs, field) - }).unwrap(); - let scratch = alloc_ty(bcx, unsized_ty, "__struct_field_fat_ptr"); - - let meta = Load(bcx, expr::get_meta(bcx, val.val)); - let struct_val = adt::MaybeSizedValue::unsized_(struct_val, meta); - - let data = adt::trans_field_ptr(bcx, &repr, struct_val, Disr(0), arg_count); - Store(bcx, data, expr::get_dataptr(bcx, scratch)); - Store(bcx, meta, expr::get_meta(bcx, scratch)); - field_vals.push(scratch); - } - _ => {} - } - Some(field_vals) - } else if any_uniq_pat(m, col) || any_region_pat(m, col) { - let ptr = if type_is_fat_ptr(bcx.tcx(), left_ty) { - val.val - } else { - Load(bcx, val.val) - }; - Some(vec!(ptr)) - } else { - match left_ty.sty { - ty::TyArray(_, n) => { - let args = extract_vec_elems(bcx, left_ty, n, 0, val); - Some(args.vals) - } - _ => None - } - }; - match adt_vals { - Some(field_vals) => { - let pats = enter_match(bcx, m, col, val, |pats| - check_match::specialize(&mcx, pats, - &Constructor::Single, col, - field_vals.len()) - ); - let mut vals: Vec<_> = field_vals.into_iter() - .map(|v|MatchInput::from_val(v)) - .collect(); - vals.extend_from_slice(&vals_left); - compile_submatch(bcx, &pats, &vals, chk, has_genuine_default); - return; - } - _ => () - } - - // Decide what kind of branch we need - let opts = get_branches(bcx, m, col); - debug!("options={:?}", opts); - let mut kind = NoBranch; - let mut test_val = val.val; - debug!("test_val={:?}", Value(test_val)); - if !opts.is_empty() { - match opts[0] { - ConstantValue(..) | ConstantRange(..) => { - test_val = load_if_immediate(bcx, val.val, left_ty); - kind = if left_ty.is_integral() { - Switch - } else { - Compare - }; - } - Variant(_, ref repr, _, _) => { - let (the_kind, val_opt) = adt::trans_switch(bcx, &repr, - val.val, true); - kind = the_kind; - if let Some(tval) = val_opt { test_val = tval; } - } - SliceLengthEqual(..) | SliceLengthGreaterOrEqual(..) => { - let (_, len) = tvec::get_base_and_len(bcx, val.val, left_ty); - test_val = len; - kind = Switch; - } - } - } - for o in &opts { - match *o { - ConstantRange(..) => { kind = Compare; break }, - SliceLengthGreaterOrEqual(..) => { kind = CompareSliceLength; break }, - _ => () - } - } - let else_cx = match kind { - NoBranch | Single => bcx, - _ => bcx.fcx.new_temp_block("match_else") - }; - let sw = if kind == Switch { - build::Switch(bcx, test_val, else_cx.llbb, opts.len()) - } else { - C_int(ccx, 0) // Placeholder for when not using a switch - }; - - let defaults = enter_default(else_cx, m, col, val); - let exhaustive = chk.is_infallible() && defaults.is_empty(); - let len = opts.len(); - - if exhaustive && kind == Switch { - build::Unreachable(else_cx); - } - - // Compile subtrees for each option - for (i, opt) in opts.iter().enumerate() { - // In some cases of range and vector pattern matching, we need to - // override the failure case so that instead of failing, it proceeds - // to try more matching.
branch_chk, then, is the proper failure case - // for the current conditional branch. - let mut branch_chk = None; - let mut opt_cx = else_cx; - let debug_loc = opt.debug_loc(); - - if kind == Switch || !exhaustive || i + 1 < len { - opt_cx = bcx.fcx.new_temp_block("match_case"); - match kind { - Single => Br(bcx, opt_cx.llbb, debug_loc), - Switch => { - match opt.trans(bcx) { - SingleResult(r) => { - AddCase(sw, r.val, opt_cx.llbb); - bcx = r.bcx; - } - _ => { - bug!( - "in compile_submatch, expected \ - opt.trans() to return a SingleResult") - } - } - } - Compare | CompareSliceLength => { - let t = if kind == Compare { - left_ty - } else { - tcx.types.usize // vector length - }; - let Result { bcx: after_cx, val: matches } = { - match opt.trans(bcx) { - SingleResult(Result { bcx, val }) => { - compare_values(bcx, test_val, val, t, debug_loc) - } - RangeResult(Result { val: vbegin, .. }, - Result { bcx, val: vend }) => { - let llge = compare_scalar_types(bcx, test_val, vbegin, - t, hir::BiGe, debug_loc); - let llle = compare_scalar_types(bcx, test_val, vend, - t, hir::BiLe, debug_loc); - Result::new(bcx, And(bcx, llge, llle, DebugLoc::None)) - } - LowerBound(Result { bcx, val }) => { - Result::new(bcx, compare_scalar_types(bcx, test_val, - val, t, hir::BiGe, - debug_loc)) - } - } - }; - bcx = fcx.new_temp_block("compare_next"); - - // If none of the sub-cases match, and the current condition - // is guarded or has multiple patterns, move on to the next - // condition, if there is any, rather than falling back to - // the default. - let guarded = m[i].data.arm.guard.is_some(); - let multi_pats = m[i].pats.len() > 1; - if i + 1 < len && (guarded || multi_pats || kind == CompareSliceLength) { - branch_chk = Some(JumpToBasicBlock(bcx.llbb)); - } - CondBr(after_cx, matches, opt_cx.llbb, bcx.llbb, debug_loc); - } - _ => () - } - } else if kind == Compare || kind == CompareSliceLength { - Br(bcx, else_cx.llbb, debug_loc); - } - - let mut size = 0; - let mut unpacked = Vec::new(); - match *opt { - Variant(disr_val, ref repr, _, _) => { - let ExtractedBlock {vals: argvals, bcx: new_bcx} = - extract_variant_args(opt_cx, &repr, disr_val, val); - size = argvals.len(); - unpacked = argvals; - opt_cx = new_bcx; - } - SliceLengthEqual(len, _) => { - let args = extract_vec_elems(opt_cx, left_ty, len, 0, val); - size = args.vals.len(); - unpacked = args.vals.clone(); - opt_cx = args.bcx; - } - SliceLengthGreaterOrEqual(before, after, _) => { - let args = extract_vec_elems(opt_cx, left_ty, before, after, val); - size = args.vals.len(); - unpacked = args.vals.clone(); - opt_cx = args.bcx; - } - ConstantValue(..) | ConstantRange(..) => () - } - let opt_ms = enter_opt(opt_cx, pat_id, m, opt, col, size, val); - let mut opt_vals: Vec<_> = unpacked.into_iter() - .map(|v|MatchInput::from_val(v)) - .collect(); - opt_vals.extend_from_slice(&vals_left[..]); - compile_submatch(opt_cx, - &opt_ms[..], - &opt_vals[..], - branch_chk.as_ref().unwrap_or(chk), - has_genuine_default); - } - - // Compile the fall-through case, if any - if !exhaustive && kind != Single { - if kind == Compare || kind == CompareSliceLength { - Br(bcx, else_cx.llbb, DebugLoc::None); - } - match chk { - // If there is only one default arm left, move on to the next - // condition explicitly rather than (eventually) falling back to - // the last default arm. 
- &JumpToBasicBlock(_) if defaults.len() == 1 && has_genuine_default => { - chk.handle_fail(else_cx); - } - _ => { - compile_submatch(else_cx, - &defaults[..], - &vals_left[..], - chk, - has_genuine_default); - } - } - } -} - -pub fn trans_match<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - match_expr: &hir::Expr, - discr_expr: &hir::Expr, - arms: &[hir::Arm], - dest: Dest) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("match::trans_match"); - trans_match_inner(bcx, match_expr.id, discr_expr, arms, dest) -} - -/// Checks whether the binding in `discr` is assigned to anywhere in the expression `body` -fn is_discr_reassigned(bcx: Block, discr: &hir::Expr, body: &hir::Expr) -> bool { - let (vid, field) = match discr.node { - hir::ExprPath(..) => match bcx.tcx().expect_def(discr.id) { - Def::Local(_, vid) | Def::Upvar(_, vid, _, _) => (vid, None), - _ => return false - }, - hir::ExprField(ref base, field) => { - let vid = match bcx.tcx().expect_def_or_none(base.id) { - Some(Def::Local(_, vid)) | Some(Def::Upvar(_, vid, _, _)) => vid, - _ => return false - }; - (vid, Some(mc::NamedField(field.node))) - }, - hir::ExprTupField(ref base, field) => { - let vid = match bcx.tcx().expect_def_or_none(base.id) { - Some(Def::Local(_, vid)) | Some(Def::Upvar(_, vid, _, _)) => vid, - _ => return false - }; - (vid, Some(mc::PositionalField(field.node))) - }, - _ => return false - }; - - let mut rc = ReassignmentChecker { - node: vid, - field: field, - reassigned: false - }; - bcx.tcx().normalizing_infer_ctxt(Reveal::All).enter(|infcx| { - let mut visitor = euv::ExprUseVisitor::new(&mut rc, &infcx); - visitor.walk_expr(body); - }); - rc.reassigned -} - -struct ReassignmentChecker { - node: ast::NodeId, - field: Option<mc::FieldName>, - reassigned: bool -} - -// Determine if the expression we're matching on is reassigned to within -// the body of the match's arm. -// We only care about the `mutate` callback since this check only matters -// for cases where the matched value is moved. -impl<'tcx> euv::Delegate<'tcx> for ReassignmentChecker { - fn consume(&mut self, _: ast::NodeId, _: Span, _: mc::cmt, _: euv::ConsumeMode) {} - fn matched_pat(&mut self, _: &hir::Pat, _: mc::cmt, _: euv::MatchMode) {} - fn consume_pat(&mut self, _: &hir::Pat, _: mc::cmt, _: euv::ConsumeMode) {} - fn borrow(&mut self, _: ast::NodeId, _: Span, _: mc::cmt, _: ty::Region, - _: ty::BorrowKind, _: euv::LoanCause) {} - fn decl_without_init(&mut self, _: ast::NodeId, _: Span) {} - - fn mutate(&mut self, _: ast::NodeId, _: Span, cmt: mc::cmt, _: euv::MutateMode) { - let cmt_id = |cmt: &mc::cmt| match cmt.cat { - Categorization::Upvar(mc::Upvar { id: ty::UpvarId { var_id: vid, ..}, ..}) | - Categorization::Local(vid) => Some(vid), - Categorization::Interior(ref base_cmt, mc::InteriorField(_)) => Some(base_cmt.id), - _ => None - }; - match cmt.cat { - Categorization::Upvar(mc::Upvar { id: ty::UpvarId { var_id: vid, .. }, ..
}) | - Categorization::Local(vid) => self.reassigned |= self.node == vid, - ref cat => { - let mut cat = cat; - while let &Categorization::Interior(ref base_cmt, mc::InteriorField(field)) = cat { - if let Some(vid) = cmt_id(base_cmt) { - if self.node == vid && (self.field.is_none() || self.field == Some(field)) { - self.reassigned = true; - return; - } - } - cat = &base_cmt.cat; - } - } - } - } -} - -fn create_bindings_map<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pat: &hir::Pat, - discr: &hir::Expr, body: &hir::Expr) - -> BindingsMap<'tcx> { - // Create the bindings map, which is a mapping from each binding name - // to an alloca() that will be the value for that local variable. - // Note that we use the names because each binding will have many ids - // from the various alternatives. - let ccx = bcx.ccx(); - let reassigned = is_discr_reassigned(bcx, discr, body); - let mut bindings_map = FnvHashMap(); - pat_bindings(&pat, |bm, p_id, span, path1| { - let name = path1.node; - let variable_ty = node_id_type(bcx, p_id); - let llvariable_ty = type_of::type_of(ccx, variable_ty); - let tcx = bcx.tcx(); - let param_env = tcx.empty_parameter_environment(); - - let llmatch; - let trmode; - let moves_by_default = variable_ty.moves_by_default(tcx, &param_env, span); - match bm { - hir::BindByValue(_) if !moves_by_default || reassigned => - { - llmatch = alloca(bcx, llvariable_ty.ptr_to(), "__llmatch"); - let llcopy = alloca(bcx, llvariable_ty, &bcx.name(name)); - trmode = if moves_by_default { - TrByMoveIntoCopy(llcopy) - } else { - TrByCopy(llcopy) - }; - } - hir::BindByValue(_) => { - // in this case, the final type of the variable will be T, - // but during matching we need to store a *T as explained - // above - llmatch = alloca(bcx, llvariable_ty.ptr_to(), &bcx.name(name)); - trmode = TrByMoveRef; - } - hir::BindByRef(_) => { - llmatch = alloca(bcx, llvariable_ty, &bcx.name(name)); - trmode = TrByRef; - } - }; - bindings_map.insert(name, BindingInfo { - llmatch: llmatch, - trmode: trmode, - id: p_id, - span: span, - ty: variable_ty - }); - }); - return bindings_map; -} - -fn trans_match_inner<'blk, 'tcx>(scope_cx: Block<'blk, 'tcx>, - match_id: ast::NodeId, - discr_expr: &hir::Expr, - arms: &[hir::Arm], - dest: Dest) -> Block<'blk, 'tcx> { - let _icx = push_ctxt("match::trans_match_inner"); - let fcx = scope_cx.fcx; - let mut bcx = scope_cx; - let tcx = bcx.tcx(); - - let discr_datum = unpack_datum!(bcx, expr::trans_to_lvalue(bcx, discr_expr, - "match")); - if bcx.unreachable.get() { - return bcx; - } - - let t = node_id_type(bcx, discr_expr.id); - let chk = if t.is_uninhabited(tcx) { - Unreachable - } else { - Infallible - }; - - let arm_datas: Vec<ArmData> = arms.iter().map(|arm| ArmData { - bodycx: fcx.new_id_block("case_body", arm.body.id), - arm: arm, - bindings_map: create_bindings_map(bcx, &arm.pats[0], discr_expr, &arm.body) - }).collect(); - - let mut pat_renaming_map = if scope_cx.sess().opts.debuginfo != NoDebugInfo { - Some(FnvHashMap()) - } else { - None - }; - - let arm_pats: Vec<Vec<P<hir::Pat>>> = { - let mut static_inliner = StaticInliner::new(scope_cx.tcx(), - pat_renaming_map.as_mut()); - arm_datas.iter().map(|arm_data| { - arm_data.arm.pats.iter().map(|p| static_inliner.fold_pat((*p).clone())).collect() - }).collect() - }; - - let mut matches = Vec::new(); - for (arm_data, pats) in arm_datas.iter().zip(&arm_pats) { - matches.extend(pats.iter().map(|p| Match { - pats: vec![&p], - data: arm_data, - bound_ptrs: Vec::new(), - pat_renaming_map: pat_renaming_map.as_ref() - })); - } - - // `compile_submatch` works
one column of arm patterns at a time and - // then peels that column off. So as we progress, it may become - // impossible to tell whether we have a genuine default arm, i.e. - // `_ => foo` or not. Sometimes it is important to know that in order - // to decide whether to move on to the next condition or fall back - // to the default arm. - let has_default = arms.last().map_or(false, |arm| { - arm.pats.len() == 1 - && arm.pats.last().unwrap().node == PatKind::Wild - }); - - compile_submatch(bcx, &matches[..], &[discr_datum.match_input()], &chk, has_default); - - let mut arm_cxs = Vec::new(); - for arm_data in &arm_datas { - let mut bcx = arm_data.bodycx; - - // insert bindings into the lllocals map and add cleanups - let cs = fcx.push_custom_cleanup_scope(); - bcx = insert_lllocals(bcx, &arm_data.bindings_map, Some(cleanup::CustomScope(cs))); - bcx = expr::trans_into(bcx, &arm_data.arm.body, dest); - bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, cs); - arm_cxs.push(bcx); - } - - bcx = scope_cx.fcx.join_blocks(match_id, &arm_cxs[..]); - return bcx; -} - -/// Generates code for a local variable declaration like `let <pat>;` or `let <pat> = -/// <init>`. -pub fn store_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - local: &hir::Local) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("match::store_local"); - let mut bcx = bcx; - let tcx = bcx.tcx(); - let pat = &local.pat; - - fn create_dummy_locals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - pat: &hir::Pat) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("create_dummy_locals"); - // create dummy memory for the variables if we have no - // value to store into them immediately - let tcx = bcx.tcx(); - pat_bindings(pat, |_, p_id, _, path1| { - let scope = cleanup::var_scope(tcx, p_id); - bcx = mk_binding_alloca( - bcx, p_id, path1.node, scope, (), - "_match::store_local::create_dummy_locals", - |(), bcx, Datum { val: llval, ty, kind }| { - // Dummy-locals start out uninitialized, so set their - // drop-flag hints (if any) to "moved." - if let Some(hint) = kind.dropflag_hint(bcx) { - let moved_hint = adt::DTOR_MOVED_HINT; - debug!("store moved_hint={} for hint={:?}, uninitialized dummy", - moved_hint, hint); - Store(bcx, C_u8(bcx.fcx.ccx, moved_hint), hint.to_value().value()); - } - - if kind.drop_flag_info.must_zero() { - // if no drop-flag hint, or the hint requires - // we maintain the embedded drop-flag, then - // mark embedded drop-flag(s) as moved - // (i.e. "already dropped"). - drop_done_fill_mem(bcx, llval, ty); - } - bcx - }); - }); - bcx - } - - match local.init { - Some(ref init_expr) => { - // Optimize the "let x = expr" case. This just writes - // the result of evaluating `expr` directly into the alloca - // for `x`. Often the general path results in similar or the - // same code post-optimization, but not always. In particular, - // in unsafe code, you can have expressions like - // - // let x = intrinsics::uninit(); - // - // In such cases, the more general path is unsafe, because - // it assumes it is matching against a valid value. - if let Some(name) = simple_name(pat) { - let var_scope = cleanup::var_scope(tcx, local.id); - return mk_binding_alloca( - bcx, pat.id, name, var_scope, (), - "_match::store_local", - |(), bcx, Datum { val: v, .. }| expr::trans_into(bcx, &init_expr, - expr::SaveIn(v))); - } - - // General path.
- let init_datum = - unpack_datum!(bcx, expr::trans_to_lvalue(bcx, &init_expr, "let")); - if bcx.sess().asm_comments() { - add_comment(bcx, "creating zeroable ref llval"); - } - let var_scope = cleanup::var_scope(tcx, local.id); - bind_irrefutable_pat(bcx, pat, init_datum.match_input(), var_scope) - } - None => { - create_dummy_locals(bcx, pat) - } - } -} - -fn mk_binding_alloca<'blk, 'tcx, A, F>(bcx: Block<'blk, 'tcx>, - p_id: ast::NodeId, - name: ast::Name, - cleanup_scope: cleanup::ScopeId, - arg: A, - caller_name: &'static str, - populate: F) - -> Block<'blk, 'tcx> where - F: FnOnce(A, Block<'blk, 'tcx>, Datum<'tcx, Lvalue>) -> Block<'blk, 'tcx>, -{ - let var_ty = node_id_type(bcx, p_id); - - // Allocate memory on stack for the binding. - let llval = alloc_ty(bcx, var_ty, &bcx.name(name)); - let lvalue = Lvalue::new_with_hint(caller_name, bcx, p_id, HintKind::DontZeroJustUse); - let datum = Datum::new(llval, var_ty, lvalue); - - debug!("mk_binding_alloca cleanup_scope={:?} llval={:?} var_ty={:?}", - cleanup_scope, Value(llval), var_ty); - - // Subtle: be sure that we *populate* the memory *before* - // we schedule the cleanup. - call_lifetime_start(bcx, llval); - let bcx = populate(arg, bcx, datum); - bcx.fcx.schedule_lifetime_end(cleanup_scope, llval); - bcx.fcx.schedule_drop_mem(cleanup_scope, llval, var_ty, lvalue.dropflag_hint(bcx)); - - // Now that memory is initialized and has cleanup scheduled, - // insert datum into the local variable map. - bcx.fcx.lllocals.borrow_mut().insert(p_id, datum); - bcx -} - -/// A simple version of the pattern matching code that only handles -/// irrefutable patterns. This is used in let/argument patterns, -/// not in match statements. Unifying this code with the code above -/// sounds nice, but in practice it produces very inefficient code, -/// since the match code is so much more general. In most cases, -/// LLVM is able to optimize the code, but it causes longer compile -/// times and makes the generated code nigh impossible to read. -/// -/// # Arguments -/// - bcx: starting basic block context -/// - pat: the irrefutable pattern being matched. -/// - val: the value being matched -- must be an lvalue (by ref, with cleanup) -pub fn bind_irrefutable_pat<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - pat: &hir::Pat, - val: MatchInput, - cleanup_scope: cleanup::ScopeId) - -> Block<'blk, 'tcx> { - debug!("bind_irrefutable_pat(bcx={}, pat={:?}, val={:?})", - bcx.to_str(), pat, val); - - if bcx.sess().asm_comments() { - add_comment(bcx, &format!("bind_irrefutable_pat(pat={:?})", - pat)); - } - - let _indenter = indenter(); - - let _icx = push_ctxt("match::bind_irrefutable_pat"); - let mut bcx = bcx; - let tcx = bcx.tcx(); - let ccx = bcx.ccx(); - match pat.node { - PatKind::Binding(pat_binding_mode, ref path1, ref inner) => { - // Allocate the stack slot where the value of this - // binding will live and place it into the appropriate - // map. - bcx = mk_binding_alloca(bcx, pat.id, path1.node, cleanup_scope, (), - "_match::bind_irrefutable_pat", - |(), bcx, Datum { val: llval, ty, kind: _ }| { - match pat_binding_mode { - hir::BindByValue(_) => { - // By value binding: move the value that `val` - // points at into the binding's stack slot. 
- let d = val.to_datum(ty); - d.store_to(bcx, llval) - } - - hir::BindByRef(_) => { - // By ref binding: the value of the variable - // is the pointer `val` itself or fat pointer referenced by `val` - if type_is_fat_ptr(bcx.tcx(), ty) { - expr::copy_fat_ptr(bcx, val.val, llval); - } - else { - Store(bcx, val.val, llval); - } - - bcx - } - } - }); - - if let Some(ref inner_pat) = *inner { - bcx = bind_irrefutable_pat(bcx, &inner_pat, val, cleanup_scope); - } - } - PatKind::TupleStruct(_, ref sub_pats, ddpos) => { - match bcx.tcx().expect_def(pat.id) { - Def::Variant(enum_id, var_id) => { - let repr = adt::represent_node(bcx, pat.id); - let vinfo = ccx.tcx().lookup_adt_def(enum_id).variant_with_id(var_id); - let args = extract_variant_args(bcx, - &repr, - Disr::from(vinfo.disr_val), - val); - for (i, subpat) in sub_pats.iter() - .enumerate_and_adjust(vinfo.fields.len(), ddpos) { - bcx = bind_irrefutable_pat( - bcx, - subpat, - MatchInput::from_val(args.vals[i]), - cleanup_scope); - } - } - Def::Struct(..) => { - let expected_len = match *ccx.tcx().pat_ty(&pat) { - ty::TyS{sty: ty::TyStruct(adt_def, _), ..} => { - adt_def.struct_variant().fields.len() - } - ref ty => { - span_bug!(pat.span, "tuple struct pattern unexpected type {:?}", ty); - } - }; - - let repr = adt::represent_node(bcx, pat.id); - let val = adt::MaybeSizedValue::sized(val.val); - for (i, elem) in sub_pats.iter().enumerate_and_adjust(expected_len, ddpos) { - let fldptr = adt::trans_field_ptr(bcx, &repr, val, Disr(0), i); - bcx = bind_irrefutable_pat( - bcx, - &elem, - MatchInput::from_val(fldptr), - cleanup_scope); - } - } - _ => { - // Nothing to do here. - } - } - } - PatKind::Struct(_, ref fields, _) => { - let tcx = bcx.tcx(); - let pat_ty = node_id_type(bcx, pat.id); - let pat_repr = adt::represent_type(bcx.ccx(), pat_ty); - let pat_v = VariantInfo::of_node(tcx, pat_ty, pat.id); - - let val = if type_is_sized(tcx, pat_ty) { - adt::MaybeSizedValue::sized(val.val) - } else { - let data = Load(bcx, expr::get_dataptr(bcx, val.val)); - let meta = Load(bcx, expr::get_meta(bcx, val.val)); - adt::MaybeSizedValue::unsized_(data, meta) - }; - - for f in fields { - let name = f.node.name; - let field_idx = pat_v.field_index(name); - let mut fldptr = adt::trans_field_ptr( - bcx, - &pat_repr, - val, - pat_v.discr, - field_idx); - - let fty = pat_v.fields[field_idx].1; - // If it's not sized, then construct a fat pointer instead of - // a regular one - if !type_is_sized(tcx, fty) { - let scratch = alloc_ty(bcx, fty, "__struct_field_fat_ptr"); - debug!("Creating fat pointer {:?}", Value(scratch)); - Store(bcx, fldptr, expr::get_dataptr(bcx, scratch)); - Store(bcx, val.meta, expr::get_meta(bcx, scratch)); - fldptr = scratch; - } - bcx = bind_irrefutable_pat(bcx, - &f.node.pat, - MatchInput::from_val(fldptr), - cleanup_scope); - } - } - PatKind::Tuple(ref elems, ddpos) => { - match tcx.node_id_to_type(pat.id).sty { - ty::TyTuple(ref tys) => { - let repr = adt::represent_node(bcx, pat.id); - let val = adt::MaybeSizedValue::sized(val.val); - for (i, elem) in elems.iter().enumerate_and_adjust(tys.len(), ddpos) { - let fldptr = adt::trans_field_ptr(bcx, &repr, val, Disr(0), i); - bcx = bind_irrefutable_pat( - bcx, - &elem, - MatchInput::from_val(fldptr), - cleanup_scope); - } - } - ref sty => span_bug!(pat.span, "unexpected type for tuple pattern: {:?}", sty), - } - } - PatKind::Box(ref inner) => { - let pat_ty = node_id_type(bcx, inner.id); - // Pass along DSTs as fat pointers. 
- let val = if type_is_fat_ptr(tcx, pat_ty) { - // We need to check for this, as the pattern could be binding - // a fat pointer by-value. - if let PatKind::Binding(hir::BindByRef(..),_,_) = inner.node { - val.val - } else { - Load(bcx, val.val) - } - } else if type_is_sized(tcx, pat_ty) { - Load(bcx, val.val) - } else { - val.val - }; - bcx = bind_irrefutable_pat( - bcx, &inner, MatchInput::from_val(val), cleanup_scope); - } - PatKind::Ref(ref inner, _) => { - let pat_ty = node_id_type(bcx, inner.id); - // Pass along DSTs as fat pointers. - let val = if type_is_fat_ptr(tcx, pat_ty) { - // We need to check for this, as the pattern could be binding - // a fat pointer by-value. - if let PatKind::Binding(hir::BindByRef(..),_,_) = inner.node { - val.val - } else { - Load(bcx, val.val) - } - } else if type_is_sized(tcx, pat_ty) { - Load(bcx, val.val) - } else { - val.val - }; - bcx = bind_irrefutable_pat( - bcx, - &inner, - MatchInput::from_val(val), - cleanup_scope); - } - PatKind::Vec(ref before, ref slice, ref after) => { - let pat_ty = node_id_type(bcx, pat.id); - let mut extracted = extract_vec_elems(bcx, pat_ty, before.len(), after.len(), val); - match slice { - &Some(_) => { - extracted.vals.insert( - before.len(), - bind_subslice_pat(bcx, pat.id, val, before.len(), after.len()) - ); - } - &None => () - } - bcx = before - .iter() - .chain(slice.iter()) - .chain(after.iter()) - .zip(extracted.vals) - .fold(bcx, |bcx, (inner, elem)| { - bind_irrefutable_pat( - bcx, - &inner, - MatchInput::from_val(elem), - cleanup_scope) - }); - } - PatKind::Path(..) | PatKind::Wild | - PatKind::Lit(..) | PatKind::Range(..) => () - } - return bcx; -} diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index d48ec98a20dfb..2fb7a69d36186 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -53,14 +53,9 @@ use rustc::ty::{self, Ty, TyCtxt}; use syntax::ast; use syntax::attr; use syntax::attr::IntType; -use _match; use abi::FAT_PTR_ADDR; -use base::InitAlloca; use build::*; -use cleanup; -use cleanup::CleanupMethods; use common::*; -use datum; use debuginfo::DebugLoc; use glue; use machine; @@ -69,27 +64,13 @@ use type_::Type; use type_of; use value::Value; -type Hint = attr::ReprAttr; - -// Representation of the context surrounding an unsized type. I want -// to be able to track the drop flags that are injected by trans. -#[derive(Clone, Copy, PartialEq, Debug)] -pub struct TypeContext { - prefix: Type, - needs_drop_flag: bool, +#[derive(Copy, Clone, PartialEq)] +pub enum BranchKind { + Switch, + Single } -impl TypeContext { - pub fn prefix(&self) -> Type { self.prefix } - pub fn needs_drop_flag(&self) -> bool { self.needs_drop_flag } - - fn direct(t: Type) -> TypeContext { - TypeContext { prefix: t, needs_drop_flag: false } - } - fn may_need_drop_flag(t: Type, needs_drop_flag: bool) -> TypeContext { - TypeContext { prefix: t, needs_drop_flag: needs_drop_flag } - } -} +type Hint = attr::ReprAttr; /// Representations. #[derive(Eq, PartialEq, Debug)] @@ -97,22 +78,10 @@ pub enum Repr<'tcx> { /// C-like enums; basically an int. CEnum(IntType, Disr, Disr), // discriminant range (signedness based on the IntType) /// Single-case variants, and structs/tuples/records. - /// - /// Structs with destructors need a dynamic destroyedness flag to - /// avoid running the destructor too many times; this is included - /// in the `Struct` if present. - /// (The flag, if nonzero, represents the initialization value to use; - /// if zero, then use no flag at all.)
- Univariant(Struct<'tcx>, u8), + Univariant(Struct<'tcx>), /// General-case enums: for each case there is a struct, and they /// all start with a field for the discriminant. - /// - /// Types with destructors need a dynamic destroyedness flag to - /// avoid running the destructor too many times; the last argument - /// indicates whether such a flag is present. - /// (The flag, if nonzero, represents the initialization value to use; - /// if zero, then use no flag at all.) - General(IntType, Vec<Struct<'tcx>>, u8), + General(IntType, Vec<Struct<'tcx>>), /// Two cases distinguished by a nullable pointer: the case with discriminant /// `nndiscr` must have a single field which is known to be nonnull due to its type. /// The other case is known to be zero sized. Hence we represent the enum @@ -178,14 +147,6 @@ impl MaybeSizedValue { } } -/// Convenience for `represent_type`. There should probably be more of -/// these, for places in trans where the `Ty` isn't directly -/// available. -pub fn represent_node<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - node: ast::NodeId) -> Rc<Repr<'tcx>> { - represent_type(bcx.ccx(), node_id_type(bcx, node)) -} - /// Decides how to represent a given type. pub fn represent_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) @@ -201,91 +162,36 @@ pub fn represent_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, repr } -const fn repeat_u8_as_u32(val: u8) -> u32 { - (val as u32) << 24 | (val as u32) << 16 | (val as u32) << 8 | val as u32 -} - -const fn repeat_u8_as_u64(val: u8) -> u64 { - (repeat_u8_as_u32(val) as u64) << 32 | repeat_u8_as_u32(val) as u64 -} - -/// `DTOR_NEEDED_HINT` is a stack-local hint that just means -/// "we do not know whether the destructor has run or not; check the -/// drop-flag embedded in the value itself." -pub const DTOR_NEEDED_HINT: u8 = 0x3d; - -/// `DTOR_MOVED_HINT` is a stack-local hint that means "this value has -/// definitely been moved; you do not need to run its destructor." -/// -/// (However, for now, such values may still end up being explicitly -/// zeroed by the generated code; this is the distinction between -/// `datum::DropFlagInfo::ZeroAndMaintain` versus -/// `datum::DropFlagInfo::DontZeroJustUse`.) -pub const DTOR_MOVED_HINT: u8 = 0x2d; - -pub const DTOR_NEEDED: u8 = 0xd4; -#[allow(dead_code)] -pub const DTOR_NEEDED_U64: u64 = repeat_u8_as_u64(DTOR_NEEDED); - -pub const DTOR_DONE: u8 = 0x1d; -#[allow(dead_code)] -pub const DTOR_DONE_U64: u64 = repeat_u8_as_u64(DTOR_DONE); - -fn dtor_to_init_u8(dtor: bool) -> u8 { - if dtor { DTOR_NEEDED } else { 0 } -} - -pub trait GetDtorType<'tcx> { fn dtor_type(self) -> Ty<'tcx>; } -impl<'a, 'tcx> GetDtorType<'tcx> for TyCtxt<'a, 'tcx, 'tcx> { - fn dtor_type(self) -> Ty<'tcx> { self.types.u8 } -} - -fn dtor_active(flag: u8) -> bool { - flag != 0 -} - fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Repr<'tcx> { match t.sty { ty::TyTuple(ref elems) => { - Univariant(mk_struct(cx, &elems[..], false, t), 0) + Univariant(mk_struct(cx, &elems[..], false, t)) } ty::TyStruct(def, substs) => { - let mut ftys = def.struct_variant().fields.iter().map(|field| { + let ftys = def.struct_variant().fields.iter().map(|field| { monomorphize::field_ty(cx.tcx(), substs, field) }).collect::<Vec<_>>(); let packed = cx.tcx().lookup_packed(def.did); - // FIXME(16758) don't add a drop flag to unsized structs, as it - // won't actually be in the location we say it is because it'll be after - // the unsized field. Several other pieces of code assume that the unsized - // field is definitely the last one.
- let dtor = def.dtor_kind().has_drop_flag() && type_is_sized(cx.tcx(), t); - if dtor { - ftys.push(cx.tcx().dtor_type()); - } - Univariant(mk_struct(cx, &ftys[..], packed, t), dtor_to_init_u8(dtor)) + Univariant(mk_struct(cx, &ftys[..], packed, t)) } ty::TyClosure(_, ref substs) => { - Univariant(mk_struct(cx, &substs.upvar_tys, false, t), 0) + Univariant(mk_struct(cx, &substs.upvar_tys, false, t)) } ty::TyEnum(def, substs) => { let cases = get_cases(cx.tcx(), def, substs); let hint = *cx.tcx().lookup_repr_hints(def.did).get(0) .unwrap_or(&attr::ReprAny); - let dtor = def.dtor_kind().has_drop_flag(); - if cases.is_empty() { // Uninhabitable; represent as unit // (Typechecking will reject discriminant-sizing attrs.) assert_eq!(hint, attr::ReprAny); - let ftys = if dtor { vec!(cx.tcx().dtor_type()) } else { vec!() }; - return Univariant(mk_struct(cx, &ftys[..], false, t), - dtor_to_init_u8(dtor)); + return Univariant(mk_struct(cx, &[], false, t)); } - if !dtor && cases.iter().all(|c| c.tys.is_empty()) { + if cases.iter().all(|c| c.tys.is_empty()) { // All bodies empty -> intlike let discrs: Vec<_> = cases.iter().map(|c| Disr::from(c.discr)).collect(); let bounds = IntBounds { @@ -307,13 +213,10 @@ fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, if cases.len() == 1 && hint == attr::ReprAny { // Equivalent to a struct/tuple/newtype. - let mut ftys = cases[0].tys.clone(); - if dtor { ftys.push(cx.tcx().dtor_type()); } - return Univariant(mk_struct(cx, &ftys[..], false, t), - dtor_to_init_u8(dtor)); + return Univariant(mk_struct(cx, &cases[0].tys, false, t)); } - if !dtor && cases.len() == 2 && hint == attr::ReprAny { + if cases.len() == 2 && hint == attr::ReprAny { // Nullable pointer optimization let mut discr = 0; while discr < 2 { @@ -356,7 +259,6 @@ fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let fields : Vec<_> = cases.iter().map(|c| { let mut ftys = vec!(ty_of_inttype(cx.tcx(), min_ity)); ftys.extend_from_slice(&c.tys); - if dtor { ftys.push(cx.tcx().dtor_type()); } mk_struct(cx, &ftys, false, t) }).collect(); @@ -418,13 +320,12 @@ fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let fields : Vec<_> = cases.iter().map(|c| { let mut ftys = vec!(ty_of_inttype(cx.tcx(), ity)); ftys.extend_from_slice(&c.tys); - if dtor { ftys.push(cx.tcx().dtor_type()); } mk_struct(cx, &ftys[..], false, t) }).collect(); ensure_enum_fits_in_address_space(cx, &fields[..], t); - General(ity, fields, dtor_to_init_u8(dtor)) + General(ity, fields) } _ => bug!("adt::represent_type called on non-ADT type: {}", t) } @@ -722,9 +623,7 @@ fn ensure_enum_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, /// and fill in the actual contents in a second pass to prevent /// unbounded recursion; see also the comments in `trans::type_of`. pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>) -> Type { - let c = generic_type_of(cx, r, None, false, false, false); - assert!(!c.needs_drop_flag); - c.prefix + generic_type_of(cx, r, None, false, false) } @@ -733,25 +632,19 @@ pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>) -> Type { // are going to get the wrong type (it will not include the unsized parts of it). 
pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>, dst: bool) -> Type { - let c = generic_type_of(cx, r, None, true, dst, false); - assert!(!c.needs_drop_flag); - c.prefix -} -pub fn sizing_type_context_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - r: &Repr<'tcx>, dst: bool) -> TypeContext { - generic_type_of(cx, r, None, true, dst, true) + generic_type_of(cx, r, None, true, dst) } + pub fn incomplete_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>, name: &str) -> Type { - let c = generic_type_of(cx, r, Some(name), false, false, false); - assert!(!c.needs_drop_flag); - c.prefix + generic_type_of(cx, r, Some(name), false, false) } + pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>, llty: &mut Type) { match *r { CEnum(..) | General(..) | RawNullablePointer { .. } => { } - Univariant(ref st, _) | StructWrappedNullablePointer { nonnull: ref st, .. } => + Univariant(ref st) | StructWrappedNullablePointer { nonnull: ref st, .. } => llty.set_struct_body(&struct_llfields(cx, st, false, false), st.packed) } @@ -761,50 +654,40 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>, name: Option<&str>, sizing: bool, - dst: bool, - delay_drop_flag: bool) -> TypeContext { - debug!("adt::generic_type_of r: {:?} name: {:?} sizing: {} dst: {} delay_drop_flag: {}", - r, name, sizing, dst, delay_drop_flag); + dst: bool) -> Type { + debug!("adt::generic_type_of r: {:?} name: {:?} sizing: {} dst: {}", + r, name, sizing, dst); match *r { - CEnum(ity, _, _) => TypeContext::direct(ll_inttype(cx, ity)), + CEnum(ity, _, _) => ll_inttype(cx, ity), RawNullablePointer { nnty, .. } => - TypeContext::direct(type_of::sizing_type_of(cx, nnty)), + type_of::sizing_type_of(cx, nnty), StructWrappedNullablePointer { nonnull: ref st, .. } => { match name { None => { - TypeContext::direct( - Type::struct_(cx, &struct_llfields(cx, st, sizing, dst), - st.packed)) + Type::struct_(cx, &struct_llfields(cx, st, sizing, dst), + st.packed) } Some(name) => { assert_eq!(sizing, false); - TypeContext::direct(Type::named_struct(cx, name)) + Type::named_struct(cx, name) } } } - Univariant(ref st, dtor_needed) => { - let dtor_needed = dtor_needed != 0; + Univariant(ref st) => { match name { None => { - let mut fields = struct_llfields(cx, st, sizing, dst); - if delay_drop_flag && dtor_needed { - fields.pop(); - } - TypeContext::may_need_drop_flag( - Type::struct_(cx, &fields, - st.packed), - delay_drop_flag && dtor_needed) + let fields = struct_llfields(cx, st, sizing, dst); + Type::struct_(cx, &fields, st.packed) } Some(name) => { // Hypothesis: named_struct's can never need a // drop flag. (... needs validation.) 
assert_eq!(sizing, false); - TypeContext::direct(Type::named_struct(cx, name)) + Type::named_struct(cx, name) } } } - General(ity, ref sts, dtor_needed) => { - let dtor_needed = dtor_needed != 0; + General(ity, ref sts) => { // We need a representation that has: // * The alignment of the most-aligned field // * The size of the largest variant (rounded up to that alignment) @@ -836,25 +719,18 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, }; assert_eq!(machine::llalign_of_min(cx, fill_ty), align); assert_eq!(padded_discr_size % discr_size, 0); // Ensure discr_ty can fill pad evenly - let mut fields: Vec<Type> = + let fields: Vec<Type> = [discr_ty, Type::array(&discr_ty, (padded_discr_size - discr_size)/discr_size), fill_ty].iter().cloned().collect(); - if delay_drop_flag && dtor_needed { - fields.pop(); - } match name { None => { - TypeContext::may_need_drop_flag( - Type::struct_(cx, &fields[..], false), - delay_drop_flag && dtor_needed) + Type::struct_(cx, &fields[..], false) } Some(name) => { let mut llty = Type::named_struct(cx, name); llty.set_struct_body(&fields[..], false); - TypeContext::may_need_drop_flag( - llty, - delay_drop_flag && dtor_needed) + llty } } } @@ -873,22 +749,19 @@ fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, st: &Struct<'tcx>, /// Obtain a representation of the discriminant sufficient to translate /// destructuring; this may or may not involve the actual discriminant. -/// -/// This should ideally be less tightly tied to `_match`. pub fn trans_switch<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>, scrutinee: ValueRef, range_assert: bool) - -> (_match::BranchKind, Option<ValueRef>) { + -> (BranchKind, Option<ValueRef>) { match *r { CEnum(..) | General(..) | RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => { - (_match::Switch, Some(trans_get_discr(bcx, r, scrutinee, None, - range_assert))) + (BranchKind::Switch, Some(trans_get_discr(bcx, r, scrutinee, None, range_assert))) } Univariant(..) => { // N.B.: Univariant means <= 1 enum variants (*not* == 1 variants). - (_match::Single, None) + (BranchKind::Single, None) } } } @@ -896,7 +769,7 @@ pub fn trans_switch<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pub fn is_discr_signed<'tcx>(r: &Repr<'tcx>) -> bool { match *r { CEnum(ity, _, _) => ity.is_signed(), - General(ity, _, _) => ity.is_signed(), + General(ity, _) => ity.is_signed(), Univariant(..) => false, RawNullablePointer { .. } => false, StructWrappedNullablePointer { .. } => false, } } @@ -913,7 +786,7 @@ pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>, CEnum(ity, min, max) => { load_discr(bcx, ity, scrutinee, min, max, range_assert) } - General(ity, ref cases, _) => { + General(ity, ref cases) => { let ptr = StructGEP(bcx, scrutinee, 0); load_discr(bcx, ity, ptr, Disr(0), Disr(cases.len() as u64 - 1), range_assert) @@ -977,7 +850,7 @@ pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr, discr: Disr) CEnum(ity, _, _) => { C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true) } - General(ity, _, _) => { + General(ity, _) => { C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true) } Univariant(..)
=> { @@ -1001,21 +874,12 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>, Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true), val); } - General(ity, ref cases, dtor) => { - if dtor_active(dtor) { - let ptr = trans_field_ptr(bcx, r, MaybeSizedValue::sized(val), discr, - cases[discr.0 as usize].fields.len() - 2); - Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED), ptr); - } + General(ity, _) => { Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true), StructGEP(bcx, val, 0)); } - Univariant(ref st, dtor) => { + Univariant(_) => { assert_eq!(discr, Disr(0)); - if dtor_active(dtor) { - Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED), - StructGEP(bcx, val, st.fields.len() - 1)); - } } RawNullablePointer { nndiscr, nnty, ..} => { if discr != nndiscr { @@ -1046,28 +910,6 @@ fn assert_discr_in_range(ity: IntType, min: Disr, max: Disr, discr: Disr) { } } -/// The number of fields in a given case; for use when obtaining this -/// information from the type or definition is less convenient. -pub fn num_args(r: &Repr, discr: Disr) -> usize { - match *r { - CEnum(..) => 0, - Univariant(ref st, dtor) => { - assert_eq!(discr, Disr(0)); - st.fields.len() - (if dtor_active(dtor) { 1 } else { 0 }) - } - General(_, ref cases, dtor) => { - cases[discr.0 as usize].fields.len() - 1 - (if dtor_active(dtor) { 1 } else { 0 }) - } - RawNullablePointer { nndiscr, ref nullfields, .. } => { - if discr == nndiscr { 1 } else { nullfields.len() } - } - StructWrappedNullablePointer { ref nonnull, nndiscr, - ref nullfields, .. } => { - if discr == nndiscr { nonnull.fields.len() } else { nullfields.len() } - } - } -} - /// Access a field, at a point when the value's case is known. pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>, val: MaybeSizedValue, discr: Disr, ix: usize) -> ValueRef { @@ -1087,11 +929,11 @@ pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, CEnum(..) => { bug!("element access in C-like enum") } - Univariant(ref st, _dtor) => { + Univariant(ref st) => { assert_eq!(discr, Disr(0)); struct_field_ptr(bcx, st, val, ix, false) } - General(_, ref cases, _) => { + General(_, ref cases) => { struct_field_ptr(bcx, &cases[discr.0 as usize], val, ix + 1, true) } RawNullablePointer { nndiscr, ref nullfields, .. } | @@ -1218,108 +1060,6 @@ fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, bcx.pointercast(byte_ptr, ll_fty.ptr_to()) } -pub fn fold_variants<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, - r: &Repr<'tcx>, - value: ValueRef, - mut f: F) - -> Block<'blk, 'tcx> where - F: FnMut(Block<'blk, 'tcx>, &Struct<'tcx>, ValueRef) -> Block<'blk, 'tcx>, -{ - let fcx = bcx.fcx; - match *r { - Univariant(ref st, _) => { - f(bcx, st, value) - } - General(ity, ref cases, _) => { - let ccx = bcx.ccx(); - - // See the comments in trans/base.rs for more information (inside - // iter_structural_ty), but the gist here is that if the enum's - // discriminant is *not* in the range that we're expecting (in which - // case we'll take the fall-through branch on the switch - // instruction) then we can't just optimize this to an Unreachable - // block. - // - // Currently we still have filling drop, so this means that the drop - // glue for enums may be called when the enum has been paved over - // with the "I've been dropped" value. 
In this case the default - // branch of the switch instruction will actually be taken at - // runtime, so the basic block isn't actually unreachable, so we - // need to make it do something with defined behavior. In this case - // we just return early from the function. - // - // Note that this is also why the `trans_get_discr` below has - // `false` to indicate that loading the discriminant should - // not have a range assert. - let ret_void_cx = fcx.new_temp_block("enum-variant-iter-ret-void"); - RetVoid(ret_void_cx, DebugLoc::None); - - let discr_val = trans_get_discr(bcx, r, value, None, false); - let llswitch = Switch(bcx, discr_val, ret_void_cx.llbb, cases.len()); - let bcx_next = fcx.new_temp_block("enum-variant-iter-next"); - - for (discr, case) in cases.iter().enumerate() { - let mut variant_cx = fcx.new_temp_block( - &format!("enum-variant-iter-{}", &discr.to_string()) - ); - let rhs_val = C_integral(ll_inttype(ccx, ity), discr as u64, true); - AddCase(llswitch, rhs_val, variant_cx.llbb); - - let fields = case.fields.iter().map(|&ty| - type_of::type_of(bcx.ccx(), ty)).collect::<Vec<_>>(); - let real_ty = Type::struct_(ccx, &fields[..], case.packed); - let variant_value = PointerCast(variant_cx, value, real_ty.ptr_to()); - - variant_cx = f(variant_cx, case, variant_value); - Br(variant_cx, bcx_next.llbb, DebugLoc::None); - } - - bcx_next - } - _ => bug!() - } -} - -/// Access the struct drop flag, if present. -pub fn trans_drop_flag_ptr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - r: &Repr<'tcx>, - val: ValueRef) - -> datum::DatumBlock<'blk, 'tcx, datum::Expr> -{ - let tcx = bcx.tcx(); - let ptr_ty = bcx.tcx().mk_imm_ptr(tcx.dtor_type()); - match *r { - Univariant(ref st, dtor) if dtor_active(dtor) => { - let flag_ptr = StructGEP(bcx, val, st.fields.len() - 1); - datum::immediate_rvalue_bcx(bcx, flag_ptr, ptr_ty).to_expr_datumblock() - } - General(_, _, dtor) if dtor_active(dtor) => { - let fcx = bcx.fcx; - let custom_cleanup_scope = fcx.push_custom_cleanup_scope(); - let scratch = unpack_datum!(bcx, datum::lvalue_scratch_datum( - bcx, tcx.dtor_type(), "drop_flag", - InitAlloca::Uninit("drop flag itself has no dtor"), - cleanup::CustomScope(custom_cleanup_scope), |bcx, _| { - debug!("no-op populate call for trans_drop_flag_ptr on dtor_type={:?}", - tcx.dtor_type()); - bcx - } - )); - bcx = fold_variants(bcx, r, val, |variant_cx, st, value| { - let ptr = struct_field_ptr(&variant_cx.build(), st, - MaybeSizedValue::sized(value), - (st.fields.len() - 1), false); - datum::Datum::new(ptr, ptr_ty, datum::Lvalue::new("adt::trans_drop_flag_ptr")) - .store_to(variant_cx, scratch.val) - }); - let expr_datum = scratch.to_expr_datum(); - fcx.pop_custom_cleanup_scope(custom_cleanup_scope); - datum::DatumBlock::new(bcx, expr_datum) - } - _ => bug!("tried to get drop flag of non-droppable type") - } -} - /// Construct a constant value, suitable for initializing a /// GlobalVariable, given a case and constant values for its fields.
/// Note that this may have a different LLVM type (and different @@ -1347,7 +1087,7 @@ pub fn trans_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>, discr assert_discr_in_range(ity, min, max, discr); C_integral(ll_inttype(ccx, ity), discr.0, true) } - General(ity, ref cases, _) => { + General(ity, ref cases) => { let case = &cases[discr.0 as usize]; let (max_sz, _) = union_size_and_align(&cases[..]); let lldiscr = C_integral(ll_inttype(ccx, ity), discr.0 as u64, true); @@ -1357,7 +1097,7 @@ pub fn trans_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>, discr contents.extend_from_slice(&[padding(ccx, max_sz - case.size)]); C_struct(ccx, &contents[..], false) } - Univariant(ref st, _dro) => { + Univariant(ref st) => { assert_eq!(discr, Disr(0)); let contents = build_const_struct(ccx, st, vals); C_struct(ccx, &contents[..], st.packed) @@ -1458,28 +1198,6 @@ fn padding(ccx: &CrateContext, size: u64) -> ValueRef { #[inline] fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a } -/// Get the discriminant of a constant value. -pub fn const_get_discrim(r: &Repr, val: ValueRef) -> Disr { - match *r { - CEnum(ity, _, _) => { - match ity { - attr::SignedInt(..) => Disr(const_to_int(val) as u64), - attr::UnsignedInt(..) => Disr(const_to_uint(val)), - } - } - General(ity, _, _) => { - match ity { - attr::SignedInt(..) => Disr(const_to_int(const_get_elt(val, &[0])) as u64), - attr::UnsignedInt(..) => Disr(const_to_uint(const_get_elt(val, &[0]))) - } - } - Univariant(..) => Disr(0), - RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => { - bug!("const discrim access of non c-like enum") - } - } -} - /// Extract a field of a constant value, as appropriate for its /// representation. /// diff --git a/src/librustc_trans/asm.rs b/src/librustc_trans/asm.rs index 5514fb0f4efc3..308118b1fbc6c 100644 --- a/src/librustc_trans/asm.rs +++ b/src/librustc_trans/asm.rs @@ -14,28 +14,29 @@ use llvm::{self, ValueRef}; use base; use build::*; use common::*; -use datum::{Datum, Lvalue}; use type_of; use type_::Type; -use rustc::hir as ast; +use rustc::hir; +use rustc::ty::Ty; + use std::ffi::CString; use syntax::ast::AsmDialect; use libc::{c_uint, c_char}; // Take an inline assembly expression and splat it out via LLVM pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - ia: &ast::InlineAsm, - outputs: Vec>, + ia: &hir::InlineAsm, + outputs: Vec<(ValueRef, Ty<'tcx>)>, mut inputs: Vec) { let mut ext_constraints = vec![]; let mut output_types = vec![]; // Prepare the output operands let mut indirect_outputs = vec![]; - for (i, (out, out_datum)) in ia.outputs.iter().zip(&outputs).enumerate() { + for (i, (out, &(val, ty))) in ia.outputs.iter().zip(&outputs).enumerate() { let val = if out.is_rw || out.is_indirect { - Some(base::load_ty(bcx, out_datum.val, out_datum.ty)) + Some(base::load_ty(bcx, val, ty)) } else { None }; @@ -46,7 +47,7 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, if out.is_indirect { indirect_outputs.push(val.unwrap()); } else { - output_types.push(type_of::type_of(bcx.ccx(), out_datum.ty)); + output_types.push(type_of::type_of(bcx.ccx(), ty)); } } if !indirect_outputs.is_empty() { @@ -100,9 +101,9 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // Again, based on how many outputs we have let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect); - for (i, (_, datum)) in outputs.enumerate() { + for (i, (_, &(val, _))) in outputs.enumerate() { let v = if num_outputs == 1 { r } 
else { ExtractValue(bcx, r, i) }; - Store(bcx, v, datum.val); + Store(bcx, v, val); } // Store expn_id in a metadata node so we can map LLVM errors diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index cab353cd26209..165884c8f55a2 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -33,12 +33,10 @@ use super::ModuleTranslation; use assert_module_sources; use back::link; use back::linker::LinkerInfo; -use llvm::{BasicBlockRef, Linkage, ValueRef, Vector, get_param}; +use llvm::{Linkage, ValueRef, Vector, get_param}; use llvm; -use rustc::cfg; use rustc::hir::def_id::DefId; use middle::lang_items::{LangItem, ExchangeMallocFnLangItem, StartFnLangItem}; -use rustc::hir::pat_util::simple_name; use rustc::ty::subst::Substs; use rustc::traits; use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; @@ -47,36 +45,27 @@ use rustc::dep_graph::{DepNode, WorkProduct}; use rustc::hir::map as hir_map; use rustc::util::common::time; use rustc::mir::mir_map::MirMap; -use rustc_data_structures::graph::OUTGOING; +use session::config::{self, NoDebugInfo}; use rustc_incremental::IncrementalHashesMap; -use session::config::{self, NoDebugInfo, FullDebugInfo}; use session::Session; -use _match; use abi::{self, Abi, FnType}; use adt; use attributes; use build::*; use builder::{Builder, noname}; -use callee::{Callee, CallArgs, ArgExprs, ArgVals}; -use cleanup::{self, CleanupMethods, DropHint}; -use closure; -use common::{Block, C_bool, C_bytes_in_context, C_i32, C_int, C_uint, C_integral}; +use callee::{Callee}; +use common::{Block, C_bool, C_bytes_in_context, C_i32, C_uint}; use collector::{self, TransItemCollectionMode}; use common::{C_null, C_struct_in_context, C_u64, C_u8, C_undef}; -use common::{CrateContext, DropFlagHintsMap, Field, FunctionContext}; -use common::{Result, NodeIdAndSpan, VariantInfo}; -use common::{node_id_type, fulfill_obligation}; -use common::{type_is_immediate, type_is_zero_size, val_ty}; +use common::{CrateContext, FunctionContext}; +use common::{Result}; +use common::{fulfill_obligation}; +use common::{type_is_zero_size, val_ty}; use common; use consts; use context::{SharedCrateContext, CrateContextList}; -use controlflow; -use datum; -use debuginfo::{self, DebugLoc, ToDebugLoc}; +use debuginfo::{self, DebugLoc}; use declare; -use expr; -use glue; -use inline; use machine; use machine::{llalign_of_min, llsize_of}; use meth; @@ -86,14 +75,12 @@ use partitioning::{self, PartitioningStrategy, CodegenUnit}; use symbol_map::SymbolMap; use symbol_names_test; use trans_item::TransItem; -use tvec; use type_::Type; use type_of; use value::Value; use Disr; -use util::common::indenter; use util::sha2::Sha256; -use util::nodemap::{NodeMap, NodeSet, FnvHashSet}; +use util::nodemap::{NodeSet, FnvHashSet}; use arena::TypedArena; use libc::c_uint; @@ -104,12 +91,10 @@ use std::collections::HashMap; use std::ptr; use std::rc::Rc; use std::str; -use std::{i8, i16, i32, i64}; +use std::i32; use syntax_pos::{Span, DUMMY_SP}; -use syntax::parse::token::InternedString; use syntax::attr::AttrMetaMethods; use syntax::attr; -use rustc::hir::intravisit::{self, Visitor}; use rustc::hir; use syntax::ast; @@ -192,8 +177,12 @@ impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> { } } -pub fn kind_for_closure(ccx: &CrateContext, closure_id: DefId) -> ty::ClosureKind { - *ccx.tcx().tables.borrow().closure_kinds.get(&closure_id).unwrap() +pub fn get_meta(bcx: Block, fat_ptr: ValueRef) -> ValueRef { + StructGEP(bcx, fat_ptr, abi::FAT_PTR_EXTRA) +} + +pub fn get_dataptr(bcx: Block, 
fat_ptr: ValueRef) -> ValueRef { + StructGEP(bcx, fat_ptr, abi::FAT_PTR_ADDR) } fn require_alloc_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, info_ty: Ty<'tcx>, it: LangItem) -> DefId { @@ -220,7 +209,7 @@ pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // Allocate space: let def_id = require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem); let r = Callee::def(bcx.ccx(), def_id, Substs::empty(bcx.tcx())) - .call(bcx, debug_loc, ArgVals(&[size, align]), None); + .call(bcx, debug_loc, &[size, align], None); Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr)) } @@ -395,154 +384,6 @@ pub fn compare_simd_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, SExt(bcx, ICmp(bcx, cmp, lhs, rhs, debug_loc), ret_ty) } -// Iterates through the elements of a structural type. -pub fn iter_structural_ty<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>, - av: ValueRef, - t: Ty<'tcx>, - mut f: F) - -> Block<'blk, 'tcx> - where F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx> -{ - let _icx = push_ctxt("iter_structural_ty"); - - fn iter_variant<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>, - repr: &adt::Repr<'tcx>, - av: adt::MaybeSizedValue, - variant: ty::VariantDef<'tcx>, - substs: &Substs<'tcx>, - f: &mut F) - -> Block<'blk, 'tcx> - where F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx> - { - let _icx = push_ctxt("iter_variant"); - let tcx = cx.tcx(); - let mut cx = cx; - - for (i, field) in variant.fields.iter().enumerate() { - let arg = monomorphize::field_ty(tcx, substs, field); - cx = f(cx, - adt::trans_field_ptr(cx, repr, av, Disr::from(variant.disr_val), i), - arg); - } - return cx; - } - - let value = if common::type_is_sized(cx.tcx(), t) { - adt::MaybeSizedValue::sized(av) - } else { - let data = Load(cx, expr::get_dataptr(cx, av)); - let info = Load(cx, expr::get_meta(cx, av)); - adt::MaybeSizedValue::unsized_(data, info) - }; - - let mut cx = cx; - match t.sty { - ty::TyStruct(..) 
=> { - let repr = adt::represent_type(cx.ccx(), t); - let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None); - for (i, &Field(_, field_ty)) in fields.iter().enumerate() { - let llfld_a = adt::trans_field_ptr(cx, &repr, value, Disr::from(discr), i); - - let val = if common::type_is_sized(cx.tcx(), field_ty) { - llfld_a - } else { - let scratch = datum::rvalue_scratch_datum(cx, field_ty, "__fat_ptr_iter"); - Store(cx, llfld_a, expr::get_dataptr(cx, scratch.val)); - Store(cx, value.meta, expr::get_meta(cx, scratch.val)); - scratch.val - }; - cx = f(cx, val, field_ty); - } - } - ty::TyClosure(_, ref substs) => { - let repr = adt::represent_type(cx.ccx(), t); - for (i, upvar_ty) in substs.upvar_tys.iter().enumerate() { - let llupvar = adt::trans_field_ptr(cx, &repr, value, Disr(0), i); - cx = f(cx, llupvar, upvar_ty); - } - } - ty::TyArray(_, n) => { - let (base, len) = tvec::get_fixed_base_and_len(cx, value.value, n); - let unit_ty = t.sequence_element_type(cx.tcx()); - cx = tvec::iter_vec_raw(cx, base, unit_ty, len, f); - } - ty::TySlice(_) | ty::TyStr => { - let unit_ty = t.sequence_element_type(cx.tcx()); - cx = tvec::iter_vec_raw(cx, value.value, unit_ty, value.meta, f); - } - ty::TyTuple(ref args) => { - let repr = adt::represent_type(cx.ccx(), t); - for (i, arg) in args.iter().enumerate() { - let llfld_a = adt::trans_field_ptr(cx, &repr, value, Disr(0), i); - cx = f(cx, llfld_a, *arg); - } - } - ty::TyEnum(en, substs) => { - let fcx = cx.fcx; - let ccx = fcx.ccx; - - let repr = adt::represent_type(ccx, t); - let n_variants = en.variants.len(); - - // NB: we must hit the discriminant first so that structural - // comparison knows not to proceed when the discriminants differ. - - match adt::trans_switch(cx, &repr, av, false) { - (_match::Single, None) => { - if n_variants != 0 { - assert!(n_variants == 1); - cx = iter_variant(cx, &repr, adt::MaybeSizedValue::sized(av), - &en.variants[0], substs, &mut f); - } - } - (_match::Switch, Some(lldiscrim_a)) => { - cx = f(cx, lldiscrim_a, cx.tcx().types.isize); - - // Create a fall-through basic block for the "else" case of - // the switch instruction we're about to generate. Note that - // we do **not** use an Unreachable instruction here, even - // though most of the time this basic block will never be hit. - // - // When an enum is dropped its contents are currently - // overwritten to DTOR_DONE, which means the discriminant - // could have changed value to something not within the actual - // range of the discriminant. Currently this function is only - // used for drop glue so in this case we just return quickly - // from the outer function, and any other use case will only - // call this for an already-valid enum in which case the `ret - // void` will never be hit.
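The comment above is the crux of the removed fall-through handling: a paved-over discriminant must land in a branch with defined behavior, not on an `unreachable` terminator. A minimal stand-alone sketch of that control-flow shape in plain Rust (the function and its arguments are illustrative, not the trans code itself):

// Drop glue may read a discriminant that filling drop already overwrote.
// An out-of-range value therefore means "nothing left to do", so the
// default arm returns instead of claiming unreachability.
fn drop_glue_model(discr: usize, variant_drops: &[fn()]) {
    match variant_drops.get(discr) {
        Some(drop_variant) => drop_variant(), // in range: drop this variant
        None => return, // paved-over discriminant: defined, early return
    }
}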
- let ret_void_cx = fcx.new_temp_block("enum-iter-ret-void"); - RetVoid(ret_void_cx, DebugLoc::None); - let llswitch = Switch(cx, lldiscrim_a, ret_void_cx.llbb, n_variants); - let next_cx = fcx.new_temp_block("enum-iter-next"); - - for variant in &en.variants { - let variant_cx = fcx.new_temp_block(&format!("enum-iter-variant-{}", - &variant.disr_val - .to_string())); - let case_val = adt::trans_case(cx, &repr, Disr::from(variant.disr_val)); - AddCase(llswitch, case_val, variant_cx.llbb); - let variant_cx = iter_variant(variant_cx, - &repr, - value, - variant, - substs, - &mut f); - Br(variant_cx, next_cx.llbb, DebugLoc::None); - } - cx = next_cx; - } - _ => ccx.sess().unimpl("value from adt::trans_switch in iter_structural_ty"), - } - } - _ => { - cx.sess().unimpl(&format!("type in iter_structural_ty: {}", t)) - } - } - return cx; -} - - /// Retrieve the information we are losing (making dynamic) in an unsizing /// adjustment. /// @@ -634,12 +475,12 @@ pub fn coerce_unsized_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let src_repr = adt::represent_type(bcx.ccx(), src_ty); let src_fields = match &*src_repr { - &adt::Repr::Univariant(ref s, _) => &s.fields, + &adt::Repr::Univariant(ref s) => &s.fields, _ => bug!("struct has non-univariant repr"), }; let dst_repr = adt::represent_type(bcx.ccx(), dst_ty); let dst_fields = match &*dst_repr { - &adt::Repr::Univariant(ref s, _) => &s.fields, + &adt::Repr::Univariant(ref s) => &s.fields, _ => bug!("struct has non-univariant repr"), }; @@ -733,101 +574,6 @@ fn cast_shift_rhs(op: hir::BinOp_, } } -pub fn llty_and_min_for_signed_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, - val_t: Ty<'tcx>) - -> (Type, u64) { - match val_t.sty { - ty::TyInt(t) => { - let llty = Type::int_from_ty(cx.ccx(), t); - let min = match t { - ast::IntTy::Is if llty == Type::i32(cx.ccx()) => i32::MIN as u64, - ast::IntTy::Is => i64::MIN as u64, - ast::IntTy::I8 => i8::MIN as u64, - ast::IntTy::I16 => i16::MIN as u64, - ast::IntTy::I32 => i32::MIN as u64, - ast::IntTy::I64 => i64::MIN as u64, - }; - (llty, min) - } - _ => bug!(), - } -} - -pub fn fail_if_zero_or_overflows<'blk, 'tcx>(cx: Block<'blk, 'tcx>, - call_info: NodeIdAndSpan, - divrem: hir::BinOp, - lhs: ValueRef, - rhs: ValueRef, - rhs_t: Ty<'tcx>) - -> Block<'blk, 'tcx> { - use rustc_const_math::{ConstMathErr, Op}; - - let (zero_err, overflow_err) = if divrem.node == hir::BiDiv { - (ConstMathErr::DivisionByZero, ConstMathErr::Overflow(Op::Div)) - } else { - (ConstMathErr::RemainderByZero, ConstMathErr::Overflow(Op::Rem)) - }; - let debug_loc = call_info.debug_loc(); - - let (is_zero, is_signed) = match rhs_t.sty { - ty::TyInt(t) => { - let zero = C_integral(Type::int_from_ty(cx.ccx(), t), 0, false); - (ICmp(cx, llvm::IntEQ, rhs, zero, debug_loc), true) - } - ty::TyUint(t) => { - let zero = C_integral(Type::uint_from_ty(cx.ccx(), t), 0, false); - (ICmp(cx, llvm::IntEQ, rhs, zero, debug_loc), false) - } - ty::TyStruct(def, _) if def.is_simd() => { - let mut res = C_bool(cx.ccx(), false); - for i in 0..rhs_t.simd_size(cx.tcx()) { - res = Or(cx, - res, - IsNull(cx, ExtractElement(cx, rhs, C_int(cx.ccx(), i as i64))), - debug_loc); - } - (res, false) - } - _ => { - bug!("fail-if-zero on unexpected type: {}", rhs_t); - } - }; - let bcx = with_cond(cx, is_zero, |bcx| { - controlflow::trans_fail(bcx, call_info, InternedString::new(zero_err.description())) - }); - - // To quote LLVM's documentation for the sdiv instruction: - // - // Division by zero leads to undefined behavior. 
Overflow also leads - // to undefined behavior; this is a rare case, but can occur, for - // example, by doing a 32-bit division of -2147483648 by -1. - // - // In order to avoid undefined behavior, we perform runtime checks for - // signed division/remainder which would trigger overflow. For unsigned - // integers, no action beyond checking for zero need be taken. - if is_signed { - let (llty, min) = llty_and_min_for_signed_ty(cx, rhs_t); - let minus_one = ICmp(bcx, - llvm::IntEQ, - rhs, - C_integral(llty, !0, false), - debug_loc); - with_cond(bcx, minus_one, |bcx| { - let is_min = ICmp(bcx, - llvm::IntEQ, - lhs, - C_integral(llty, min, true), - debug_loc); - with_cond(bcx, is_min, |bcx| { - controlflow::trans_fail(bcx, call_info, - InternedString::new(overflow_err.description())) - }) - }) - } else { - bcx - } -} - pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, llfn: ValueRef, llargs: &[ValueRef], @@ -838,21 +584,12 @@ pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, return (C_null(Type::i8(bcx.ccx())), bcx); } - match bcx.opt_node_id { - None => { - debug!("invoke at ???"); - } - Some(id) => { - debug!("invoke at {}", bcx.tcx().map.node_to_string(id)); - } - } - if need_invoke(bcx) { debug!("invoking {:?} at {:?}", Value(llfn), bcx.llbb); for &llarg in llargs { debug!("arg: {:?}", Value(llarg)); } - let normal_bcx = bcx.fcx.new_temp_block("normal-return"); + let normal_bcx = bcx.fcx.new_block("normal-return"); let landing_pad = bcx.fcx.get_landing_pad(); let llresult = Invoke(bcx, @@ -894,14 +631,6 @@ pub fn need_invoke(bcx: Block) -> bool { } } -pub fn load_if_immediate<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, t: Ty<'tcx>) -> ValueRef { - let _icx = push_ctxt("load_if_immediate"); - if type_is_immediate(cx.ccx(), t) { - return load_ty(cx, v, t); - } - return v; -} - /// Helper for loading values from memory. Does the necessary conversion if the in-memory type /// differs from the type used for SSA values. Also handles various special cases where the type /// gives us better information about what we are loading. 
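The removed `fail_if_zero_or_overflows` above is the runtime counterpart of that LLVM caveat. A self-contained sketch of the same two guards for `i32` (`checked_signed_div` is a hypothetical name, not a compiler or library API):

// Reject a divisor of zero first, then the single overflowing case for
// signed division, MIN / -1, before performing the division itself.
fn checked_signed_div(lhs: i32, rhs: i32) -> i32 {
    if rhs == 0 {
        panic!("attempt to divide by zero");
    }
    if rhs == -1 && lhs == i32::MIN {
        panic!("attempt to divide with overflow");
    }
    lhs / rhs // guaranteed not to trap or overflow now
}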
@@ -957,10 +686,10 @@ pub fn store_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t if common::type_is_fat_ptr(cx.tcx(), t) { Store(cx, ExtractValue(cx, v, abi::FAT_PTR_ADDR), - expr::get_dataptr(cx, dst)); + get_dataptr(cx, dst)); Store(cx, ExtractValue(cx, v, abi::FAT_PTR_EXTRA), - expr::get_meta(cx, dst)); + get_meta(cx, dst)); } else { Store(cx, from_immediate(cx, v), dst); } @@ -972,8 +701,8 @@ pub fn store_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>, dst: ValueRef, _ty: Ty<'tcx>) { // FIXME: emit metadata - Store(cx, data, expr::get_dataptr(cx, dst)); - Store(cx, extra, expr::get_meta(cx, dst)); + Store(cx, data, get_dataptr(cx, dst)); + Store(cx, extra, get_meta(cx, dst)); } pub fn load_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>, @@ -981,8 +710,8 @@ pub fn load_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>, _ty: Ty<'tcx>) -> (ValueRef, ValueRef) { // FIXME: emit metadata - (Load(cx, expr::get_dataptr(cx, src)), - Load(cx, expr::get_meta(cx, src))) + (Load(cx, get_dataptr(cx, src)), + Load(cx, get_meta(cx, src))) } pub fn from_immediate(bcx: Block, val: ValueRef) -> ValueRef { @@ -1001,19 +730,6 @@ pub fn to_immediate(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef { } } -pub fn init_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, local: &hir::Local) -> Block<'blk, 'tcx> { - debug!("init_local(bcx={}, local.id={})", bcx.to_str(), local.id); - let _indenter = indenter(); - let _icx = push_ctxt("init_local"); - _match::store_local(bcx, local) -} - -pub fn raw_block<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>, - llbb: BasicBlockRef) - -> Block<'blk, 'tcx> { - common::BlockS::new(llbb, None, fcx) -} - pub fn with_cond<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, val: ValueRef, f: F) -> Block<'blk, 'tcx> where F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx> { @@ -1024,8 +740,8 @@ pub fn with_cond<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, val: ValueRef, f: F) -> } let fcx = bcx.fcx; - let next_cx = fcx.new_temp_block("next"); - let cond_cx = fcx.new_temp_block("cond"); + let next_cx = fcx.new_block("next"); + let cond_cx = fcx.new_block("cond"); CondBr(bcx, val, cond_cx.llbb, next_cx.llbb, DebugLoc::None); let after_cx = f(cond_cx); if !after_cx.terminated.get() { @@ -1099,7 +815,7 @@ pub fn trans_unwind_resume(bcx: Block, lpval: ValueRef) { } else { let exc_ptr = ExtractValue(bcx, lpval, 0); bcx.fcx.eh_unwind_resume() - .call(bcx, DebugLoc::None, ArgVals(&[exc_ptr]), None); + .call(bcx, DebugLoc::None, &[exc_ptr], None); } } @@ -1142,15 +858,6 @@ pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, dst: ValueRef, src: ValueRe } } -pub fn drop_done_fill_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) { - if cx.unreachable.get() { - return; - } - let _icx = push_ctxt("drop_done_fill_mem"); - let bcx = cx; - memfill(&B(bcx), llptr, t, adt::DTOR_DONE); -} - pub fn init_zero_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) { if cx.unreachable.get() { return; @@ -1190,82 +897,11 @@ pub fn call_memset<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>, b.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None); } - -/// In general, when we create a scratch value in an alloca, the -/// creator may not know if the block (that initializes the scratch -/// with the desired value) actually dominates the cleanup associated -/// with the scratch value. -/// -/// To deal with this, when we do an alloca (at the *start* of the whole -/// function body), we optionally can also set the associated -/// dropped-flag state of the alloca to "dropped."
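The `get_dataptr`/`get_meta` pairs above all index the same two-word layout. A rough model of that fat-pointer struct and of the two loads in `load_fat_ptr` (the `FatPtr` type here is illustrative only; the compiler builds this shape directly in LLVM IR):

// Field 0 (abi::FAT_PTR_ADDR) is the data pointer; field 1
// (abi::FAT_PTR_EXTRA) is the metadata: a slice length or a vtable pointer.
#[repr(C)]
struct FatPtr {
    data: *const u8,
    meta: usize,
}

// Mirrors load_fat_ptr: one load per word.
fn load_fat_ptr_model(p: &FatPtr) -> (*const u8, usize) {
    (p.data, p.meta)
}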
-#[derive(Copy, Clone, Debug)] -pub enum InitAlloca { - /// Indicates that the state should have its associated drop flag - /// set to "dropped" at the point of allocation. - Dropped, - /// Indicates the value of the associated drop flag is irrelevant. - /// The embedded string literal is a programmer provided argument - /// for why. This is a safeguard forcing compiler devs to - /// document; it might be a good idea to also emit this as a - /// comment with the alloca itself when emitting LLVM output.ll. - Uninit(&'static str), -} - - pub fn alloc_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - t: Ty<'tcx>, + ty: Ty<'tcx>, name: &str) -> ValueRef { - // pnkfelix: I do not know why alloc_ty meets the assumptions for - // passing Uninit, but it was never needed (even back when we had - // the original boolean `zero` flag on `lvalue_scratch_datum`). - alloc_ty_init(bcx, t, InitAlloca::Uninit("all alloc_ty are uninit"), name) -} - -/// This variant of `fn alloc_ty` does not necessarily assume that the -/// alloca should be created with no initial value. Instead the caller -/// controls that assumption via the `init` flag. -/// -/// Note that if the alloca *is* initialized via `init`, then we will -/// also inject an `llvm.lifetime.start` before that initialization -/// occurs, and thus callers should not call_lifetime_start -/// themselves. But if `init` says "uninitialized", then callers are -/// in charge of choosing where to call_lifetime_start and -/// subsequently populate the alloca. -/// -/// (See related discussion on PR #30823.) -pub fn alloc_ty_init<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - t: Ty<'tcx>, - init: InitAlloca, - name: &str) -> ValueRef { - let _icx = push_ctxt("alloc_ty"); - let ccx = bcx.ccx(); - let ty = type_of::type_of(ccx, t); - assert!(!t.has_param_types()); - match init { - InitAlloca::Dropped => alloca_dropped(bcx, t, name), - InitAlloca::Uninit(_) => alloca(bcx, ty, name), - } -} - -pub fn alloca_dropped<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ty: Ty<'tcx>, name: &str) -> ValueRef { - let _icx = push_ctxt("alloca_dropped"); - let llty = type_of::type_of(cx.ccx(), ty); - if cx.unreachable.get() { - unsafe { return llvm::LLVMGetUndef(llty.ptr_to().to_ref()); } - } - let p = alloca(cx, llty, name); - let b = cx.fcx.ccx.builder(); - b.position_before(cx.fcx.alloca_insert_pt.get().unwrap()); - - // This is just like `call_lifetime_start` (but latter expects a - // Block, which we do not have for `alloca_insert_pt`). - core_lifetime_emit(cx.ccx(), p, Lifetime::Start, |ccx, size, lifetime_start| { - let ptr = b.pointercast(p, Type::i8p(ccx)); - b.call(lifetime_start, &[C_u64(ccx, size), ptr], None); - }); - memfill(&b, p, ty, adt::DTOR_DONE); - p + assert!(!ty.has_param_types()); + alloca(bcx, type_of::type_of(bcx.ccx(), ty), name) } pub fn alloca(cx: Block, ty: Type, name: &str) -> ValueRef { @@ -1279,121 +915,6 @@ pub fn alloca(cx: Block, ty: Type, name: &str) -> ValueRef { Alloca(cx, ty, name) } -pub fn set_value_name(val: ValueRef, name: &str) { - unsafe { - let name = CString::new(name).unwrap(); - llvm::LLVMSetValueName(val, name.as_ptr()); - } -} - -struct FindNestedReturn { - found: bool, -} - -impl FindNestedReturn { - fn new() -> FindNestedReturn { - FindNestedReturn { - found: false, - } - } -} - -impl<'v> Visitor<'v> for FindNestedReturn { - fn visit_expr(&mut self, e: &hir::Expr) { - match e.node { - hir::ExprRet(..) 
=> { - self.found = true; - } - _ => intravisit::walk_expr(self, e), - } - } -} - -fn build_cfg<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - id: ast::NodeId) - -> (ast::NodeId, Option) { - let blk = match tcx.map.find(id) { - Some(hir_map::NodeItem(i)) => { - match i.node { - hir::ItemFn(_, _, _, _, _, ref blk) => { - blk - } - _ => bug!("unexpected item variant in has_nested_returns"), - } - } - Some(hir_map::NodeTraitItem(trait_item)) => { - match trait_item.node { - hir::MethodTraitItem(_, Some(ref body)) => body, - _ => { - bug!("unexpected variant: trait item other than a provided method in \ - has_nested_returns") - } - } - } - Some(hir_map::NodeImplItem(impl_item)) => { - match impl_item.node { - hir::ImplItemKind::Method(_, ref body) => body, - _ => { - bug!("unexpected variant: non-method impl item in has_nested_returns") - } - } - } - Some(hir_map::NodeExpr(e)) => { - match e.node { - hir::ExprClosure(_, _, ref blk, _) => blk, - _ => bug!("unexpected expr variant in has_nested_returns"), - } - } - Some(hir_map::NodeVariant(..)) | - Some(hir_map::NodeStructCtor(..)) => return (ast::DUMMY_NODE_ID, None), - - // glue, shims, etc - None if id == ast::DUMMY_NODE_ID => return (ast::DUMMY_NODE_ID, None), - - _ => bug!("unexpected variant in has_nested_returns: {}", - tcx.node_path_str(id)), - }; - - (blk.id, Some(cfg::CFG::new(tcx, blk))) -} - -// Checks for the presence of "nested returns" in a function. -// Nested returns are when the inner expression of a return expression -// (the 'expr' in 'return expr') contains a return expression. Only cases -// where the outer return is actually reachable are considered. Implicit -// returns from the end of blocks are considered as well. -// -// This check is needed to handle the case where the inner expression is -// part of a larger expression that may have already partially-filled the -// return slot alloca. This can cause errors related to clean-up due to -// the clobbering of the existing value in the return slot. -fn has_nested_returns(tcx: TyCtxt, cfg: &cfg::CFG, blk_id: ast::NodeId) -> bool { - for index in cfg.graph.depth_traverse(cfg.entry, OUTGOING) { - let n = cfg.graph.node_data(index); - match tcx.map.find(n.id()) { - Some(hir_map::NodeExpr(ex)) => { - if let hir::ExprRet(Some(ref ret_expr)) = ex.node { - let mut visitor = FindNestedReturn::new(); - intravisit::walk_expr(&mut visitor, &ret_expr); - if visitor.found { - return true; - } - } - } - Some(hir_map::NodeBlock(blk)) if blk.id == blk_id => { - let mut visitor = FindNestedReturn::new(); - walk_list!(&mut visitor, visit_expr, &blk.expr); - if visitor.found { - return true; - } - } - _ => {} - } - } - - return false; -} - impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { /// Create a function context for the given function. 
/// Beware that you must call `fcx.init` or `fcx.bind_args` @@ -1401,15 +922,15 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { pub fn new(ccx: &'blk CrateContext<'blk, 'tcx>, llfndecl: ValueRef, fn_ty: FnType, - definition: Option<(Instance<'tcx>, &ty::FnSig<'tcx>, Abi, ast::NodeId)>, + definition: Option<(Instance<'tcx>, &ty::FnSig<'tcx>, Abi)>, block_arena: &'blk TypedArena>) -> FunctionContext<'blk, 'tcx> { - let (param_substs, def_id, inlined_id) = match definition { - Some((instance, _, _, inlined_id)) => { + let (param_substs, def_id) = match definition { + Some((instance, _, _)) => { common::validate_substs(instance.substs); - (instance.substs, Some(instance.def), Some(inlined_id)) + (instance.substs, Some(instance.def)) } - None => (Substs::empty(ccx.tcx()), None, None) + None => (Substs::empty(ccx.tcx()), None) }; let local_id = def_id.and_then(|id| ccx.tcx().map.as_local_node_id(id)); @@ -1417,70 +938,47 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { debug!("FunctionContext::new({})", definition.map_or(String::new(), |d| d.0.to_string())); - let cfg = inlined_id.map(|id| build_cfg(ccx.tcx(), id)); - let nested_returns = if let Some((blk_id, Some(ref cfg))) = cfg { - has_nested_returns(ccx.tcx(), cfg, blk_id) - } else { - false - }; - - let check_attrs = |attrs: &[ast::Attribute]| { - let default_to_mir = ccx.sess().opts.debugging_opts.orbit; - let invert = if default_to_mir { "rustc_no_mir" } else { "rustc_mir" }; - (default_to_mir ^ attrs.iter().any(|item| item.check_name(invert)), - attrs.iter().any(|item| item.check_name("no_debug"))) - }; - - let (use_mir, no_debug) = if let Some(id) = local_id { - check_attrs(ccx.tcx().map.attrs(id)) + let no_debug = if let Some(id) = local_id { + ccx.tcx().map.attrs(id) + .iter().any(|item| item.check_name("no_debug")) } else if let Some(def_id) = def_id { - check_attrs(&ccx.sess().cstore.item_attrs(def_id)) + ccx.sess().cstore.item_attrs(def_id) + .iter().any(|item| item.check_name("no_debug")) } else { - check_attrs(&[]) + false }; - let mir = if use_mir { - def_id.and_then(|id| ccx.get_mir(id)) - } else { - None - }; + let mir = def_id.and_then(|id| ccx.get_mir(id)); - let debug_context = if let (false, Some(definition)) = (no_debug, definition) { - let (instance, sig, abi, _) = definition; - debuginfo::create_function_debug_context(ccx, instance, sig, abi, llfndecl) + let debug_context = if let (false, Some((instance, sig, abi)), &Some(ref mir)) = + (no_debug, definition, &mir) { + debuginfo::create_function_debug_context(ccx, instance, sig, abi, llfndecl, mir) } else { debuginfo::empty_function_debug_context(ccx) }; FunctionContext { - needs_ret_allocas: nested_returns && mir.is_none(), mir: mir, llfn: llfndecl, llretslotptr: Cell::new(None), param_env: ccx.tcx().empty_parameter_environment(), alloca_insert_pt: Cell::new(None), - llreturn: Cell::new(None), landingpad_alloca: Cell::new(None), - lllocals: RefCell::new(NodeMap()), - llupvars: RefCell::new(NodeMap()), - lldropflag_hints: RefCell::new(DropFlagHintsMap::new()), fn_ty: fn_ty, param_substs: param_substs, - span: inlined_id.and_then(|id| ccx.tcx().map.opt_span(id)), + span: None, block_arena: block_arena, lpad_arena: TypedArena::new(), ccx: ccx, debug_context: debug_context, scopes: RefCell::new(Vec::new()), - cfg: cfg.and_then(|(_, cfg)| cfg) } } /// Performs setup on a newly created function, creating the entry /// scope block and allocating space for the return pointer. 
- pub fn init(&'blk self, skip_retptr: bool, fn_did: Option) - -> Block<'blk, 'tcx> { - let entry_bcx = self.new_temp_block("entry-block"); + pub fn init(&'blk self, skip_retptr: bool) -> Block<'blk, 'tcx> { + let entry_bcx = self.new_block("entry-block"); // Use a dummy instruction as the insertion point for all allocas. // This is later removed in FunctionContext::cleanup. @@ -1498,244 +996,26 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { // which will hold the pointer to the right alloca which has the // final ret value let llty = self.fn_ty.ret.memory_ty(self.ccx); - let slot = if self.needs_ret_allocas { - // Let's create the stack slot - let slot = AllocaFcx(self, llty.ptr_to(), "llretslotptr"); - - // and if we're using an out pointer, then store that in our newly made slot - if self.fn_ty.ret.is_indirect() { - let outptr = get_param(self.llfn, 0); - - let b = self.ccx.builder(); - b.position_before(self.alloca_insert_pt.get().unwrap()); - b.store(outptr, slot); - } - - slot + // But if there are no nested returns, we skip the indirection + // and have a single retslot + let slot = if self.fn_ty.ret.is_indirect() { + get_param(self.llfn, 0) } else { - // But if there are no nested returns, we skip the indirection - // and have a single retslot - if self.fn_ty.ret.is_indirect() { - get_param(self.llfn, 0) - } else { - AllocaFcx(self, llty, "sret_slot") - } + AllocaFcx(self, llty, "sret_slot") }; self.llretslotptr.set(Some(slot)); } - // Create the drop-flag hints for every unfragmented path in the function. - let tcx = self.ccx.tcx(); - let tables = tcx.tables.borrow(); - let mut hints = self.lldropflag_hints.borrow_mut(); - let fragment_infos = tcx.fragment_infos.borrow(); - - // Intern table for drop-flag hint datums. - let mut seen = HashMap::new(); - - let fragment_infos = fn_did.and_then(|did| fragment_infos.get(&did)); - if let Some(fragment_infos) = fragment_infos { - for &info in fragment_infos { - - let make_datum = |id| { - let init_val = C_u8(self.ccx, adt::DTOR_NEEDED_HINT); - let llname = &format!("dropflag_hint_{}", id); - debug!("adding hint {}", llname); - let ty = tcx.types.u8; - let ptr = alloc_ty(entry_bcx, ty, llname); - Store(entry_bcx, init_val, ptr); - let flag = datum::Lvalue::new_dropflag_hint("FunctionContext::init"); - datum::Datum::new(ptr, ty, flag) - }; - - let (var, datum) = match info { - ty::FragmentInfo::Moved { var, .. } | - ty::FragmentInfo::Assigned { var, .. } => { - let opt_datum = seen.get(&var).cloned().unwrap_or_else(|| { - let ty = tables.node_types[&var]; - if self.type_needs_drop(ty) { - let datum = make_datum(var); - seen.insert(var, Some(datum.clone())); - Some(datum) - } else { - // No drop call needed, so we don't need a dropflag hint - None - } - }); - if let Some(datum) = opt_datum { - (var, datum) - } else { - continue - } - } - }; - match info { - ty::FragmentInfo::Moved { move_expr: expr_id, .. } => { - debug!("FragmentInfo::Moved insert drop hint for {}", expr_id); - hints.insert(expr_id, DropHint::new(var, datum)); - } - ty::FragmentInfo::Assigned { assignee_id: expr_id, .. } => { - debug!("FragmentInfo::Assigned insert drop hint for {}", expr_id); - hints.insert(expr_id, DropHint::new(var, datum)); - } - } - } - } - entry_bcx } - /// Creates lvalue datums for each of the incoming function arguments, - /// matches all argument patterns against them to produce bindings, - /// and returns the entry block (see FunctionContext::init). 
- fn bind_args(&'blk self, - args: &[hir::Arg], - abi: Abi, - id: ast::NodeId, - closure_env: closure::ClosureEnv, - arg_scope: cleanup::CustomScopeIndex) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("FunctionContext::bind_args"); - let fn_did = self.ccx.tcx().map.local_def_id(id); - let mut bcx = self.init(false, Some(fn_did)); - let arg_scope_id = cleanup::CustomScope(arg_scope); - - let mut idx = 0; - let mut llarg_idx = self.fn_ty.ret.is_indirect() as usize; - - let has_tupled_arg = match closure_env { - closure::ClosureEnv::NotClosure => abi == Abi::RustCall, - closure::ClosureEnv::Closure(..) => { - closure_env.load(bcx, arg_scope_id); - let env_arg = &self.fn_ty.args[idx]; - idx += 1; - if env_arg.pad.is_some() { - llarg_idx += 1; - } - if !env_arg.is_ignore() { - llarg_idx += 1; - } - false - } - }; - let tupled_arg_id = if has_tupled_arg { - args[args.len() - 1].id - } else { - ast::DUMMY_NODE_ID - }; - - // Return an array wrapping the ValueRefs that we get from `get_param` for - // each argument into datums. - // - // For certain mode/type combinations, the raw llarg values are passed - // by value. However, within the fn body itself, we want to always - // have all locals and arguments be by-ref so that we can cancel the - // cleanup and for better interaction with LLVM's debug info. So, if - // the argument would be passed by value, we store it into an alloca. - // This alloca should be optimized away by LLVM's mem-to-reg pass in - // the event it's not truly needed. - let uninit_reason = InitAlloca::Uninit("fn_arg populate dominates dtor"); - for hir_arg in args { - let arg_ty = node_id_type(bcx, hir_arg.id); - let arg_datum = if hir_arg.id != tupled_arg_id { - let arg = &self.fn_ty.args[idx]; - idx += 1; - if arg.is_indirect() && bcx.sess().opts.debuginfo != FullDebugInfo { - // Don't copy an indirect argument to an alloca, the caller - // already put it in a temporary alloca and gave it up, unless - // we emit extra-debug-info, which requires local allocas :(. - let llarg = get_param(self.llfn, llarg_idx as c_uint); - llarg_idx += 1; - self.schedule_lifetime_end(arg_scope_id, llarg); - self.schedule_drop_mem(arg_scope_id, llarg, arg_ty, None); - - datum::Datum::new(llarg, - arg_ty, - datum::Lvalue::new("FunctionContext::bind_args")) - } else { - unpack_datum!(bcx, datum::lvalue_scratch_datum(bcx, arg_ty, "", - uninit_reason, - arg_scope_id, |bcx, dst| { - debug!("FunctionContext::bind_args: {:?}: {:?}", hir_arg, arg_ty); - let b = &bcx.build(); - if common::type_is_fat_ptr(bcx.tcx(), arg_ty) { - let meta = &self.fn_ty.args[idx]; - idx += 1; - arg.store_fn_arg(b, &mut llarg_idx, expr::get_dataptr(bcx, dst)); - meta.store_fn_arg(b, &mut llarg_idx, expr::get_meta(bcx, dst)); - } else { - arg.store_fn_arg(b, &mut llarg_idx, dst); - } - bcx - })) - } - } else { - // FIXME(pcwalton): Reduce the amount of code bloat this is responsible for. 
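The block being removed here spills by-value arguments into allocas so the body sees every local uniformly by reference, counting on LLVM's mem2reg pass to delete the spill whenever the address is never really needed. A plain-Rust caricature of that spill (illustrative only, not compiler code):

fn body_model(arg_by_value: u64) -> u64 {
    // The "alloca": give the argument a memory home up front.
    let mut spill = arg_by_value;
    // The body then works through a pointer to it, as cleanup and debug
    // info expect; mem2reg erases the spill when it is redundant.
    let slot: &mut u64 = &mut spill;
    *slot += 1;
    *slot
}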
- let tupled_arg_tys = match arg_ty.sty { - ty::TyTuple(ref tys) => tys, - _ => bug!("last argument of `rust-call` fn isn't a tuple?!") - }; - - unpack_datum!(bcx, datum::lvalue_scratch_datum(bcx, - arg_ty, - "tupled_args", - uninit_reason, - arg_scope_id, - |bcx, llval| { - debug!("FunctionContext::bind_args: tupled {:?}: {:?}", hir_arg, arg_ty); - for (j, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() { - let dst = StructGEP(bcx, llval, j); - let arg = &self.fn_ty.args[idx]; - idx += 1; - let b = &bcx.build(); - if common::type_is_fat_ptr(bcx.tcx(), tupled_arg_ty) { - let meta = &self.fn_ty.args[idx]; - idx += 1; - arg.store_fn_arg(b, &mut llarg_idx, expr::get_dataptr(bcx, dst)); - meta.store_fn_arg(b, &mut llarg_idx, expr::get_meta(bcx, dst)); - } else { - arg.store_fn_arg(b, &mut llarg_idx, dst); - } - } - bcx - })) - }; - - let pat = &hir_arg.pat; - bcx = if let Some(name) = simple_name(pat) { - // Generate nicer LLVM for the common case of a fn argument pattern - // like `x: T` - set_value_name(arg_datum.val, &bcx.name(name)); - self.lllocals.borrow_mut().insert(pat.id, arg_datum); - bcx - } else { - // General path. Copy out the values that are used in the - // pattern. - _match::bind_irrefutable_pat(bcx, pat, arg_datum.match_input(), arg_scope_id) - }; - debuginfo::create_argument_metadata(bcx, hir_arg); - } - - bcx - } - /// Ties up the llstaticallocas -> llloadenv -> lltop edges, /// and builds the return block. - pub fn finish(&'blk self, last_bcx: Block<'blk, 'tcx>, + pub fn finish(&'blk self, ret_cx: Block<'blk, 'tcx>, ret_debug_loc: DebugLoc) { let _icx = push_ctxt("FunctionContext::finish"); - let ret_cx = match self.llreturn.get() { - Some(llreturn) => { - if !last_bcx.terminated.get() { - Br(last_bcx, llreturn, DebugLoc::None); - } - raw_block(self, llreturn) - } - None => last_bcx, - }; - self.build_return_block(ret_cx, ret_debug_loc); DebugLoc::None.apply(self); @@ -1747,15 +1027,11 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { ret_debug_location: DebugLoc) { if self.llretslotptr.get().is_none() || ret_cx.unreachable.get() || - (!self.needs_ret_allocas && self.fn_ty.ret.is_indirect()) { + self.fn_ty.ret.is_indirect() { return RetVoid(ret_cx, ret_debug_location); } - let retslot = if self.needs_ret_allocas { - Load(ret_cx, self.llretslotptr.get().unwrap()) - } else { - self.llretslotptr.get().unwrap() - }; + let retslot = self.llretslotptr.get().unwrap(); let retptr = Value(retslot); let llty = self.fn_ty.ret.original_ty; match (retptr.get_dominating_store(ret_cx), self.fn_ty.ret.cast) { @@ -1814,14 +1090,10 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { /// /// If the function closes over its environment a closure will be returned.
pub fn trans_closure<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - decl: &hir::FnDecl, - body: &hir::Block, llfndecl: ValueRef, instance: Instance<'tcx>, - inlined_id: ast::NodeId, sig: &ty::FnSig<'tcx>, - abi: Abi, - closure_env: closure::ClosureEnv) { + abi: Abi) { ccx.stats().n_closures.set(ccx.stats().n_closures.get() + 1); let _icx = push_ctxt("trans_closure"); @@ -1841,84 +1113,21 @@ pub fn trans_closure<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fcx = FunctionContext::new(ccx, llfndecl, fn_ty, - Some((instance, sig, abi, inlined_id)), + Some((instance, sig, abi)), &arena); - if fcx.mir.is_some() { - return mir::trans_mir(&fcx); + if fcx.mir.is_none() { + bug!("attempted translation of `{}` w/o MIR", instance); } - debuginfo::fill_scope_map_for_function(&fcx, decl, body, inlined_id); - - // cleanup scope for the incoming arguments - let fn_cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node( - ccx, inlined_id, body.span, true); - let arg_scope = fcx.push_custom_cleanup_scope_with_debug_loc(fn_cleanup_debug_loc); - - // Set up arguments to the function. - debug!("trans_closure: function: {:?}", Value(fcx.llfn)); - let bcx = fcx.bind_args(&decl.inputs, abi, inlined_id, closure_env, arg_scope); - - // Up until here, IR instructions for this function have explicitly not been annotated with - // source code location, so we don't step into call setup code. From here on, source location - // emitting should be enabled. - debuginfo::start_emitting_source_locations(&fcx); - - let dest = if fcx.fn_ty.ret.is_ignore() { - expr::Ignore - } else { - expr::SaveIn(fcx.get_ret_slot(bcx, "iret_slot")) - }; - - // This call to trans_block is the place where we bridge between - // translation calls that don't have a return value (trans_crate, - // trans_mod, trans_item, et cetera) and those that do - // (trans_block, trans_expr, et cetera). - let mut bcx = controlflow::trans_block(bcx, body, dest); - - match dest { - expr::SaveIn(slot) if fcx.needs_ret_allocas => { - Store(bcx, slot, fcx.llretslotptr.get().unwrap()); - } - _ => {} - } - - match fcx.llreturn.get() { - Some(_) => { - Br(bcx, fcx.return_exit_block(), DebugLoc::None); - fcx.pop_custom_cleanup_scope(arg_scope); - } - None => { - // Microoptimization writ large: avoid creating a separate - // llreturn basic block - bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, arg_scope); - } - }; - - // Put return block after all other blocks. - // This somewhat improves single-stepping experience in debugger. - unsafe { - let llreturn = fcx.llreturn.get(); - if let Some(llreturn) = llreturn { - llvm::LLVMMoveBasicBlockAfter(llreturn, bcx.llbb); - } - } - - // Insert the mandatory first few basic blocks before lltop. 
- fcx.finish(bcx, fn_cleanup_debug_loc.debug_loc()); + mir::trans_mir(&fcx); } pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance<'tcx>) { - let local_instance = inline::maybe_inline_instance(ccx, instance); - - let fn_node_id = ccx.tcx().map.as_local_node_id(local_instance.def).unwrap(); - - let _s = StatRecorder::new(ccx, ccx.tcx().node_path_str(fn_node_id)); + let _s = StatRecorder::new(ccx, ccx.tcx().item_path_str(instance.def)); debug!("trans_instance(instance={:?})", instance); let _icx = push_ctxt("trans_instance"); - let item = ccx.tcx().map.find(fn_node_id).unwrap(); - let fn_ty = ccx.tcx().lookup_item_type(instance.def).ty; let fn_ty = ccx.tcx().erase_regions(&fn_ty); let fn_ty = monomorphize::apply_param_substs(ccx.tcx(), instance.substs, &fn_ty); @@ -1927,109 +1136,24 @@ pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance let sig = ccx.tcx().normalize_associated_type(&sig); let abi = fn_ty.fn_abi(); - let lldecl = match ccx.instances().borrow().get(&local_instance) { + let lldecl = match ccx.instances().borrow().get(&instance) { Some(&val) => val, None => bug!("Instance `{:?}` not already declared", instance) }; - match item { - hir_map::NodeItem(&hir::Item { - node: hir::ItemFn(ref decl, _, _, _, _, ref body), .. - }) | - hir_map::NodeTraitItem(&hir::TraitItem { - node: hir::MethodTraitItem( - hir::MethodSig { ref decl, .. }, Some(ref body)), .. - }) | - hir_map::NodeImplItem(&hir::ImplItem { - node: hir::ImplItemKind::Method( - hir::MethodSig { ref decl, .. }, ref body), .. - }) => { - trans_closure(ccx, decl, body, lldecl, instance, - fn_node_id, &sig, abi, closure::ClosureEnv::NotClosure); - } - _ => bug!("Instance is a {:?}?", item) - } -} - -pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - ctor_ty: Ty<'tcx>, - disr: Disr, - args: CallArgs, - dest: expr::Dest, - debug_loc: DebugLoc) - -> Result<'blk, 'tcx> { - - let ccx = bcx.fcx.ccx; - - let sig = ccx.tcx().erase_late_bound_regions(&ctor_ty.fn_sig()); - let sig = ccx.tcx().normalize_associated_type(&sig); - let result_ty = sig.output; - - // Get location to store the result. If the user does not care about - // the result, just make a stack slot - let llresult = match dest { - expr::SaveIn(d) => d, - expr::Ignore => { - if !type_is_zero_size(ccx, result_ty) { - let llresult = alloc_ty(bcx, result_ty, "constructor_result"); - call_lifetime_start(bcx, llresult); - llresult - } else { - C_undef(type_of::type_of(ccx, result_ty).ptr_to()) - } - } - }; - - if !type_is_zero_size(ccx, result_ty) { - match args { - ArgExprs(exprs) => { - let fields = exprs.iter().map(|x| &**x).enumerate().collect::>(); - bcx = expr::trans_adt(bcx, - result_ty, - disr, - &fields[..], - None, - expr::SaveIn(llresult), - debug_loc); - } - _ => bug!("expected expr as arguments for variant/struct tuple constructor"), - } - } else { - // Just eval all the expressions (if any). Since expressions in Rust can have arbitrary - // contents, there could be side-effects we need from them. 
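The tail of the removed constructor path makes a point that outlives it: even when the constructed value is zero-sized, every argument expression is still evaluated for its side effects. A tiny stand-alone illustration (names hypothetical):

struct Empty; // zero-sized result type

fn noisy() -> u32 {
    println!("argument evaluated");
    0
}

fn build_empty() -> Empty {
    // Nothing of the argument is stored, but it must still run.
    let _ = noisy();
    Empty
}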
- match args { - ArgExprs(exprs) => { - for expr in exprs { - bcx = expr::trans_into(bcx, expr, expr::Ignore); - } - } - _ => (), - } - } - - // If the caller doesn't care about the result - // drop the temporary we made - let bcx = match dest { - expr::SaveIn(_) => bcx, - expr::Ignore => { - let bcx = glue::drop_ty(bcx, llresult, result_ty, debug_loc); - if !type_is_zero_size(ccx, result_ty) { - call_lifetime_end(bcx, llresult); - } - bcx - } - }; - - Result::new(bcx, llresult) + trans_closure(ccx, lldecl, instance, &sig, abi); } pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - ctor_id: ast::NodeId, + def_id: DefId, + substs: &'tcx Substs<'tcx>, disr: Disr, - param_substs: &'tcx Substs<'tcx>, llfndecl: ValueRef) { - let ctor_ty = ccx.tcx().node_id_to_type(ctor_id); - let ctor_ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, &ctor_ty); + attributes::inline(llfndecl, attributes::InlineAttr::Hint); + attributes::set_frame_pointer_elimination(ccx, llfndecl); + + let ctor_ty = ccx.tcx().lookup_item_type(def_id).ty; + let ctor_ty = monomorphize::apply_param_substs(ccx.tcx(), substs, &ctor_ty); let sig = ccx.tcx().erase_late_bound_regions(&ctor_ty.fn_sig()); let sig = ccx.tcx().normalize_associated_type(&sig); @@ -2038,12 +1162,10 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let (arena, fcx): (TypedArena<_>, FunctionContext); arena = TypedArena::new(); fcx = FunctionContext::new(ccx, llfndecl, fn_ty, None, &arena); - let bcx = fcx.init(false, None); - - assert!(!fcx.needs_ret_allocas); + let bcx = fcx.init(false); if !fcx.fn_ty.ret.is_ignore() { - let dest = fcx.get_ret_slot(bcx, "eret_slot"); + let dest = fcx.llretslotptr.get().unwrap(); let dest_val = adt::MaybeSizedValue::sized(dest); // Can return unsized value let repr = adt::represent_type(ccx, sig.output); let mut llarg_idx = fcx.fn_ty.ret.is_indirect() as usize; @@ -2056,8 +1178,8 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, if common::type_is_fat_ptr(bcx.tcx(), arg_ty) { let meta = &fcx.fn_ty.args[arg_idx]; arg_idx += 1; - arg.store_fn_arg(b, &mut llarg_idx, expr::get_dataptr(bcx, lldestptr)); - meta.store_fn_arg(b, &mut llarg_idx, expr::get_meta(bcx, lldestptr)); + arg.store_fn_arg(b, &mut llarg_idx, get_dataptr(bcx, lldestptr)); + meta.store_fn_arg(b, &mut llarg_idx, get_meta(bcx, lldestptr)); } else { arg.store_fn_arg(b, &mut llarg_idx, lldestptr); } @@ -2133,7 +1255,7 @@ pub fn maybe_create_entry_wrapper(ccx: &CrateContext) { return; } - let main_llfn = Callee::def(ccx, main_def_id, instance.substs).reify(ccx).val; + let main_llfn = Callee::def(ccx, main_def_id, instance.substs).reify(ccx); let et = ccx.sess().entry_type.get().unwrap(); match et { @@ -2175,7 +1297,7 @@ pub fn maybe_create_entry_wrapper(ccx: &CrateContext) { Err(s) => ccx.sess().fatal(&s) }; let empty_substs = Substs::empty(ccx.tcx()); - let start_fn = Callee::def(ccx, start_def_id, empty_substs).reify(ccx).val; + let start_fn = Callee::def(ccx, start_def_id, empty_substs).reify(ccx); let args = { let opaque_rust_main = llvm::LLVMBuildPointerCast(bld, @@ -2502,12 +1624,6 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, tcx.sess.opts.debug_assertions }; - let check_dropflag = if let Some(v) = tcx.sess.opts.debugging_opts.force_dropflag_checks { - v - } else { - tcx.sess.opts.debug_assertions - }; - let link_meta = link::build_link_meta(incremental_hashes_map, name); let shared_ccx = SharedCrateContext::new(tcx, @@ -2516,8 +1632,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 
'tcx>, Sha256::new(), link_meta.clone(), reachable, - check_overflow, - check_dropflag); + check_overflow); // Translate the metadata. let metadata = time(tcx.sess.time_passes(), "write metadata", || { write_metadata(&shared_ccx, shared_ccx.reachable()) @@ -2631,10 +1746,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, println!("n_null_glues: {}", stats.n_null_glues.get()); println!("n_real_glues: {}", stats.n_real_glues.get()); - println!("n_fallback_instantiations: {}", stats.n_fallback_instantiations.get()); - println!("n_fns: {}", stats.n_fns.get()); - println!("n_monos: {}", stats.n_monos.get()); println!("n_inlines: {}", stats.n_inlines.get()); println!("n_closures: {}", stats.n_closures.get()); println!("fn stats:"); diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index d50959b5ab302..9aa486dc62811 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -15,48 +15,32 @@ //! closure. pub use self::CalleeData::*; -pub use self::CallArgs::*; use arena::TypedArena; use back::symbol_names; use llvm::{self, ValueRef, get_params}; -use middle::cstore::LOCAL_CRATE; use rustc::hir::def_id::DefId; use rustc::ty::subst::Substs; use rustc::traits; -use rustc::hir::map as hir_map; use abi::{Abi, FnType}; -use adt; use attributes; use base; use base::*; use build::*; -use cleanup; -use cleanup::CleanupMethods; use closure; -use common::{self, Block, Result, CrateContext, FunctionContext, C_undef}; +use common::{self, Block, Result, CrateContext, FunctionContext}; use consts; -use datum::*; use debuginfo::DebugLoc; use declare; -use expr; -use glue; -use inline; -use intrinsic; -use machine::llalign_of_min; use meth; use monomorphize::{self, Instance}; use trans_item::TransItem; -use type_::Type; use type_of; -use value::Value; use Disr; use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; use rustc::hir; use syntax_pos::DUMMY_SP; -use errors; -use syntax::ptr::P; #[derive(Debug)] pub enum CalleeData { @@ -80,10 +64,10 @@ pub struct Callee<'tcx> { impl<'tcx> Callee<'tcx> { /// Function pointer. - pub fn ptr(datum: Datum<'tcx, Rvalue>) -> Callee<'tcx> { + pub fn ptr(llfn: ValueRef, ty: Ty<'tcx>) -> Callee<'tcx> { Callee { - data: Fn(datum.val), - ty: datum.ty + data: Fn(llfn), + ty: ty } } @@ -113,32 +97,28 @@ impl<'tcx> Callee<'tcx> { return Callee::trait_method(ccx, trait_id, def_id, substs); } - let maybe_node_id = inline::get_local_instance(ccx, def_id) - .and_then(|def_id| tcx.map.as_local_node_id(def_id)); - let maybe_ast_node = maybe_node_id.and_then(|node_id| { - tcx.map.find(node_id) - }); - - let data = match maybe_ast_node { - Some(hir_map::NodeStructCtor(_)) => { - NamedTupleConstructor(Disr(0)) - } - Some(hir_map::NodeVariant(_)) => { - let vinfo = common::inlined_variant_def(ccx, maybe_node_id.unwrap()); - NamedTupleConstructor(Disr::from(vinfo.disr_val)) + let fn_ty = def_ty(tcx, def_id, substs); + if let ty::TyFnDef(_, _, f) = fn_ty.sty { + if f.abi == Abi::RustIntrinsic || f.abi == Abi::PlatformIntrinsic { + return Callee { + data: Intrinsic, + ty: fn_ty + }; } - Some(hir_map::NodeForeignItem(fi)) if { - let abi = tcx.map.get_foreign_abi(fi.id); - abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic - } => Intrinsic, - - _ => return Callee::ptr(get_fn(ccx, def_id, substs)) - }; + } - Callee { - data: data, - ty: def_ty(tcx, def_id, substs) + // FIXME(eddyb) Detect ADT constructors more efficiently. 
+ if let Some(adt_def) = fn_ty.fn_ret().skip_binder().ty_adt_def() { + if let Some(v) = adt_def.variants.iter().find(|v| def_id == v.did) { + return Callee { + data: NamedTupleConstructor(Disr::from(v.disr_val)), + ty: fn_ty + }; + } } + + let (llfn, ty) = get_fn(ccx, def_id, substs); + Callee::ptr(llfn, ty) } /// Trait method, which has to be resolved to an impl method. @@ -163,7 +143,8 @@ impl<'tcx> Callee<'tcx> { // That is because default methods have the same ID as the // trait method used to look up the impl method that ended // up here, so calling Callee::def would infinitely recurse. - Callee::ptr(get_fn(ccx, mth.method.def_id, mth.substs)) + let (llfn, ty) = get_fn(ccx, mth.method.def_id, mth.substs); + Callee::ptr(llfn, ty) } traits::VtableClosure(vtable_closure) => { // The substitutions should have no type parameters remaining @@ -175,24 +156,14 @@ impl<'tcx> Callee<'tcx> { trait_closure_kind); let method_ty = def_ty(tcx, def_id, substs); - let fn_ptr_ty = match method_ty.sty { - ty::TyFnDef(_, _, fty) => tcx.mk_fn_ptr(fty), - _ => bug!("expected fn item type, found {}", - method_ty) - }; - Callee::ptr(immediate_rvalue(llfn, fn_ptr_ty)) + Callee::ptr(llfn, method_ty) } traits::VtableFnPointer(vtable_fn_pointer) => { let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_id).unwrap(); let llfn = trans_fn_pointer_shim(ccx, trait_closure_kind, vtable_fn_pointer.fn_ty); let method_ty = def_ty(tcx, def_id, substs); - let fn_ptr_ty = match method_ty.sty { - ty::TyFnDef(_, _, fty) => tcx.mk_fn_ptr(fty), - _ => bug!("expected fn item type, found {}", - method_ty) - }; - Callee::ptr(immediate_rvalue(llfn, fn_ptr_ty)) + Callee::ptr(llfn, method_ty) } traits::VtableObject(ref data) => { Callee { @@ -236,30 +207,34 @@ impl<'tcx> Callee<'tcx> { /// function. pub fn call<'a, 'blk>(self, bcx: Block<'blk, 'tcx>, debug_loc: DebugLoc, - args: CallArgs<'a, 'tcx>, - dest: Option) + args: &[ValueRef], + dest: Option) -> Result<'blk, 'tcx> { trans_call_inner(bcx, debug_loc, self, args, dest) } /// Turn the callee into a function pointer. - pub fn reify<'a>(self, ccx: &CrateContext<'a, 'tcx>) - -> Datum<'tcx, Rvalue> { - let fn_ptr_ty = match self.ty.sty { - ty::TyFnDef(_, _, f) => ccx.tcx().mk_fn_ptr(f), - _ => self.ty - }; + pub fn reify<'a>(self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef { match self.data { - Fn(llfn) => { - immediate_rvalue(llfn, fn_ptr_ty) - } + Fn(llfn) => llfn, Virtual(idx) => { - let llfn = meth::trans_object_shim(ccx, self.ty, idx); - immediate_rvalue(llfn, fn_ptr_ty) + meth::trans_object_shim(ccx, self.ty, idx) } - NamedTupleConstructor(_) => match self.ty.sty { + NamedTupleConstructor(disr) => match self.ty.sty { ty::TyFnDef(def_id, substs, _) => { - return get_fn(ccx, def_id, substs); + let instance = Instance::new(def_id, substs); + if let Some(&llfn) = ccx.instances().borrow().get(&instance) { + return llfn; + } + + let sym = ccx.symbol_map().get_or_compute(ccx.shared(), + TransItem::Fn(instance)); + assert!(!ccx.codegen_unit().contains_item(&TransItem::Fn(instance))); + let lldecl = declare::define_internal_fn(ccx, &sym, self.ty); + base::trans_ctor_shim(ccx, def_id, substs, disr, lldecl); + ccx.instances().borrow_mut().insert(instance, lldecl); + + lldecl } _ => bug!("expected fn item type, found {}", self.ty) }, @@ -310,7 +285,7 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>( let llfnpointer = match bare_fn_ty.sty { ty::TyFnDef(def_id, substs, _) => { // Function definitions have to be turned into a pointer. 
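The added block above identifies a tuple constructor by matching the callee's `DefId` against the variants of the ADT its signature returns. A simplified model of that lookup, with `DefId` and `Variant` pared down to stand-ins for the compiler's types:

#[derive(PartialEq)]
struct DefId(u32);

struct Variant {
    did: DefId,
    disr_val: u64,
}

// Mirrors adt_def.variants.iter().find(...): the constructor's DefId is
// the DefId of the variant it builds, which yields its discriminant.
fn ctor_discr(def_id: &DefId, variants: &[Variant]) -> Option<u64> {
    variants.iter().find(|v| v.did == *def_id).map(|v| v.disr_val)
}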
- let llfn = Callee::def(ccx, def_id, substs).reify(ccx).val; + let llfn = Callee::def(ccx, def_id, substs).reify(ccx); if !is_by_ref { // A by-value fn item is ignored, so the shim has // the same signature as the original function. @@ -380,7 +355,7 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>( let (block_arena, fcx): (TypedArena<_>, FunctionContext); block_arena = TypedArena::new(); fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena); - let mut bcx = fcx.init(false, None); + let mut bcx = fcx.init(false); let llargs = get_params(fcx.llfn); @@ -394,17 +369,13 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>( } }); - assert!(!fcx.needs_ret_allocas); - - let dest = fcx.llretslotptr.get().map(|_| - expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot")) - ); + let dest = fcx.llretslotptr.get(); let callee = Callee { data: Fn(llfnpointer), ty: bare_fn_ty }; - bcx = callee.call(bcx, DebugLoc::None, ArgVals(&llargs[(self_idx + 1)..]), dest).bcx; + bcx = callee.call(bcx, DebugLoc::None, &llargs[(self_idx + 1)..], dest).bcx; fcx.finish(bcx, DebugLoc::None); @@ -424,90 +395,27 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>( fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, def_id: DefId, substs: &'tcx Substs<'tcx>) - -> Datum<'tcx, Rvalue> { + -> (ValueRef, Ty<'tcx>) { let tcx = ccx.tcx(); debug!("get_fn(def_id={:?}, substs={:?})", def_id, substs); assert!(!substs.types.needs_infer()); assert!(!substs.types.has_escaping_regions()); + assert!(!substs.types.has_param_types()); - // Check whether this fn has an inlined copy and, if so, redirect - // def_id to the local id of the inlined copy. - let def_id = inline::maybe_instantiate_inline(ccx, def_id); - - fn is_named_tuple_constructor(tcx: TyCtxt, def_id: DefId) -> bool { - let node_id = match tcx.map.as_local_node_id(def_id) { - Some(n) => n, - None => { return false; } - }; - let map_node = errors::expect( - &tcx.sess.diagnostic(), - tcx.map.find(node_id), - || "local item should be in ast map".to_string()); - - match map_node { - hir_map::NodeVariant(v) => { - v.node.data.is_tuple() - } - hir_map::NodeStructCtor(_) => true, - _ => false - } - } - let must_monomorphise = - !substs.types.is_empty() || is_named_tuple_constructor(tcx, def_id); - - debug!("get_fn({:?}) must_monomorphise: {}", - def_id, must_monomorphise); - - // Create a monomorphic version of generic functions - if must_monomorphise { - // Should be either intra-crate or inlined. - assert_eq!(def_id.krate, LOCAL_CRATE); - - let substs = tcx.normalize_associated_type(&substs); - let (val, fn_ty) = monomorphize::monomorphic_fn(ccx, def_id, substs); - let fn_ptr_ty = match fn_ty.sty { - ty::TyFnDef(_, _, fty) => { - // Create a fn pointer with the substituted signature. - tcx.mk_fn_ptr(fty) - } - _ => bug!("expected fn item type, found {}", fn_ty) - }; - assert_eq!(type_of::type_of(ccx, fn_ptr_ty), common::val_ty(val)); - return immediate_rvalue(val, fn_ptr_ty); - } - - // Find the actual function pointer. - let ty = ccx.tcx().lookup_item_type(def_id).ty; - let fn_ptr_ty = match ty.sty { - ty::TyFnDef(_, _, ref fty) => { - // Create a fn pointer with the normalized signature. 
- tcx.mk_fn_ptr(tcx.normalize_associated_type(fty)) - } - _ => bug!("expected fn item type, found {}", ty) - }; + let substs = tcx.normalize_associated_type(&substs); + let instance = Instance::new(def_id, substs); + let item_ty = ccx.tcx().lookup_item_type(def_id).ty; + let fn_ty = monomorphize::apply_param_substs(ccx.tcx(), substs, &item_ty); - let instance = Instance::mono(ccx.shared(), def_id); if let Some(&llfn) = ccx.instances().borrow().get(&instance) { - return immediate_rvalue(llfn, fn_ptr_ty); + return (llfn, fn_ty); } - let local_id = ccx.tcx().map.as_local_node_id(def_id); - let local_item = match local_id.and_then(|id| tcx.map.find(id)) { - Some(hir_map::NodeItem(&hir::Item { - span, node: hir::ItemFn(..), .. - })) | - Some(hir_map::NodeTraitItem(&hir::TraitItem { - span, node: hir::MethodTraitItem(_, Some(_)), .. - })) | - Some(hir_map::NodeImplItem(&hir::ImplItem { - span, node: hir::ImplItemKind::Method(..), .. - })) => { - Some(span) - } - _ => None - }; + let sym = ccx.symbol_map().get_or_compute(ccx.shared(), + TransItem::Fn(instance)); + debug!("get_fn({:?}: {:?}) => {}", instance, fn_ty, sym); // This is subtle and surprising, but sometimes we have to bitcast // the resulting fn pointer. The reason has to do with external @@ -533,23 +441,17 @@ fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, // reference. It also occurs when testing libcore and in some // other weird situations. Annoying. - let sym = ccx.symbol_map().get_or_compute(ccx.shared(), - TransItem::Fn(instance)); - - let llptrty = type_of::type_of(ccx, fn_ptr_ty); - let llfn = if let Some(llfn) = declare::get_declared_value(ccx, &sym) { - if let Some(span) = local_item { - if declare::get_defined_value(ccx, &sym).is_some() { - ccx.sess().span_fatal(span, - &format!("symbol `{}` is already defined", &sym)); - } + let fn_ptr_ty = match fn_ty.sty { + ty::TyFnDef(_, _, fty) => { + // Create a fn pointer with the substituted signature. + tcx.mk_fn_ptr(fty) } + _ => bug!("expected fn item type, found {}", fn_ty) + }; + let llptrty = type_of::type_of(ccx, fn_ptr_ty); + let llfn = if let Some(llfn) = declare::get_declared_value(ccx, &sym) { if common::val_ty(llfn) != llptrty { - if local_item.is_some() { - bug!("symbol `{}` previously declared as {:?}, now wanted as {:?}", - sym, Value(llfn), llptrty); - } debug!("get_fn: casting {:?} to {:?}", llfn, llptrty); consts::ptrcast(llfn, llptrty) } else { @@ -557,15 +459,21 @@ fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, llfn } } else { - let llfn = declare::declare_fn(ccx, &sym, ty); + let llfn = declare::declare_fn(ccx, &sym, fn_ty); assert_eq!(common::val_ty(llfn), llptrty); debug!("get_fn: not casting pointer!"); let attrs = ccx.tcx().get_attrs(def_id); attributes::from_fn_attrs(ccx, &attrs, llfn); - if local_item.is_some() { + + let is_local_def = ccx.shared().translation_items().borrow() + .contains(&TransItem::Fn(instance)); + if is_local_def { // FIXME(eddyb) Doubt all extern fn should allow unwinding. 
attributes::unwind(llfn, true); + unsafe { + llvm::LLVMSetLinkage(llfn, llvm::ExternalLinkage); + } } llfn @@ -573,17 +481,17 @@ fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ccx.instances().borrow_mut().insert(instance, llfn); - immediate_rvalue(llfn, fn_ptr_ty) + (llfn, fn_ty) } // ______________________________________________________________________ // Translating calls -fn trans_call_inner<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, +fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, debug_loc: DebugLoc, callee: Callee<'tcx>, - args: CallArgs<'a, 'tcx>, - dest: Option<expr::Dest>) + args: &[ValueRef], + opt_llretslot: Option<ValueRef>) -> Result<'blk, 'tcx> { // Introduce a temporary cleanup scope that will contain cleanups // for the arguments while they are being evaluated. The purpose @@ -595,65 +503,16 @@ fn trans_call_inner<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, let fcx = bcx.fcx; let ccx = fcx.ccx; - let abi = callee.ty.fn_abi(); - let sig = callee.ty.fn_sig(); - let output = bcx.tcx().erase_late_bound_regions(&sig.output()); - let output = bcx.tcx().normalize_associated_type(&output); - - let extra_args = match args { - ArgExprs(args) if abi != Abi::RustCall => { - args[sig.0.inputs.len()..].iter().map(|expr| { - common::expr_ty_adjusted(bcx, expr) - }).collect() - } - _ => vec![] - }; - let fn_ty = callee.direct_fn_type(ccx, &extra_args); + let fn_ret = callee.ty.fn_ret(); + let fn_ty = callee.direct_fn_type(ccx, &[]); let mut callee = match callee.data { - Intrinsic => { - assert!(abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic); - assert!(dest.is_some()); - - return intrinsic::trans_intrinsic_call(bcx, callee.ty, &fn_ty, - args, dest.unwrap(), - debug_loc); - } - NamedTupleConstructor(disr) => { - assert!(dest.is_some()); - - return base::trans_named_tuple_constructor(bcx, - callee.ty, - disr, - args, - dest.unwrap(), - debug_loc); + NamedTupleConstructor(_) | Intrinsic => { + bug!("{:?} calls should not go through Callee::call", callee); } f => f }; - // Generate a location to store the result. If the user does - // not care about the result, just make a stack slot. - let opt_llretslot = dest.and_then(|dest| match dest { - expr::SaveIn(dst) => Some(dst), - expr::Ignore => { - let needs_drop = || bcx.fcx.type_needs_drop(output); - if fn_ty.ret.is_indirect() || fn_ty.ret.cast.is_some() || needs_drop() { - // Push the out-pointer if we use an out-pointer for this - // return type, otherwise push "undef". - if fn_ty.ret.is_ignore() { - Some(C_undef(fn_ty.ret.original_ty.ptr_to())) - } else { - let llresult = alloca(bcx, fn_ty.ret.original_ty, "__llret"); - call_lifetime_start(bcx, llresult); - Some(llresult) - } - } else { - None - } - } - }); - - // If there is no destination, return must be direct, with no cast.
if opt_llretslot.is_none() { assert!(!fn_ty.ret.is_indirect() && fn_ty.ret.cast.is_none()); @@ -669,17 +528,24 @@ fn trans_call_inner<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, llargs.push(llretslot); } - let arg_cleanup_scope = fcx.push_custom_cleanup_scope(); - bcx = trans_args(bcx, abi, &fn_ty, &mut callee, args, &mut llargs, - cleanup::CustomScope(arg_cleanup_scope)); - fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean(); + match callee { + Virtual(idx) => { + llargs.push(args[0]); + + let fn_ptr = meth::get_virtual_method(bcx, args[1], idx); + let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to(); + callee = Fn(PointerCast(bcx, fn_ptr, llty)); + llargs.extend_from_slice(&args[2..]); + } + _ => llargs.extend_from_slice(args) + } let llfn = match callee { Fn(f) => f, _ => bug!("expected fn pointer callee, found {:?}", callee) }; - let (llret, mut bcx) = base::invoke(bcx, llfn, &llargs, debug_loc); + let (llret, bcx) = base::invoke(bcx, llfn, &llargs, debug_loc); if !bcx.unreachable.get() { fn_ty.apply_attrs_callsite(llret); @@ -695,283 +561,9 @@ fn trans_call_inner<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, } } - fcx.pop_and_trans_custom_cleanup_scope(bcx, arg_cleanup_scope); - - // If the caller doesn't care about the result of this fn call, - // drop the temporary slot we made. - match (dest, opt_llretslot) { - (Some(expr::Ignore), Some(llretslot)) => { - // drop the value if it is not being saved. - bcx = glue::drop_ty(bcx, llretslot, output, debug_loc); - call_lifetime_end(bcx, llretslot); - } - _ => {} - } - - // FIXME(canndrew): This is_never should really be an is_uninhabited - if output.is_never() { + if fn_ret.0.is_never() { Unreachable(bcx); } Result::new(bcx, llret) } - -pub enum CallArgs<'a, 'tcx> { - /// Supply value of arguments as a list of expressions that must be - /// translated. This is used in the common case of `foo(bar, qux)`. - ArgExprs(&'a [P]), - - /// Supply value of arguments as a list of LLVM value refs; frequently - /// used with lang items and so forth, when the argument is an internal - /// value. - ArgVals(&'a [ValueRef]), - - /// For overloaded operators: `(lhs, Option(rhs))`. - /// `lhs` is the left-hand-side and `rhs` is the datum - /// of the right-hand-side argument (if any). - ArgOverloadedOp(Datum<'tcx, Expr>, Option>), - - /// Supply value of arguments as a list of expressions that must be - /// translated, for overloaded call operators. - ArgOverloadedCall(Vec<&'a hir::Expr>), -} - -fn trans_args_under_call_abi<'blk, 'tcx>( - mut bcx: Block<'blk, 'tcx>, - arg_exprs: &[P], - callee: &mut CalleeData, - fn_ty: &FnType, - llargs: &mut Vec, - arg_cleanup_scope: cleanup::ScopeId) - -> Block<'blk, 'tcx> -{ - let mut arg_idx = 0; - - // Translate the `self` argument first. - let arg_datum = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0])); - bcx = trans_arg_datum(bcx, - arg_datum, - callee, fn_ty, &mut arg_idx, - arg_cleanup_scope, - llargs); - - // Now untuple the rest of the arguments. 
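The new `Virtual(idx)` arm above is now the entire trait-object call protocol in trans: `args[0]` is the data pointer, `args[1]` the vtable pointer, and the real callee is loaded from vtable slot `idx` before the remaining arguments are appended. The same protocol modeled with a hand-rolled vtable (hypothetical types, not the rustc ones):

    type ErasedFn = fn(*const ()) -> u32;

    struct Vtable { methods: [ErasedFn; 1] }

    // Load slot `idx` from the vtable and call it with the erased data
    // pointer as the first argument, mirroring get_virtual_method above.
    fn call_virtual(data: *const (), vtable: &Vtable, idx: usize) -> u32 {
        (vtable.methods[idx])(data)
    }

    fn read_u32(data: *const ()) -> u32 {
        unsafe { *(data as *const u32) }
    }

    fn main() {
        let x = 7u32;
        let vt = Vtable { methods: [read_u32] };
        assert_eq!(call_virtual(&x as *const u32 as *const (), &vt, 0), 7);
    }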
- let tuple_expr = &arg_exprs[1]; - let tuple_type = common::node_id_type(bcx, tuple_expr.id); - - match tuple_type.sty { - ty::TyTuple(ref field_types) => { - let tuple_datum = unpack_datum!(bcx, - expr::trans(bcx, &tuple_expr)); - let tuple_lvalue_datum = - unpack_datum!(bcx, - tuple_datum.to_lvalue_datum(bcx, - "args", - tuple_expr.id)); - let repr = adt::represent_type(bcx.ccx(), tuple_type); - let repr_ptr = &repr; - for (i, field_type) in field_types.iter().enumerate() { - let arg_datum = tuple_lvalue_datum.get_element( - bcx, - field_type, - |srcval| { - adt::trans_field_ptr(bcx, repr_ptr, srcval, Disr(0), i) - }).to_expr_datum(); - bcx = trans_arg_datum(bcx, - arg_datum, - callee, fn_ty, &mut arg_idx, - arg_cleanup_scope, - llargs); - } - } - _ => { - span_bug!(tuple_expr.span, - "argument to `.call()` wasn't a tuple?!") - } - }; - - bcx -} - -pub fn trans_args<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - abi: Abi, - fn_ty: &FnType, - callee: &mut CalleeData, - args: CallArgs<'a, 'tcx>, - llargs: &mut Vec, - arg_cleanup_scope: cleanup::ScopeId) - -> Block<'blk, 'tcx> { - debug!("trans_args(abi={})", abi); - - let _icx = push_ctxt("trans_args"); - - let mut bcx = bcx; - let mut arg_idx = 0; - - // First we figure out the caller's view of the types of the arguments. - // This will be needed if this is a generic call, because the callee has - // to cast her view of the arguments to the caller's view. - match args { - ArgExprs(arg_exprs) => { - if abi == Abi::RustCall { - // This is only used for direct calls to the `call`, - // `call_mut` or `call_once` functions. - return trans_args_under_call_abi(bcx, - arg_exprs, callee, fn_ty, - llargs, - arg_cleanup_scope) - } - - for arg_expr in arg_exprs { - let arg_datum = unpack_datum!(bcx, expr::trans(bcx, &arg_expr)); - bcx = trans_arg_datum(bcx, - arg_datum, - callee, fn_ty, &mut arg_idx, - arg_cleanup_scope, - llargs); - } - } - ArgOverloadedCall(arg_exprs) => { - for expr in arg_exprs { - let arg_datum = - unpack_datum!(bcx, expr::trans(bcx, expr)); - bcx = trans_arg_datum(bcx, - arg_datum, - callee, fn_ty, &mut arg_idx, - arg_cleanup_scope, - llargs); - } - } - ArgOverloadedOp(lhs, rhs) => { - bcx = trans_arg_datum(bcx, lhs, - callee, fn_ty, &mut arg_idx, - arg_cleanup_scope, - llargs); - - if let Some(rhs) = rhs { - bcx = trans_arg_datum(bcx, rhs, - callee, fn_ty, &mut arg_idx, - arg_cleanup_scope, - llargs); - } - } - ArgVals(vs) => { - match *callee { - Virtual(idx) => { - llargs.push(vs[0]); - - let fn_ptr = meth::get_virtual_method(bcx, vs[1], idx); - let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to(); - *callee = Fn(PointerCast(bcx, fn_ptr, llty)); - llargs.extend_from_slice(&vs[2..]); - } - _ => llargs.extend_from_slice(vs) - } - } - } - - bcx -} - -fn trans_arg_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - arg_datum: Datum<'tcx, Expr>, - callee: &mut CalleeData, - fn_ty: &FnType, - next_idx: &mut usize, - arg_cleanup_scope: cleanup::ScopeId, - llargs: &mut Vec) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_arg_datum"); - let mut bcx = bcx; - - debug!("trans_arg_datum({:?})", arg_datum); - - let arg = &fn_ty.args[*next_idx]; - *next_idx += 1; - - // Fill padding with undef value, where applicable. - if let Some(ty) = arg.pad { - llargs.push(C_undef(ty)); - } - - // Determine whether we want a by-ref datum even if not appropriate. 
- let want_by_ref = arg.is_indirect() || arg.cast.is_some(); - - let fat_ptr = common::type_is_fat_ptr(bcx.tcx(), arg_datum.ty); - let (by_ref, val) = if fat_ptr && !bcx.fcx.type_needs_drop(arg_datum.ty) { - (true, arg_datum.val) - } else { - // Make this an rvalue, since we are going to be - // passing ownership. - let arg_datum = unpack_datum!( - bcx, arg_datum.to_rvalue_datum(bcx, "arg")); - - // Now that arg_datum is owned, get it into the appropriate - // mode (ref vs value). - let arg_datum = unpack_datum!(bcx, if want_by_ref { - arg_datum.to_ref_datum(bcx) - } else { - arg_datum.to_appropriate_datum(bcx) - }); - - // Technically, ownership of val passes to the callee. - // However, we must cleanup should we panic before the - // callee is actually invoked. - (arg_datum.kind.is_by_ref(), - arg_datum.add_clean(bcx.fcx, arg_cleanup_scope)) - }; - - if arg.is_ignore() { - return bcx; - } - - debug!("--- trans_arg_datum passing {:?}", Value(val)); - - if fat_ptr { - // Fat pointers should be passed without any transformations. - assert!(!arg.is_indirect() && arg.cast.is_none()); - llargs.push(Load(bcx, expr::get_dataptr(bcx, val))); - - let info_arg = &fn_ty.args[*next_idx]; - *next_idx += 1; - assert!(!info_arg.is_indirect() && info_arg.cast.is_none()); - let info = Load(bcx, expr::get_meta(bcx, val)); - - if let Virtual(idx) = *callee { - // We have to grab the fn pointer from the vtable when - // handling the first argument, ensure that here. - assert_eq!(*next_idx, 2); - assert!(info_arg.is_ignore()); - let fn_ptr = meth::get_virtual_method(bcx, info, idx); - let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to(); - *callee = Fn(PointerCast(bcx, fn_ptr, llty)); - } else { - assert!(!info_arg.is_ignore()); - llargs.push(info); - } - return bcx; - } - - let mut val = val; - if by_ref && !arg.is_indirect() { - // Have to load the argument, maybe while casting it. - if arg.original_ty == Type::i1(bcx.ccx()) { - // We store bools as i8 so we need to truncate to i1. - val = LoadRangeAssert(bcx, val, 0, 2, llvm::False); - val = Trunc(bcx, val, arg.original_ty); - } else if let Some(ty) = arg.cast { - val = Load(bcx, PointerCast(bcx, val, ty.ptr_to())); - if !bcx.unreachable.get() { - let llalign = llalign_of_min(bcx.ccx(), arg.ty); - unsafe { - llvm::LLVMSetAlignment(val, llalign); - } - } - } else { - val = Load(bcx, val); - } - } - - llargs.push(val); - bcx -} diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index 3081f055bb4dd..d368ce47430b7 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -114,37 +114,22 @@ //! code for `expr` itself is responsible for freeing any other byproducts //! that may be in play. -pub use self::ScopeId::*; -pub use self::CleanupScopeKind::*; pub use self::EarlyExitLabel::*; -pub use self::Heap::*; use llvm::{BasicBlockRef, ValueRef}; use base; use build; use common; -use common::{Block, FunctionContext, NodeIdAndSpan, LandingPad}; -use datum::{Datum, Lvalue}; -use debuginfo::{DebugLoc, ToDebugLoc}; +use common::{Block, FunctionContext, LandingPad}; +use debuginfo::{DebugLoc}; use glue; -use middle::region; use type_::Type; use value::Value; -use rustc::ty::{Ty, TyCtxt}; - -use std::fmt; -use syntax::ast; - -pub struct CleanupScope<'blk, 'tcx: 'blk> { - // The id of this cleanup scope. If the id is None, - // this is a *temporary scope* that is pushed during trans to - // cleanup miscellaneous garbage that trans may generate whose - // lifetime is a subset of some expression. 
See module doc for - // more details. - kind: CleanupScopeKind<'blk, 'tcx>, +use rustc::ty::Ty; +pub struct CleanupScope<'tcx> { // Cleanups to run upon scope exit. - cleanups: Vec>, + cleanups: Vec>, // The debug location any drop calls generated for this scope will be // associated with. @@ -159,37 +144,9 @@ pub struct CustomScopeIndex { index: usize } -pub const EXIT_BREAK: usize = 0; -pub const EXIT_LOOP: usize = 1; -pub const EXIT_MAX: usize = 2; - -pub enum CleanupScopeKind<'blk, 'tcx: 'blk> { - CustomScopeKind, - AstScopeKind(ast::NodeId), - LoopScopeKind(ast::NodeId, [Block<'blk, 'tcx>; EXIT_MAX]) -} - -impl<'blk, 'tcx: 'blk> fmt::Debug for CleanupScopeKind<'blk, 'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - CustomScopeKind => write!(f, "CustomScopeKind"), - AstScopeKind(nid) => write!(f, "AstScopeKind({})", nid), - LoopScopeKind(nid, ref blks) => { - write!(f, "LoopScopeKind({}, [", nid)?; - for blk in blks { - write!(f, "{:p}, ", blk)?; - } - write!(f, "])") - } - } - } -} - #[derive(Copy, Clone, PartialEq, Debug)] pub enum EarlyExitLabel { UnwindExit(UnwindKind), - ReturnExit, - LoopExit(ast::NodeId, usize) } #[derive(Copy, Clone, Debug)] @@ -205,97 +162,8 @@ pub struct CachedEarlyExit { last_cleanup: usize, } -pub trait Cleanup<'tcx> { - fn must_unwind(&self) -> bool; - fn is_lifetime_end(&self) -> bool; - fn trans<'blk>(&self, - bcx: Block<'blk, 'tcx>, - debug_loc: DebugLoc) - -> Block<'blk, 'tcx>; -} - -pub type CleanupObj<'tcx> = Box+'tcx>; - -#[derive(Copy, Clone, Debug)] -pub enum ScopeId { - AstScope(ast::NodeId), - CustomScope(CustomScopeIndex) -} - -#[derive(Copy, Clone, Debug)] -pub struct DropHint(pub ast::NodeId, pub K); - -pub type DropHintDatum<'tcx> = DropHint>; -pub type DropHintValue = DropHint; - -impl DropHint { - pub fn new(id: ast::NodeId, k: K) -> DropHint { DropHint(id, k) } -} - -impl DropHint { - pub fn value(&self) -> ValueRef { self.1 } -} - -pub trait DropHintMethods { - type ValueKind; - fn to_value(&self) -> Self::ValueKind; -} -impl<'tcx> DropHintMethods for DropHintDatum<'tcx> { - type ValueKind = DropHintValue; - fn to_value(&self) -> DropHintValue { DropHint(self.0, self.1.val) } -} - -impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { - /// Invoked when we start to trans the code contained within a new cleanup scope. - fn push_ast_cleanup_scope(&self, debug_loc: NodeIdAndSpan) { - debug!("push_ast_cleanup_scope({})", - self.ccx.tcx().map.node_to_string(debug_loc.id)); - - // FIXME(#2202) -- currently closure bodies have a parent - // region, which messes up the assertion below, since there - // are no cleanup scopes on the stack at the start of - // trans'ing a closure body. I think though that this should - // eventually be fixed by closure bodies not having a parent - // region, though that's a touch unclear, and it might also be - // better just to narrow this assertion more (i.e., by - // excluding id's that correspond to closure bodies only). For - // now we just say that if there is already an AST scope on the stack, - // this new AST scope had better be its immediate child. 
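With the AST and loop scope kinds deleted, every `CleanupScope` is a temporary scope: scopes form a LIFO stack, cleanups may only be scheduled into the top-most scope, and popping a scope emits its drops. A minimal sketch of that discipline, using hypothetical stand-in types rather than the rustc structs:

    struct DropValue(&'static str);

    #[derive(Default)]
    struct Scopes { stack: Vec<Vec<DropValue>> }

    impl Scopes {
        fn push_custom_cleanup_scope(&mut self) -> usize {
            self.stack.push(Vec::new());
            self.stack.len() - 1
        }
        // Cleanups may only target the top-most (current) scope.
        fn schedule_clean(&mut self, scope: usize, drop: DropValue) {
            assert_eq!(scope + 1, self.stack.len());
            self.stack[scope].push(drop);
        }
        // Popping yields the drops most-recently-scheduled first,
        // the usual LIFO drop order.
        fn pop_and_trans(&mut self, scope: usize) -> Vec<DropValue> {
            assert_eq!(scope + 1, self.stack.len());
            let mut drops = self.stack.pop().unwrap();
            drops.reverse();
            drops
        }
    }

    fn main() {
        let mut scopes = Scopes::default();
        let s = scopes.push_custom_cleanup_scope();
        scopes.schedule_clean(s, DropValue("a"));
        scopes.schedule_clean(s, DropValue("b"));
        let order: Vec<&str> = scopes.pop_and_trans(s).iter().map(|d| d.0).collect();
        assert_eq!(order, ["b", "a"]);
    }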
- let top_scope = self.top_ast_scope(); - let region_maps = &self.ccx.tcx().region_maps; - if top_scope.is_some() { - assert!((region_maps - .opt_encl_scope(region_maps.node_extent(debug_loc.id)) - .map(|s|s.node_id(region_maps)) == top_scope) - || - (region_maps - .opt_encl_scope(region_maps.lookup_code_extent( - region::CodeExtentData::DestructionScope(debug_loc.id))) - .map(|s|s.node_id(region_maps)) == top_scope)); - } - - self.push_scope(CleanupScope::new(AstScopeKind(debug_loc.id), - debug_loc.debug_loc())); - } - - fn push_loop_cleanup_scope(&self, - id: ast::NodeId, - exits: [Block<'blk, 'tcx>; EXIT_MAX]) { - debug!("push_loop_cleanup_scope({})", - self.ccx.tcx().map.node_to_string(id)); - assert_eq!(Some(id), self.top_ast_scope()); - - // Just copy the debuginfo source location from the enclosing scope - let debug_loc = self.scopes - .borrow() - .last() - .unwrap() - .debug_loc; - - self.push_scope(CleanupScope::new(LoopScopeKind(id, exits), debug_loc)); - } - - fn push_custom_cleanup_scope(&self) -> CustomScopeIndex { +impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { + pub fn push_custom_cleanup_scope(&self) -> CustomScopeIndex { let index = self.scopes_len(); debug!("push_custom_cleanup_scope(): {}", index); @@ -306,53 +174,14 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { .map(|opt_scope| opt_scope.debug_loc) .unwrap_or(DebugLoc::None); - self.push_scope(CleanupScope::new(CustomScopeKind, debug_loc)); - CustomScopeIndex { index: index } - } - - fn push_custom_cleanup_scope_with_debug_loc(&self, - debug_loc: NodeIdAndSpan) - -> CustomScopeIndex { - let index = self.scopes_len(); - debug!("push_custom_cleanup_scope(): {}", index); - - self.push_scope(CleanupScope::new(CustomScopeKind, - debug_loc.debug_loc())); + self.push_scope(CleanupScope::new(debug_loc)); CustomScopeIndex { index: index } } - /// Removes the cleanup scope for id `cleanup_scope`, which must be at the top of the cleanup - /// stack, and generates the code to do its cleanups for normal exit. - fn pop_and_trans_ast_cleanup_scope(&self, - bcx: Block<'blk, 'tcx>, - cleanup_scope: ast::NodeId) - -> Block<'blk, 'tcx> { - debug!("pop_and_trans_ast_cleanup_scope({})", - self.ccx.tcx().map.node_to_string(cleanup_scope)); - - assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope))); - - let scope = self.pop_scope(); - self.trans_scope_cleanups(bcx, &scope) - } - - /// Removes the loop cleanup scope for id `cleanup_scope`, which must be at the top of the - /// cleanup stack. Does not generate any cleanup code, since loop scopes should exit by - /// branching to a block generated by `normal_exit_block`. - fn pop_loop_cleanup_scope(&self, - cleanup_scope: ast::NodeId) { - debug!("pop_loop_cleanup_scope({})", - self.ccx.tcx().map.node_to_string(cleanup_scope)); - - assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope))); - - let _ = self.pop_scope(); - } - /// Removes the top cleanup scope from the stack without executing its cleanups. The top /// cleanup scope must be the temporary scope `custom_scope`. 
- fn pop_custom_cleanup_scope(&self, - custom_scope: CustomScopeIndex) { + pub fn pop_custom_cleanup_scope(&self, + custom_scope: CustomScopeIndex) { debug!("pop_custom_cleanup_scope({})", custom_scope.index); assert!(self.is_valid_to_pop_custom_scope(custom_scope)); let _ = self.pop_scope(); @@ -360,10 +189,10 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { /// Removes the top cleanup scope from the stack, which must be a temporary scope, and /// generates the code to do its cleanups for normal exit. - fn pop_and_trans_custom_cleanup_scope(&self, - bcx: Block<'blk, 'tcx>, - custom_scope: CustomScopeIndex) - -> Block<'blk, 'tcx> { + pub fn pop_and_trans_custom_cleanup_scope(&self, + bcx: Block<'blk, 'tcx>, + custom_scope: CustomScopeIndex) + -> Block<'blk, 'tcx> { debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope); assert!(self.is_valid_to_pop_custom_scope(custom_scope)); @@ -371,100 +200,27 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { self.trans_scope_cleanups(bcx, &scope) } - /// Returns the id of the top-most loop scope - fn top_loop_scope(&self) -> ast::NodeId { - for scope in self.scopes.borrow().iter().rev() { - if let LoopScopeKind(id, _) = scope.kind { - return id; - } - } - bug!("no loop scope found"); - } - - /// Returns a block to branch to which will perform all pending cleanups and - /// then break/continue (depending on `exit`) out of the loop with id - /// `cleanup_scope` - fn normal_exit_block(&'blk self, - cleanup_scope: ast::NodeId, - exit: usize) -> BasicBlockRef { - self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit)) - } - - /// Returns a block to branch to which will perform all pending cleanups and - /// then return from this function - fn return_exit_block(&'blk self) -> BasicBlockRef { - self.trans_cleanups_to_exit_scope(ReturnExit) - } - - fn schedule_lifetime_end(&self, - cleanup_scope: ScopeId, - val: ValueRef) { - let drop = box LifetimeEnd { - ptr: val, - }; - - debug!("schedule_lifetime_end({:?}, val={:?})", - cleanup_scope, Value(val)); - - self.schedule_clean(cleanup_scope, drop as CleanupObj); - } - /// Schedules a (deep) drop of `val`, which is a pointer to an instance of /// `ty` - fn schedule_drop_mem(&self, - cleanup_scope: ScopeId, - val: ValueRef, - ty: Ty<'tcx>, - drop_hint: Option>) { + pub fn schedule_drop_mem(&self, + cleanup_scope: CustomScopeIndex, + val: ValueRef, + ty: Ty<'tcx>) { if !self.type_needs_drop(ty) { return; } - let drop_hint = drop_hint.map(|hint|hint.to_value()); - let drop = box DropValue { + let drop = DropValue { is_immediate: false, val: val, ty: ty, - fill_on_drop: false, skip_dtor: false, - drop_hint: drop_hint, }; - debug!("schedule_drop_mem({:?}, val={:?}, ty={:?}) fill_on_drop={} skip_dtor={}", + debug!("schedule_drop_mem({:?}, val={:?}, ty={:?}) skip_dtor={}", cleanup_scope, Value(val), ty, - drop.fill_on_drop, drop.skip_dtor); - self.schedule_clean(cleanup_scope, drop as CleanupObj); - } - - /// Schedules a (deep) drop and filling of `val`, which is a pointer to an instance of `ty` - fn schedule_drop_and_fill_mem(&self, - cleanup_scope: ScopeId, - val: ValueRef, - ty: Ty<'tcx>, - drop_hint: Option>) { - if !self.type_needs_drop(ty) { return; } - - let drop_hint = drop_hint.map(|datum|datum.to_value()); - let drop = box DropValue { - is_immediate: false, - val: val, - ty: ty, - fill_on_drop: true, - skip_dtor: false, - drop_hint: drop_hint, - }; - - debug!("schedule_drop_and_fill_mem({:?}, val={:?}, ty={:?}, - fill_on_drop={}, 
skip_dtor={}, has_drop_hint={})", - cleanup_scope, - Value(val), - ty, - drop.fill_on_drop, - drop.skip_dtor, - drop_hint.is_some()); - - self.schedule_clean(cleanup_scope, drop as CleanupObj); + self.schedule_clean(cleanup_scope, drop); } /// Issue #23611: Schedules a (deep) drop of the contents of @@ -472,110 +228,55 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { /// `ty`. The scheduled code handles extracting the discriminant /// and dropping the contents associated with that variant /// *without* executing any associated drop implementation. - fn schedule_drop_adt_contents(&self, - cleanup_scope: ScopeId, - val: ValueRef, - ty: Ty<'tcx>) { + pub fn schedule_drop_adt_contents(&self, + cleanup_scope: CustomScopeIndex, + val: ValueRef, + ty: Ty<'tcx>) { // `if` below could be "!contents_needs_drop"; skipping drop // is just an optimization, so sound to be conservative. if !self.type_needs_drop(ty) { return; } - let drop = box DropValue { + let drop = DropValue { is_immediate: false, val: val, ty: ty, - fill_on_drop: false, skip_dtor: true, - drop_hint: None, }; - debug!("schedule_drop_adt_contents({:?}, val={:?}, ty={:?}) fill_on_drop={} skip_dtor={}", + debug!("schedule_drop_adt_contents({:?}, val={:?}, ty={:?}) skip_dtor={}", cleanup_scope, Value(val), ty, - drop.fill_on_drop, drop.skip_dtor); - self.schedule_clean(cleanup_scope, drop as CleanupObj); + self.schedule_clean(cleanup_scope, drop); } /// Schedules a (deep) drop of `val`, which is an instance of `ty` - fn schedule_drop_immediate(&self, - cleanup_scope: ScopeId, - val: ValueRef, - ty: Ty<'tcx>) { + pub fn schedule_drop_immediate(&self, + cleanup_scope: CustomScopeIndex, + val: ValueRef, + ty: Ty<'tcx>) { if !self.type_needs_drop(ty) { return; } - let drop = Box::new(DropValue { + let drop = DropValue { is_immediate: true, val: val, ty: ty, - fill_on_drop: false, skip_dtor: false, - drop_hint: None, - }); + }; - debug!("schedule_drop_immediate({:?}, val={:?}, ty={:?}) fill_on_drop={} skip_dtor={}", + debug!("schedule_drop_immediate({:?}, val={:?}, ty={:?}) skip_dtor={}", cleanup_scope, Value(val), ty, - drop.fill_on_drop, drop.skip_dtor); - self.schedule_clean(cleanup_scope, drop as CleanupObj); - } - - /// Schedules a call to `free(val)`. Note that this is a shallow operation. - fn schedule_free_value(&self, - cleanup_scope: ScopeId, - val: ValueRef, - heap: Heap, - content_ty: Ty<'tcx>) { - let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty }; - - debug!("schedule_free_value({:?}, val={:?}, heap={:?})", - cleanup_scope, Value(val), heap); - - self.schedule_clean(cleanup_scope, drop as CleanupObj); - } - - fn schedule_clean(&self, - cleanup_scope: ScopeId, - cleanup: CleanupObj<'tcx>) { - match cleanup_scope { - AstScope(id) => self.schedule_clean_in_ast_scope(id, cleanup), - CustomScope(id) => self.schedule_clean_in_custom_scope(id, cleanup), - } - } - - /// Schedules a cleanup to occur upon exit from `cleanup_scope`. If `cleanup_scope` is not - /// provided, then the cleanup is scheduled in the topmost scope, which must be a temporary - /// scope. 
- fn schedule_clean_in_ast_scope(&self, - cleanup_scope: ast::NodeId, - cleanup: CleanupObj<'tcx>) { - debug!("schedule_clean_in_ast_scope(cleanup_scope={})", - cleanup_scope); - - for scope in self.scopes.borrow_mut().iter_mut().rev() { - if scope.kind.is_ast_with_id(cleanup_scope) { - scope.cleanups.push(cleanup); - scope.cached_landing_pad = None; - return; - } else { - // will be adding a cleanup to some enclosing scope - scope.clear_cached_exits(); - } - } - - bug!("no cleanup scope {} found", - self.ccx.tcx().map.node_to_string(cleanup_scope)); + self.schedule_clean(cleanup_scope, drop); } /// Schedules a cleanup to occur in the top-most scope, which must be a temporary scope. - fn schedule_clean_in_custom_scope(&self, - custom_scope: CustomScopeIndex, - cleanup: CleanupObj<'tcx>) { + fn schedule_clean(&self, custom_scope: CustomScopeIndex, cleanup: DropValue<'tcx>) { debug!("schedule_clean_in_custom_scope(custom_scope={})", custom_scope.index); @@ -588,14 +289,14 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { } /// Returns true if there are pending cleanups that should execute on panic. - fn needs_invoke(&self) -> bool { + pub fn needs_invoke(&self) -> bool { self.scopes.borrow().iter().rev().any(|s| s.needs_invoke()) } /// Returns a basic block to branch to in the event of a panic. This block /// will run the panic cleanups and eventually resume the exception that /// caused the landing pad to be run. - fn get_landing_pad(&'blk self) -> BasicBlockRef { + pub fn get_landing_pad(&'blk self) -> BasicBlockRef { let _icx = base::push_ctxt("get_landing_pad"); debug!("get_landing_pad"); @@ -625,25 +326,6 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { return llbb; } -} - -impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { - /// Returns the id of the current top-most AST scope, if any. - fn top_ast_scope(&self) -> Option { - for scope in self.scopes.borrow().iter().rev() { - match scope.kind { - CustomScopeKind | LoopScopeKind(..) 
=> {} - AstScopeKind(i) => { - return Some(i); - } - } - } - None - } - - fn top_nonempty_cleanup_scope(&self) -> Option { - self.scopes.borrow().iter().rev().position(|s| !s.cleanups.is_empty()) - } fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool { self.is_valid_custom_scope(custom_scope) && @@ -652,14 +334,13 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool { let scopes = self.scopes.borrow(); - custom_scope.index < scopes.len() && - (*scopes)[custom_scope.index].kind.is_temp() + custom_scope.index < scopes.len() } /// Generates the cleanups for `scope` into `bcx` fn trans_scope_cleanups(&self, // cannot borrow self, will recurse bcx: Block<'blk, 'tcx>, - scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx> { + scope: &CleanupScope<'tcx>) -> Block<'blk, 'tcx> { let mut bcx = bcx; if !bcx.unreachable.get() { @@ -674,11 +355,11 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx self.scopes.borrow().len() } - fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>) { + fn push_scope(&self, scope: CleanupScope<'tcx>) { self.scopes.borrow_mut().push(scope) } - fn pop_scope(&self) -> CleanupScope<'blk, 'tcx> { + fn pop_scope(&self) -> CleanupScope<'tcx> { debug!("popping cleanup scope {}, {} scopes remaining", self.top_scope(|s| s.block_name("")), self.scopes_len() - 1); @@ -686,7 +367,7 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx self.scopes.borrow_mut().pop().unwrap() } - fn top_scope(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R { + fn top_scope(&self, f: F) -> R where F: FnOnce(&CleanupScope<'tcx>) -> R { f(self.scopes.borrow().last().unwrap()) } @@ -738,7 +419,7 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx UnwindExit(val) => { // Generate a block that will resume unwinding to the // calling function - let bcx = self.new_block("resume", None); + let bcx = self.new_block("resume"); match val { UnwindKind::LandingPad => { let addr = self.landingpad_alloca.get() @@ -755,15 +436,6 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx prev_llbb = bcx.llbb; break; } - - ReturnExit => { - prev_llbb = self.get_llreturn(); - break - } - - LoopExit(id, _) => { - bug!("cannot exit from scope {}, not in scope", id); - } } } @@ -782,20 +454,6 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx skip = last_cleanup; break; } - - // If we are searching for a loop exit, - // and this scope is that loop, then stop popping and set - // `prev_llbb` to the appropriate exit block from the loop. - let scope = popped_scopes.last().unwrap(); - match label { - UnwindExit(..) 
| ReturnExit => { } - LoopExit(id, exit) => { - if let Some(exit) = scope.kind.early_exit_block(id, exit) { - prev_llbb = exit; - break - } - } - } } debug!("trans_cleanups_to_exit_scope: popped {} scopes", @@ -826,7 +484,7 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx let name = scope.block_name("clean"); debug!("generating cleanups for {}", name); - let bcx_in = self.new_block(&name[..], None); + let bcx_in = self.new_block(&name[..]); let exit_label = label.start(bcx_in); let mut bcx_out = bcx_in; let len = scope.cleanups.len(); @@ -869,7 +527,7 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx Some(llbb) => return llbb, None => { let name = last_scope.block_name("unwind"); - pad_bcx = self.new_block(&name[..], None); + pad_bcx = self.new_block(&name[..]); last_scope.cached_landing_pad = Some(pad_bcx.llbb); } } @@ -923,12 +581,9 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx } } -impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> { - fn new(kind: CleanupScopeKind<'blk, 'tcx>, - debug_loc: DebugLoc) - -> CleanupScope<'blk, 'tcx> { +impl<'tcx> CleanupScope<'tcx> { + fn new(debug_loc: DebugLoc) -> CleanupScope<'tcx> { CleanupScope { - kind: kind, debug_loc: debug_loc, cleanups: vec!(), cached_early_exits: vec!(), @@ -936,11 +591,6 @@ impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> { } } - fn clear_cached_exits(&mut self) { - self.cached_early_exits = vec!(); - self.cached_landing_pad = None; - } - fn cached_early_exit(&self, label: EarlyExitLabel) -> Option<(BasicBlockRef, usize)> { @@ -961,62 +611,13 @@ impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> { /// True if this scope has cleanups that need unwinding fn needs_invoke(&self) -> bool { - self.cached_landing_pad.is_some() || - self.cleanups.iter().any(|c| c.must_unwind()) + !self.cleanups.is_empty() } /// Returns a suitable name to use for the basic block that handles this cleanup scope fn block_name(&self, prefix: &str) -> String { - match self.kind { - CustomScopeKind => format!("{}_custom_", prefix), - AstScopeKind(id) => format!("{}_ast_{}_", prefix, id), - LoopScopeKind(id, _) => format!("{}_loop_{}_", prefix, id), - } - } - - /// Manipulate cleanup scope for call arguments. Conceptually, each - /// argument to a call is an lvalue, and performing the call moves each - /// of the arguments into a new rvalue (which gets cleaned up by the - /// callee). As an optimization, instead of actually performing all of - /// those moves, trans just manipulates the cleanup scope to obtain the - /// same effect. - pub fn drop_non_lifetime_clean(&mut self) { - self.cleanups.retain(|c| c.is_lifetime_end()); - self.clear_cached_exits(); - } -} - -impl<'blk, 'tcx> CleanupScopeKind<'blk, 'tcx> { - fn is_temp(&self) -> bool { - match *self { - CustomScopeKind => true, - LoopScopeKind(..) | AstScopeKind(..) => false, - } - } - - fn is_ast_with_id(&self, id: ast::NodeId) -> bool { - match *self { - CustomScopeKind | LoopScopeKind(..) => false, - AstScopeKind(i) => i == id - } - } - - fn is_loop_with_id(&self, id: ast::NodeId) -> bool { - match *self { - CustomScopeKind | AstScopeKind(..) 
=> false, - LoopScopeKind(i, _) => i == id - } - } - - /// If this is a loop scope with id `id`, return the early exit block `exit`, else `None` - fn early_exit_block(&self, - id: ast::NodeId, - exit: usize) -> Option { - match *self { - LoopScopeKind(i, ref exits) if id == i => Some(exits[exit].llbb), - _ => None, - } + format!("{}_custom_", prefix) } } @@ -1057,7 +658,6 @@ impl EarlyExitLabel { bcx.lpad.set(Some(bcx.fcx.lpad_arena.alloc(LandingPad::gnu()))); *self } - label => label, } } } @@ -1080,20 +680,10 @@ pub struct DropValue<'tcx> { is_immediate: bool, val: ValueRef, ty: Ty<'tcx>, - fill_on_drop: bool, skip_dtor: bool, - drop_hint: Option, } -impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> { - fn must_unwind(&self) -> bool { - true - } - - fn is_lifetime_end(&self) -> bool { - false - } - +impl<'tcx> DropValue<'tcx> { fn trans<'blk>(&self, bcx: Block<'blk, 'tcx>, debug_loc: DebugLoc) @@ -1107,180 +697,8 @@ impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> { let bcx = if self.is_immediate { glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc, self.skip_dtor) } else { - glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor, self.drop_hint) + glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor) }; - if self.fill_on_drop { - base::drop_done_fill_mem(bcx, self.val, self.ty); - } bcx } } - -#[derive(Copy, Clone, Debug)] -pub enum Heap { - HeapExchange -} - -#[derive(Copy, Clone)] -pub struct FreeValue<'tcx> { - ptr: ValueRef, - heap: Heap, - content_ty: Ty<'tcx> -} - -impl<'tcx> Cleanup<'tcx> for FreeValue<'tcx> { - fn must_unwind(&self) -> bool { - true - } - - fn is_lifetime_end(&self) -> bool { - false - } - - fn trans<'blk>(&self, - bcx: Block<'blk, 'tcx>, - debug_loc: DebugLoc) - -> Block<'blk, 'tcx> { - match self.heap { - HeapExchange => { - glue::trans_exchange_free_ty(bcx, - self.ptr, - self.content_ty, - debug_loc) - } - } - } -} - -#[derive(Copy, Clone)] -pub struct LifetimeEnd { - ptr: ValueRef, -} - -impl<'tcx> Cleanup<'tcx> for LifetimeEnd { - fn must_unwind(&self) -> bool { - false - } - - fn is_lifetime_end(&self) -> bool { - true - } - - fn trans<'blk>(&self, - bcx: Block<'blk, 'tcx>, - debug_loc: DebugLoc) - -> Block<'blk, 'tcx> { - debug_loc.apply(bcx.fcx); - base::call_lifetime_end(bcx, self.ptr); - bcx - } -} - -pub fn temporary_scope(tcx: TyCtxt, - id: ast::NodeId) - -> ScopeId { - match tcx.region_maps.temporary_scope(id) { - Some(scope) => { - let r = AstScope(scope.node_id(&tcx.region_maps)); - debug!("temporary_scope({}) = {:?}", id, r); - r - } - None => { - bug!("no temporary scope available for expr {}", id) - } - } -} - -pub fn var_scope(tcx: TyCtxt, - id: ast::NodeId) - -> ScopeId { - let r = AstScope(tcx.region_maps.var_scope(id).node_id(&tcx.region_maps)); - debug!("var_scope({}) = {:?}", id, r); - r -} - -/////////////////////////////////////////////////////////////////////////// -// These traits just exist to put the methods into this file. 
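`needs_invoke` shrinking to `!self.cleanups.is_empty()` is sound because the only cleanup kind left is `DropValue`, whose removed `must_unwind` unconditionally returned true: a pending drop always has to run during unwinding, so any non-empty scope forces calls to become invokes with a landing pad. The guarantee being preserved, in ordinary Rust:

    struct Noisy(&'static str);

    impl Drop for Noisy {
        fn drop(&mut self) { println!("dropped: {}", self.0); }
    }

    fn main() {
        // Both values get dropped: the inner one on the unwind path,
        // which is exactly what the landing-pad machinery implements.
        let _outer = Noisy("outer, normal exit");
        let _ = std::panic::catch_unwind(|| {
            let _inner = Noisy("inner, unwind path");
            panic!("boom");
        });
    }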
- -pub trait CleanupMethods<'blk, 'tcx> { - fn push_ast_cleanup_scope(&self, id: NodeIdAndSpan); - fn push_loop_cleanup_scope(&self, - id: ast::NodeId, - exits: [Block<'blk, 'tcx>; EXIT_MAX]); - fn push_custom_cleanup_scope(&self) -> CustomScopeIndex; - fn push_custom_cleanup_scope_with_debug_loc(&self, - debug_loc: NodeIdAndSpan) - -> CustomScopeIndex; - fn pop_and_trans_ast_cleanup_scope(&self, - bcx: Block<'blk, 'tcx>, - cleanup_scope: ast::NodeId) - -> Block<'blk, 'tcx>; - fn pop_loop_cleanup_scope(&self, - cleanup_scope: ast::NodeId); - fn pop_custom_cleanup_scope(&self, - custom_scope: CustomScopeIndex); - fn pop_and_trans_custom_cleanup_scope(&self, - bcx: Block<'blk, 'tcx>, - custom_scope: CustomScopeIndex) - -> Block<'blk, 'tcx>; - fn top_loop_scope(&self) -> ast::NodeId; - fn normal_exit_block(&'blk self, - cleanup_scope: ast::NodeId, - exit: usize) -> BasicBlockRef; - fn return_exit_block(&'blk self) -> BasicBlockRef; - fn schedule_lifetime_end(&self, - cleanup_scope: ScopeId, - val: ValueRef); - fn schedule_drop_mem(&self, - cleanup_scope: ScopeId, - val: ValueRef, - ty: Ty<'tcx>, - drop_hint: Option>); - fn schedule_drop_and_fill_mem(&self, - cleanup_scope: ScopeId, - val: ValueRef, - ty: Ty<'tcx>, - drop_hint: Option>); - fn schedule_drop_adt_contents(&self, - cleanup_scope: ScopeId, - val: ValueRef, - ty: Ty<'tcx>); - fn schedule_drop_immediate(&self, - cleanup_scope: ScopeId, - val: ValueRef, - ty: Ty<'tcx>); - fn schedule_free_value(&self, - cleanup_scope: ScopeId, - val: ValueRef, - heap: Heap, - content_ty: Ty<'tcx>); - fn schedule_clean(&self, - cleanup_scope: ScopeId, - cleanup: CleanupObj<'tcx>); - fn schedule_clean_in_ast_scope(&self, - cleanup_scope: ast::NodeId, - cleanup: CleanupObj<'tcx>); - fn schedule_clean_in_custom_scope(&self, - custom_scope: CustomScopeIndex, - cleanup: CleanupObj<'tcx>); - fn needs_invoke(&self) -> bool; - fn get_landing_pad(&'blk self) -> BasicBlockRef; -} - -trait CleanupHelperMethods<'blk, 'tcx> { - fn top_ast_scope(&self) -> Option; - fn top_nonempty_cleanup_scope(&self) -> Option; - fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool; - fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool; - fn trans_scope_cleanups(&self, - bcx: Block<'blk, 'tcx>, - scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx>; - fn trans_cleanups_to_exit_scope(&'blk self, - label: EarlyExitLabel) - -> BasicBlockRef; - fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef; - fn scopes_len(&self) -> usize; - fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>); - fn pop_scope(&self) -> CleanupScope<'blk, 'tcx>; - fn top_scope(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R; -} diff --git a/src/librustc_trans/closure.rs b/src/librustc_trans/closure.rs index 77b2c43167cfd..842a8fddb83e1 100644 --- a/src/librustc_trans/closure.rs +++ b/src/librustc_trans/closure.rs @@ -10,113 +10,21 @@ use arena::TypedArena; use back::symbol_names; -use llvm::{self, ValueRef, get_param, get_params}; +use llvm::{self, ValueRef, get_params}; use rustc::hir::def_id::DefId; use abi::{Abi, FnType}; -use adt; use attributes; use base::*; -use build::*; -use callee::{self, ArgVals, Callee}; -use cleanup::{CleanupMethods, CustomScope, ScopeId}; +use callee::{self, Callee}; use common::*; -use datum::{ByRef, Datum, lvalue_scratch_datum}; -use datum::{rvalue_scratch_datum, Rvalue}; -use debuginfo::{self, DebugLoc}; +use debuginfo::{DebugLoc}; use declare; -use expr; use monomorphize::{Instance}; use value::Value; -use 
Disr; use rustc::ty::{self, Ty, TyCtxt}; -use session::config::FullDebugInfo; - -use syntax::ast; use rustc::hir; -use libc::c_uint; - -fn load_closure_environment<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - closure_def_id: DefId, - arg_scope_id: ScopeId, - id: ast::NodeId) { - let _icx = push_ctxt("closure::load_closure_environment"); - let kind = kind_for_closure(bcx.ccx(), closure_def_id); - - let env_arg = &bcx.fcx.fn_ty.args[0]; - let mut env_idx = bcx.fcx.fn_ty.ret.is_indirect() as usize; - - // Special case for small by-value selfs. - let llenv = if kind == ty::ClosureKind::FnOnce && !env_arg.is_indirect() { - let closure_ty = node_id_type(bcx, id); - let llenv = rvalue_scratch_datum(bcx, closure_ty, "closure_env").val; - env_arg.store_fn_arg(&bcx.build(), &mut env_idx, llenv); - llenv - } else { - get_param(bcx.fcx.llfn, env_idx as c_uint) - }; - - // Store the pointer to closure data in an alloca for debug info because that's what the - // llvm.dbg.declare intrinsic expects - let env_pointer_alloca = if bcx.sess().opts.debuginfo == FullDebugInfo { - let alloc = alloca(bcx, val_ty(llenv), "__debuginfo_env_ptr"); - Store(bcx, llenv, alloc); - Some(alloc) - } else { - None - }; - - bcx.tcx().with_freevars(id, |fv| { - for (i, freevar) in fv.iter().enumerate() { - let upvar_id = ty::UpvarId { var_id: freevar.def.var_id(), - closure_expr_id: id }; - let upvar_capture = bcx.tcx().upvar_capture(upvar_id).unwrap(); - let mut upvar_ptr = StructGEP(bcx, llenv, i); - let captured_by_ref = match upvar_capture { - ty::UpvarCapture::ByValue => false, - ty::UpvarCapture::ByRef(..) => { - upvar_ptr = Load(bcx, upvar_ptr); - true - } - }; - let node_id = freevar.def.var_id(); - bcx.fcx.llupvars.borrow_mut().insert(node_id, upvar_ptr); - - if kind == ty::ClosureKind::FnOnce && !captured_by_ref { - let hint = bcx.fcx.lldropflag_hints.borrow().hint_datum(upvar_id.var_id); - bcx.fcx.schedule_drop_mem(arg_scope_id, - upvar_ptr, - node_id_type(bcx, node_id), - hint) - } - - if let Some(env_pointer_alloca) = env_pointer_alloca { - debuginfo::create_captured_var_metadata( - bcx, - node_id, - env_pointer_alloca, - i, - captured_by_ref, - freevar.span); - } - } - }) -} - -pub enum ClosureEnv { - NotClosure, - Closure(DefId, ast::NodeId), -} - -impl ClosureEnv { - pub fn load<'blk,'tcx>(self, bcx: Block<'blk, 'tcx>, arg_scope: ScopeId) { - if let ClosureEnv::Closure(def_id, id) = self { - load_closure_environment(bcx, def_id, arg_scope, id); - } - } -} - fn get_self_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, closure_id: DefId, fn_ty: Ty<'tcx>) @@ -181,68 +89,15 @@ fn get_or_create_closure_declaration<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, llfn } -fn translating_closure_body_via_mir_will_fail(ccx: &CrateContext, - closure_def_id: DefId) - -> bool { - let default_to_mir = ccx.sess().opts.debugging_opts.orbit; - let invert = if default_to_mir { "rustc_no_mir" } else { "rustc_mir" }; - let use_mir = default_to_mir ^ ccx.tcx().has_attr(closure_def_id, invert); - - !use_mir -} - pub fn trans_closure_body_via_mir<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, closure_def_id: DefId, closure_substs: ty::ClosureSubsts<'tcx>) { - use syntax::ast::DUMMY_NODE_ID; - use syntax_pos::DUMMY_SP; - use syntax::ptr::P; - - trans_closure_expr(Dest::Ignore(ccx), - &hir::FnDecl { - inputs: P::new(), - output: hir::Return(P(hir::Ty { - id: DUMMY_NODE_ID, - span: DUMMY_SP, - node: hir::Ty_::TyNever, - })), - variadic: false - }, - &hir::Block { - stmts: P::new(), - expr: None, - id: DUMMY_NODE_ID, - rules: hir::DefaultBlock, - span: DUMMY_SP - 
}, - DUMMY_NODE_ID, - closure_def_id, - closure_substs); -} - -pub enum Dest<'a, 'tcx: 'a> { - SaveIn(Block<'a, 'tcx>, ValueRef), - Ignore(&'a CrateContext<'a, 'tcx>) -} - -pub fn trans_closure_expr<'a, 'tcx>(dest: Dest<'a, 'tcx>, - decl: &hir::FnDecl, - body: &hir::Block, - id: ast::NodeId, - closure_def_id: DefId, // (*) - closure_substs: ty::ClosureSubsts<'tcx>) - -> Option> -{ // (*) Note that in the case of inlined functions, the `closure_def_id` will be the // defid of the closure in its original crate, whereas `id` will be the id of the local // inlined copy. - debug!("trans_closure_expr(id={:?}, closure_def_id={:?}, closure_substs={:?})", - id, closure_def_id, closure_substs); + debug!("trans_closure_body_via_mir(closure_def_id={:?}, closure_substs={:?})", + closure_def_id, closure_substs); - let ccx = match dest { - Dest::SaveIn(bcx, _) => bcx.ccx(), - Dest::Ignore(ccx) => ccx - }; let tcx = ccx.tcx(); let _icx = push_ctxt("closure::trans_closure_expr"); @@ -285,52 +140,13 @@ pub fn trans_closure_expr<'a, 'tcx>(dest: Dest<'a, 'tcx>, }; trans_closure(ccx, - decl, - body, llfn, Instance::new(closure_def_id, param_substs), - id, &sig, - Abi::RustCall, - ClosureEnv::Closure(closure_def_id, id)); + Abi::RustCall); ccx.instances().borrow_mut().insert(instance, llfn); } - - // Don't hoist this to the top of the function. It's perfectly legitimate - // to have a zero-size closure (in which case dest will be `Ignore`) and - // we must still generate the closure body. - let (mut bcx, dest_addr) = match dest { - Dest::SaveIn(bcx, p) => (bcx, p), - Dest::Ignore(_) => { - debug!("trans_closure_expr() ignoring result"); - return None; - } - }; - - let repr = adt::represent_type(ccx, node_id_type(bcx, id)); - - // Create the closure. - tcx.with_freevars(id, |fv| { - for (i, freevar) in fv.iter().enumerate() { - let datum = expr::trans_var(bcx, freevar.def); - let upvar_slot_dest = adt::trans_field_ptr( - bcx, &repr, adt::MaybeSizedValue::sized(dest_addr), Disr(0), i); - let upvar_id = ty::UpvarId { var_id: freevar.def.var_id(), - closure_expr_id: id }; - match tcx.upvar_capture(upvar_id).unwrap() { - ty::UpvarCapture::ByValue => { - bcx = datum.store_to(bcx, upvar_slot_dest); - } - ty::UpvarCapture::ByRef(..) => { - Store(bcx, datum.to_llref(), upvar_slot_dest); - } - } - } - }); - adt::trans_set_discr(bcx, &repr, dest_addr, Disr(0)); - - Some(bcx) } pub fn trans_closure_method<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, @@ -347,32 +163,7 @@ pub fn trans_closure_method<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, if !ccx.sess().target.target.options.allows_weak_linkage && !ccx.sess().opts.single_codegen_unit() { - if let Some(node_id) = ccx.tcx().map.as_local_node_id(closure_def_id) { - // If the closure is defined in the local crate, we can always just - // translate it. - let (decl, body) = match ccx.tcx().map.expect_expr(node_id).node { - hir::ExprClosure(_, ref decl, ref body, _) => (decl, body), - _ => { unreachable!() } - }; - - trans_closure_expr(Dest::Ignore(ccx), - decl, - body, - node_id, - closure_def_id, - substs); - } else { - // If the closure is defined in an upstream crate, we can only - // translate it if MIR-trans is active. - - if translating_closure_body_via_mir_will_fail(ccx, closure_def_id) { - ccx.sess().fatal("You have run into a known limitation of the \ - MingW toolchain. 
Either compile with -Zorbit or \ - with -Ccodegen-units=1 to work around it."); - } - - trans_closure_body_via_mir(ccx, closure_def_id, substs); - } + trans_closure_body_via_mir(ccx, closure_def_id, substs); } // If the closure is a Fn closure, but a FnOnce is needed (etc), @@ -472,28 +263,21 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( let (block_arena, fcx): (TypedArena<_>, FunctionContext); block_arena = TypedArena::new(); fcx = FunctionContext::new(ccx, lloncefn, fn_ty, None, &block_arena); - let mut bcx = fcx.init(false, None); + let mut bcx = fcx.init(false); // the first argument (`self`) will be the (by value) closure env. - let self_scope = fcx.push_custom_cleanup_scope(); - let self_scope_id = CustomScope(self_scope); let mut llargs = get_params(fcx.llfn); let mut self_idx = fcx.fn_ty.ret.is_indirect() as usize; let env_arg = &fcx.fn_ty.args[0]; let llenv = if env_arg.is_indirect() { - Datum::new(llargs[self_idx], closure_ty, Rvalue::new(ByRef)) - .add_clean(&fcx, self_scope_id) + llargs[self_idx] } else { - unpack_datum!(bcx, lvalue_scratch_datum(bcx, closure_ty, "self", - InitAlloca::Dropped, - self_scope_id, |bcx, llval| { - let mut llarg_idx = self_idx; - env_arg.store_fn_arg(&bcx.build(), &mut llarg_idx, llval); - bcx.fcx.schedule_lifetime_end(self_scope_id, llval); - bcx - })).val + let scratch = alloc_ty(bcx, closure_ty, "self"); + let mut llarg_idx = self_idx; + env_arg.store_fn_arg(&bcx.build(), &mut llarg_idx, scratch); + scratch }; debug!("trans_fn_once_adapter_shim: env={:?}", Value(llenv)); @@ -510,15 +294,19 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( llargs[self_idx] = llenv; } - let dest = - fcx.llretslotptr.get().map( - |_| expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot"))); + let dest = fcx.llretslotptr.get(); let callee = Callee { data: callee::Fn(llreffn), ty: llref_fn_ty }; - bcx = callee.call(bcx, DebugLoc::None, ArgVals(&llargs[self_idx..]), dest).bcx; + + // Call the by-ref closure body with `self` in a cleanup scope, + // to drop `self` when the body returns, or in case it unwinds. + let self_scope = fcx.push_custom_cleanup_scope(); + fcx.schedule_drop_mem(self_scope, llenv, closure_ty); + + bcx = callee.call(bcx, DebugLoc::None, &llargs[self_idx..], dest).bcx; fcx.pop_and_trans_custom_cleanup_scope(bcx, self_scope); diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index b1aaea7d984c9..c5053e4feee62 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -16,7 +16,6 @@ use session::Session; use llvm; use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef, TypeKind}; use llvm::{True, False, Bool, OperandBundleDef}; -use rustc::cfg; use rustc::hir::def::Def; use rustc::hir::def_id::DefId; use rustc::infer::TransNormalize; @@ -30,7 +29,6 @@ use builder::Builder; use callee::Callee; use cleanup; use consts; -use datum; use debuginfo::{self, DebugLoc}; use declare; use machine; @@ -43,7 +41,6 @@ use rustc::ty::layout::Layout; use rustc::traits::{self, SelectionContext, Reveal}; use rustc::ty::fold::TypeFoldable; use rustc::hir; -use util::nodemap::NodeMap; use arena::TypedArena; use libc::{c_uint, c_char}; @@ -202,16 +199,6 @@ pub fn gensym_name(name: &str) -> ast::Name { use Disr; -#[derive(Copy, Clone)] -pub struct NodeIdAndSpan { - pub id: ast::NodeId, - pub span: Span, -} - -pub fn expr_info(expr: &hir::Expr) -> NodeIdAndSpan { - NodeIdAndSpan { id: expr.id, span: expr.span } -} - /// The concrete version of ty::FieldDef. The name is the field index if /// the field is numeric. 
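Back in the closure.rs hunk, the rewritten `trans_fn_once_adapter_shim` reduces the datum machinery to one idea: call the by-ref closure body with `self` scheduled in a cleanup scope, so the environment is dropped when the body returns or unwinds. The same contract in surface Rust (hypothetical helper, not the generated shim):

    // `call_once` on top of a by-ref `Fn` body: take the environment by
    // value, call through a reference, then let it drop afterwards; the
    // cleanup scope in the shim covers the unwinding path as well.
    fn call_once_via_ref<F: Fn() -> usize>(env: F) -> usize {
        (&env)()
        // `env` is dropped here, or during unwinding if the call panics
    }

    fn main() {
        let s = String::from("captured");
        let closure = move || s.len();
        assert_eq!(call_once_via_ref(closure), 8);
    }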
pub struct Field<'tcx>(pub ast::Name, pub Ty<'tcx>); @@ -257,17 +244,6 @@ impl<'a, 'tcx> VariantInfo<'tcx> { } } } - - /// Return the variant corresponding to a given node (e.g. expr) - pub fn of_node(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>, id: ast::NodeId) -> Self { - Self::from_ty(tcx, ty, Some(tcx.expect_def(id))) - } - - pub fn field_index(&self, name: ast::Name) -> usize { - self.fields.iter().position(|&Field(n,_)| n == name).unwrap_or_else(|| { - bug!("unknown field `{}`", name) - }) - } } pub struct BuilderRef_res { @@ -292,37 +268,6 @@ pub fn validate_substs(substs: &Substs) { assert!(!substs.types.needs_infer()); } -// work around bizarre resolve errors -type RvalueDatum<'tcx> = datum::Datum<'tcx, datum::Rvalue>; -pub type LvalueDatum<'tcx> = datum::Datum<'tcx, datum::Lvalue>; - -#[derive(Clone, Debug)] -struct HintEntry<'tcx> { - // The datum for the dropflag-hint itself; note that many - // source-level Lvalues will be associated with the same - // dropflag-hint datum. - datum: cleanup::DropHintDatum<'tcx>, -} - -pub struct DropFlagHintsMap<'tcx> { - // Maps NodeId for expressions that read/write unfragmented state - // to that state's drop-flag "hint." (A stack-local hint - // indicates either that (1.) it is certain that no-drop is - // needed, or (2.) inline drop-flag must be consulted.) - node_map: NodeMap>, -} - -impl<'tcx> DropFlagHintsMap<'tcx> { - pub fn new() -> DropFlagHintsMap<'tcx> { DropFlagHintsMap { node_map: NodeMap() } } - pub fn has_hint(&self, id: ast::NodeId) -> bool { self.node_map.contains_key(&id) } - pub fn insert(&mut self, id: ast::NodeId, datum: cleanup::DropHintDatum<'tcx>) { - self.node_map.insert(id, HintEntry { datum: datum }); - } - pub fn hint_datum(&self, id: ast::NodeId) -> Option> { - self.node_map.get(&id).map(|t|t.datum) - } -} - // Function context. Every LLVM function we create will have one of // these. pub struct FunctionContext<'a, 'tcx: 'a> { @@ -352,12 +297,6 @@ pub struct FunctionContext<'a, 'tcx: 'a> { // A marker for the place where we want to insert the function's static // allocas, so that LLVM will coalesce them into a single alloca call. pub alloca_insert_pt: Cell>, - pub llreturn: Cell>, - - // If the function has any nested return's, including something like: - // fn foo() -> Option { Some(Foo { x: return None }) }, then - // we use a separate alloca for each return - pub needs_ret_allocas: bool, // When working with landingpad-based exceptions this value is alloca'd and // later loaded when using the resume instruction. This ends up being @@ -367,17 +306,6 @@ pub struct FunctionContext<'a, 'tcx: 'a> { // Note that for cleanuppad-based exceptions this is not used. pub landingpad_alloca: Cell>, - // Maps the DefId's for local variables to the allocas created for - // them in llallocas. - pub lllocals: RefCell>>, - - // Same as above, but for closure upvars - pub llupvars: RefCell>, - - // Carries info about drop-flags for local bindings (longer term, - // paths) for the code being compiled. - pub lldropflag_hints: RefCell>, - // Describes the return/argument LLVM types and their ABI handling. pub fn_ty: FnType, @@ -402,9 +330,7 @@ pub struct FunctionContext<'a, 'tcx: 'a> { pub debug_context: debuginfo::FunctionDebugContext, // Cleanup scopes. 
@@ -402,9 +330,7 @@ pub struct FunctionContext<'a, 'tcx: 'a> {
     pub debug_context: debuginfo::FunctionDebugContext,

     // Cleanup scopes.
-    pub scopes: RefCell<Vec<cleanup::CleanupScope<'blk, 'tcx>>>,
-
-    pub cfg: Option<cfg::CFG>,
+    pub scopes: RefCell<Vec<cleanup::CleanupScope<'tcx>>>,
 }

 impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
@@ -420,70 +346,18 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
         }
     }

-    pub fn get_llreturn(&self) -> BasicBlockRef {
-        if self.llreturn.get().is_none() {
-
-            self.llreturn.set(Some(unsafe {
-                llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(), self.llfn,
-                                                    "return\0".as_ptr() as *const _)
-            }))
-        }
-
-        self.llreturn.get().unwrap()
-    }
-
-    pub fn get_ret_slot(&self, bcx: Block<'a, 'tcx>, name: &str) -> ValueRef {
-        if self.needs_ret_allocas {
-            base::alloca(bcx, self.fn_ty.ret.memory_ty(self.ccx), name)
-        } else {
-            self.llretslotptr.get().unwrap()
-        }
-    }
-
     pub fn new_block(&'a self,
-                     name: &str,
-                     opt_node_id: Option<ast::NodeId>)
+                     name: &str)
                      -> Block<'a, 'tcx> {
         unsafe {
             let name = CString::new(name).unwrap();
             let llbb = llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(),
                                                            self.llfn,
                                                            name.as_ptr());
-            BlockS::new(llbb, opt_node_id, self)
+            BlockS::new(llbb, self)
         }
     }

-    pub fn new_id_block(&'a self,
-                        name: &str,
-                        node_id: ast::NodeId)
-                        -> Block<'a, 'tcx> {
-        self.new_block(name, Some(node_id))
-    }
-
-    pub fn new_temp_block(&'a self,
-                          name: &str)
-                          -> Block<'a, 'tcx> {
-        self.new_block(name, None)
-    }
-
-    pub fn join_blocks(&'a self,
-                       id: ast::NodeId,
-                       in_cxs: &[Block<'a, 'tcx>])
-                       -> Block<'a, 'tcx> {
-        let out = self.new_id_block("join", id);
-        let mut reachable = false;
-        for bcx in in_cxs {
-            if !bcx.unreachable.get() {
-                build::Br(*bcx, out.llbb, DebugLoc::None);
-                reachable = true;
-            }
-        }
-        if !reachable {
-            build::Unreachable(out);
-        }
-        return out;
-    }
-
     pub fn monomorphize<T>(&self, value: &T) -> T
         where T: TransNormalize<'tcx>
     {
@@ -523,7 +397,7 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
         let tcx = ccx.tcx();
         match tcx.lang_items.eh_personality() {
             Some(def_id) if !base::wants_msvc_seh(ccx.sess()) => {
-                Callee::def(ccx, def_id, Substs::empty(tcx)).reify(ccx).val
+                Callee::def(ccx, def_id, Substs::empty(tcx)).reify(ccx)
             }
             _ => {
                 if let Some(llpersonality) = ccx.eh_personality().get() {
@@ -565,12 +439,12 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {

         let unwresume = ccx.eh_unwind_resume();
         if let Some(llfn) = unwresume.get() {
-            return Callee::ptr(datum::immediate_rvalue(llfn, ty));
+            return Callee::ptr(llfn, ty);
         }
         let llfn = declare::declare_fn(ccx, "rust_eh_unwind_resume", ty);
         attributes::unwind(llfn, true);
         unwresume.set(Some(llfn));
-        Callee::ptr(datum::immediate_rvalue(llfn, ty))
+        Callee::ptr(llfn, ty)
     }
 }

@@ -593,10 +467,6 @@ pub struct BlockS<'blk, 'tcx: 'blk> {
     // kind of landing pad its in, otherwise this is none.
     pub lpad: Cell<Option<&'blk LandingPad>>,

-    // AST node-id associated with this block, if any. Used for
-    // debugging purposes only.
-    pub opt_node_id: Option<ast::NodeId>,
-
     // The function context for the function to which this block is
     // attached.
     pub fcx: &'blk FunctionContext<'blk, 'tcx>,
@@ -606,7 +476,6 @@ pub type Block<'blk, 'tcx> = &'blk BlockS<'blk, 'tcx>;

 impl<'blk, 'tcx> BlockS<'blk, 'tcx> {
     pub fn new(llbb: BasicBlockRef,
-               opt_node_id: Option<ast::NodeId>,
                fcx: &'blk FunctionContext<'blk, 'tcx>)
                -> Block<'blk, 'tcx> {
         fcx.block_arena.alloc(BlockS {
@@ -614,7 +483,6 @@ impl<'blk, 'tcx> BlockS<'blk, 'tcx> {
             terminated: Cell::new(false),
             unreachable: Cell::new(false),
             lpad: Cell::new(None),
-            opt_node_id: opt_node_id,
             fcx: fcx
         })
     }
@@ -883,13 +751,6 @@ pub fn C_integral(t: Type, u: u64, sign_extend: bool) -> ValueRef {
     }
 }

-pub fn C_floating(s: &str, t: Type) -> ValueRef {
-    unsafe {
-        let s = CString::new(s).unwrap();
-        llvm::LLVMConstRealOfString(t.to_ref(), s.as_ptr())
-    }
-}
-
 pub fn C_floating_f64(f: f64, t: Type) -> ValueRef {
     unsafe {
         llvm::LLVMConstReal(t.to_ref(), f)
@@ -916,19 +777,6 @@ pub fn C_u64(ccx: &CrateContext, i: u64) -> ValueRef {
     C_integral(Type::i64(ccx), i, false)
 }

-pub fn C_int<I: AsI64>(ccx: &CrateContext, i: I) -> ValueRef {
-    let v = i.as_i64();
-
-    let bit_size = machine::llbitsize_of_real(ccx, ccx.int_type());
-
-    if bit_size < 64 {
-        // make sure it doesn't overflow
-        assert!(v < (1<<(bit_size-1)) && v >= -(1<<(bit_size-1)));
-    }
-
-    C_integral(ccx.int_type(), v as u64, true)
-}
-
 pub fn C_uint<I: AsU64>(ccx: &CrateContext, i: I) -> ValueRef {
     let v = i.as_u64();
@@ -1099,24 +947,6 @@ pub fn is_null(val: ValueRef) -> bool {
     }
 }

-pub fn monomorphize_type<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, t: Ty<'tcx>) -> Ty<'tcx> {
-    bcx.fcx.monomorphize(&t)
-}
-
-pub fn node_id_type<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, id: ast::NodeId) -> Ty<'tcx> {
-    let tcx = bcx.tcx();
-    let t = tcx.node_id_to_type(id);
-    monomorphize_type(bcx, t)
-}
-
-pub fn expr_ty<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, ex: &hir::Expr) -> Ty<'tcx> {
-    node_id_type(bcx, ex.id)
-}
-
-pub fn expr_ty_adjusted<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, ex: &hir::Expr) -> Ty<'tcx> {
-    monomorphize_type(bcx, bcx.tcx().expr_ty_adjusted(ex))
-}
-
 /// Attempts to resolve an obligation. The result is a shallow vtable resolution -- meaning that we
 /// do not (necessarily) resolve all nested obligations on the impl. Note that type check should
 /// guarantee to us that all nested obligations *could be* resolved if we wanted to.
@@ -1230,34 +1060,6 @@ pub fn langcall(tcx: TyCtxt,
     }
 }

-/// Return the VariantDef corresponding to an inlined variant node
-pub fn inlined_variant_def<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                     inlined_vid: ast::NodeId)
-                                     -> ty::VariantDef<'tcx>
-{
-    let ctor_ty = ccx.tcx().node_id_to_type(inlined_vid);
-    debug!("inlined_variant_def: ctor_ty={:?} inlined_vid={:?}", ctor_ty,
-           inlined_vid);
-    let adt_def = match ctor_ty.sty {
-        ty::TyFnDef(_, _, &ty::BareFnTy { sig: ty::Binder(ty::FnSig {
-            output, ..
-        }), ..}) => output,
-        _ => ctor_ty
-    }.ty_adt_def().unwrap();
-    let variant_def_id = if ccx.tcx().map.is_inlined_node_id(inlined_vid) {
-        ccx.defid_for_inlined_node(inlined_vid).unwrap()
-    } else {
-        ccx.tcx().map.local_def_id(inlined_vid)
-    };
-
-    adt_def.variants
-           .iter()
-           .find(|v| variant_def_id == v.did)
-           .unwrap_or_else(|| {
-               bug!("no variant for {:?}::{}", adt_def, inlined_vid)
-           })
-}
-
 // To avoid UB from LLVM, these two functions mask RHS with an
 // appropriate mask unconditionally (i.e. the fallback behavior for
 // all shifts). For 32- and 64-bit types, this matches the semantics
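The masking contract described in the comment above can be sketched in a few lines of ordinary Rust; this is a hedged illustration of the idea, not rustc's implementation:

// Clamp the shift amount to the bit width so LLVM never sees an
// over-wide shift (which would be undefined behavior at the IR level).
fn masked_shl(lhs: u32, rhs: u32) -> u32 {
    let mask = u32::BITS - 1; // 31 for a 32-bit type
    lhs << (rhs & mask)
}

fn main() {
    assert_eq!(masked_shl(1, 33), 1 << 1); // 33 & 31 == 1
}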
diff --git a/src/librustc_trans/consts.rs b/src/librustc_trans/consts.rs
index 0e9898896778c..fa1e008d496e4 100644
--- a/src/librustc_trans/consts.rs
+++ b/src/librustc_trans/consts.rs
@@ -11,98 +11,26 @@
 use llvm;
 use llvm::{SetUnnamedAddr};
-use llvm::{InternalLinkage, ValueRef, Bool, True};
-use middle::const_qualif::ConstQualif;
-use rustc_const_eval::{ConstEvalErr, lookup_const_fn_by_id, lookup_const_by_id, ErrKind};
-use rustc_const_eval::{eval_length, report_const_eval_err, note_const_eval_err};
-use rustc::hir::def::Def;
+use llvm::{InternalLinkage, ValueRef, True};
+use rustc_const_eval::ConstEvalErr;
 use rustc::hir::def_id::DefId;
 use rustc::hir::map as hir_map;
-use {abi, adt, closure, debuginfo, expr, machine};
+use {debuginfo, machine};
 use base::{self, push_ctxt};
-use callee::Callee;
 use trans_item::TransItem;
-use common::{type_is_sized, C_nil, const_get_elt};
-use common::{CrateContext, C_integral, C_floating, C_bool, C_str_slice, C_bytes, val_ty};
-use common::{C_struct, C_undef, const_to_opt_int, const_to_opt_uint, VariantInfo, C_uint};
-use common::{type_is_fat_ptr, Field, C_vector, C_array, C_null};
-use datum::{Datum, Lvalue};
+use common::{CrateContext, val_ty};
 use declare;
-use monomorphize::{self, Instance};
+use monomorphize::{Instance};
 use type_::Type;
 use type_of;
-use value::Value;
-use Disr;
-use rustc::ty::subst::Substs;
-use rustc::ty::adjustment::{AdjustNeverToAny, AdjustDerefRef, AdjustReifyFnPointer};
-use rustc::ty::adjustment::{AdjustUnsafeFnPointer, AdjustMutToConstPointer};
-use rustc::ty::{self, Ty, TyCtxt};
-use rustc::ty::cast::{CastTy,IntTy};
-use util::nodemap::NodeMap;
-use rustc_const_math::{ConstInt, ConstUsize, ConstIsize};
+use rustc::ty;

 use rustc::hir;

 use std::ffi::{CStr, CString};
-use libc::c_uint;
-use syntax::ast::{self, LitKind};
+use syntax::ast;
 use syntax::attr::{self, AttrMetaMethods};
 use syntax::parse::token;
-use syntax::ptr::P;
-use syntax_pos::Span;
-
-pub type FnArgMap<'a> = Option<&'a NodeMap<ValueRef>>;
-
-pub fn const_lit(cx: &CrateContext, e: &hir::Expr, lit: &ast::Lit)
-                 -> ValueRef {
-    let _icx = push_ctxt("trans_lit");
-    debug!("const_lit: {:?}", lit);
-    match lit.node {
-        LitKind::Byte(b) => C_integral(Type::uint_from_ty(cx, ast::UintTy::U8), b as u64, false),
-        LitKind::Char(i) => C_integral(Type::char(cx), i as u64, false),
-        LitKind::Int(i, ast::LitIntType::Signed(t)) => {
-            C_integral(Type::int_from_ty(cx, t), i, true)
-        }
-        LitKind::Int(u, ast::LitIntType::Unsigned(t)) => {
-            C_integral(Type::uint_from_ty(cx, t), u, false)
-        }
-        LitKind::Int(i, ast::LitIntType::Unsuffixed) => {
-            let lit_int_ty = cx.tcx().node_id_to_type(e.id);
-            match lit_int_ty.sty {
-                ty::TyInt(t) => {
-                    C_integral(Type::int_from_ty(cx, t), i as u64, true)
-                }
-                ty::TyUint(t) => {
-                    C_integral(Type::uint_from_ty(cx, t), i as u64, false)
-                }
-                _ => span_bug!(lit.span,
-                               "integer literal has type {:?} (expected int \
-                                or usize)",
-                               lit_int_ty)
-            }
-        }
-        LitKind::Float(ref fs, t) => {
-            C_floating(&fs, Type::float_from_ty(cx, t))
-        }
-        LitKind::FloatUnsuffixed(ref fs) => {
-            let lit_float_ty = cx.tcx().node_id_to_type(e.id);
-            match lit_float_ty.sty {
-                ty::TyFloat(t) => {
-                    C_floating(&fs, Type::float_from_ty(cx, t))
-                }
-                _ => {
-                    span_bug!(lit.span,
-                              "floating point literal doesn't have the right type");
-                }
-            }
-        }
-        LitKind::Bool(b) => C_bool(cx, b),
-        LitKind::Str(ref s, _) => C_str_slice(cx, (*s).clone()),
-        LitKind::ByteStr(ref data) => {
-            addr_of(cx, C_bytes(cx, &data[..]), 1, "byte_str")
-        }
-    }
-}

 pub fn ptrcast(val: ValueRef, ty: Type) -> ValueRef {
     unsafe {
@@ -154,868 +82,13 @@ pub fn addr_of(ccx: &CrateContext,
     gv
 }

-/// Deref a constant pointer
-pub fn load_const(cx: &CrateContext, v: ValueRef, t: Ty) -> ValueRef {
-    let v = match cx.const_unsized().borrow().get(&v) {
-        Some(&v) => v,
-        None => v
-    };
-    let d = unsafe { llvm::LLVMGetInitializer(v) };
-    if !d.is_null() && t.is_bool() {
-        unsafe { llvm::LLVMConstTrunc(d, Type::i1(cx).to_ref()) }
-    } else {
-        d
-    }
-}
-
-fn const_deref<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                         v: ValueRef,
-                         ty: Ty<'tcx>)
-                         -> (ValueRef, Ty<'tcx>) {
-    match ty.builtin_deref(true, ty::NoPreference) {
-        Some(mt) => {
-            if type_is_sized(cx.tcx(), mt.ty) {
-                (load_const(cx, v, mt.ty), mt.ty)
-            } else {
-                // Derefing a fat pointer does not change the representation,
-                // just the type to the unsized contents.
-                (v, mt.ty)
-            }
-        }
-        None => {
-            bug!("unexpected dereferenceable type {:?}", ty)
-        }
-    }
-}
-
-fn const_fn_call<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                           def_id: DefId,
-                           substs: &'tcx Substs<'tcx>,
-                           arg_vals: &[ValueRef],
-                           param_substs: &'tcx Substs<'tcx>,
-                           trueconst: TrueConst) -> Result<ValueRef, ConstEvalFailure> {
-    let fn_like = lookup_const_fn_by_id(ccx.tcx(), def_id);
-    let fn_like = fn_like.expect("lookup_const_fn_by_id failed in const_fn_call");
-
-    let body = match fn_like.body().expr {
-        Some(ref expr) => expr,
-        None => return Ok(C_nil(ccx))
-    };
-
-    let args = &fn_like.decl().inputs;
-    assert_eq!(args.len(), arg_vals.len());
-
-    let arg_ids = args.iter().map(|arg| arg.pat.id);
-    let fn_args = arg_ids.zip(arg_vals.iter().cloned()).collect();
-
-    let substs = ccx.tcx().erase_regions(&substs);
-    let substs = monomorphize::apply_param_substs(ccx.tcx(),
-                                                  param_substs,
-                                                  &substs);
-
-    const_expr(ccx, body, substs, Some(&fn_args), trueconst).map(|(res, _)| res)
-}
-
-pub fn get_const_expr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                def_id: DefId,
-                                ref_expr: &hir::Expr,
-                                param_substs: &'tcx Substs<'tcx>)
-                                -> &'tcx hir::Expr {
-    let substs = ccx.tcx().node_id_item_substs(ref_expr.id).substs;
-    let substs = ccx.tcx().erase_regions(&substs);
-    let substs = monomorphize::apply_param_substs(ccx.tcx(),
-                                                  param_substs,
-                                                  &substs);
-    match lookup_const_by_id(ccx.tcx(), def_id, Some(substs)) {
-        Some((ref expr, _ty)) => expr,
-        None => {
-            span_bug!(ref_expr.span, "constant item not found")
-        }
-    }
-}
-
-pub enum ConstEvalFailure {
-    /// in case the const evaluator failed on something that panic at runtime
-    /// as defined in RFC 1229
-    Runtime(ConstEvalErr),
-    // in case we found a true constant
-    Compiletime(ConstEvalErr),
-}
-
-impl ConstEvalFailure {
-    fn into_inner(self) -> ConstEvalErr {
-        match self {
-            Runtime(e) => e,
-            Compiletime(e) => e,
-        }
-    }
-
-    pub fn as_inner(&self) -> &ConstEvalErr {
-        match self {
-            &Runtime(ref e) => e,
-            &Compiletime(ref e) => e,
-        }
-    }
-}
-
-#[derive(Copy, Clone, Debug, Eq, PartialEq)]
-pub enum TrueConst {
-    Yes, No
-}
-
-use self::ConstEvalFailure::*;
-
-fn get_const_val<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                           def_id: DefId,
-                           ref_expr: &hir::Expr,
-                           param_substs: &'tcx Substs<'tcx>)
-                           -> Result<ValueRef, ConstEvalFailure> {
-    let expr = get_const_expr(ccx, def_id, ref_expr, param_substs);
-    let empty_substs = Substs::empty(ccx.tcx());
-    match get_const_expr_as_global(ccx, expr, ConstQualif::empty(), empty_substs, TrueConst::Yes) {
-        Err(Runtime(err)) => {
-            report_const_eval_err(ccx.tcx(), &err, expr.span, "expression").emit();
-            Err(Compiletime(err))
-        },
-        other => other,
-    }
-}
-
-pub fn get_const_expr_as_global<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                          expr: &hir::Expr,
-                                          qualif: ConstQualif,
-                                          param_substs: &'tcx Substs<'tcx>,
-                                          trueconst: TrueConst)
-                                          -> Result<ValueRef, ConstEvalFailure> {
-    debug!("get_const_expr_as_global: {:?}", expr.id);
-    // Special-case constants to cache a common global for all uses.
-    if let hir::ExprPath(..) = expr.node {
-        // `def` must be its own statement and cannot be in the `match`
-        // otherwise the `def_map` will be borrowed for the entire match instead
-        // of just to get the `def` value
-        match ccx.tcx().expect_def(expr.id) {
-            Def::Const(def_id) | Def::AssociatedConst(def_id) => {
-                if !ccx.tcx().tables.borrow().adjustments.contains_key(&expr.id) {
-                    debug!("get_const_expr_as_global ({:?}): found const {:?}",
-                           expr.id, def_id);
-                    return get_const_val(ccx, def_id, expr, param_substs);
-                }
-            },
-            _ => {},
-        }
-    }
-
-    let key = (expr.id, param_substs);
-    if let Some(&val) = ccx.const_values().borrow().get(&key) {
-        return Ok(val);
-    }
-    let ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs,
-                                              &ccx.tcx().expr_ty(expr));
-    let val = if qualif.intersects(ConstQualif::NON_STATIC_BORROWS) {
-        // Avoid autorefs as they would create global instead of stack
-        // references, even when only the latter are correct.
-        const_expr_unadjusted(ccx, expr, ty, param_substs, None, trueconst)?
-    } else {
-        const_expr(ccx, expr, param_substs, None, trueconst)?.0
-    };
-
-    // boolean SSA values are i1, but they have to be stored in i8 slots,
-    // otherwise some LLVM optimization passes don't work as expected
-    let val = unsafe {
-        if llvm::LLVMTypeOf(val) == Type::i1(ccx).to_ref() {
-            llvm::LLVMConstZExt(val, Type::i8(ccx).to_ref())
-        } else {
-            val
-        }
-    };
-
-    let lvalue = addr_of(ccx, val, type_of::align_of(ccx, ty), "const");
-    ccx.const_values().borrow_mut().insert(key, lvalue);
-    Ok(lvalue)
-}
-
-pub fn const_expr<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                            e: &hir::Expr,
-                            param_substs: &'tcx Substs<'tcx>,
-                            fn_args: FnArgMap,
-                            trueconst: TrueConst)
-                            -> Result<(ValueRef, Ty<'tcx>), ConstEvalFailure> {
-    let ety = monomorphize::apply_param_substs(cx.tcx(), param_substs,
-                                               &cx.tcx().expr_ty(e));
-    let llconst = const_expr_unadjusted(cx, e, ety, param_substs, fn_args, trueconst)?;
-    let mut llconst = llconst;
-    let mut ety_adjusted = monomorphize::apply_param_substs(cx.tcx(), param_substs,
-                                                            &cx.tcx().expr_ty_adjusted(e));
-    let opt_adj = cx.tcx().tables.borrow().adjustments.get(&e.id).cloned();
-    match opt_adj {
-        Some(AdjustNeverToAny(..)) => span_bug!(e.span, "const expression of type ! encountered"),
-        Some(AdjustReifyFnPointer) => {
-            match ety.sty {
-                ty::TyFnDef(def_id, substs, _) => {
-                    llconst = Callee::def(cx, def_id, substs).reify(cx).val;
-                }
-                _ => {
-                    bug!("{} cannot be reified to a fn ptr", ety)
-                }
-            }
-        }
-        Some(AdjustUnsafeFnPointer) | Some(AdjustMutToConstPointer) => {
-            // purely a type-level thing
-        }
-        Some(AdjustDerefRef(adj)) => {
-            let mut ty = ety;
-            // Save the last autoderef in case we can avoid it.
-            if adj.autoderefs > 0 {
-                for _ in 0..adj.autoderefs-1 {
-                    let (dv, dt) = const_deref(cx, llconst, ty);
-                    llconst = dv;
-                    ty = dt;
-                }
-            }
-
-            if adj.autoref.is_some() {
-                if adj.autoderefs == 0 {
-                    // Don't copy data to do a deref+ref
-                    // (i.e., skip the last auto-deref).
-                    llconst = addr_of(cx, llconst, type_of::align_of(cx, ty), "autoref");
-                    ty = cx.tcx().mk_imm_ref(cx.tcx().mk_region(ty::ReErased), ty);
-                }
-            } else if adj.autoderefs > 0 {
-                let (dv, dt) = const_deref(cx, llconst, ty);
-                llconst = dv;
-
-                // If we derefed a fat pointer then we will have an
-                // open type here. So we need to update the type with
-                // the one returned from const_deref.
-                ety_adjusted = dt;
-            }
-
-            if let Some(target) = adj.unsize {
-                let target = monomorphize::apply_param_substs(cx.tcx(),
-                                                              param_substs,
-                                                              &target);
-
-                let pointee_ty = ty.builtin_deref(true, ty::NoPreference)
-                    .expect("consts: unsizing got non-pointer type").ty;
-                let (base, old_info) = if !type_is_sized(cx.tcx(), pointee_ty) {
-                    // Normally, the source is a thin pointer and we are
-                    // adding extra info to make a fat pointer. The exception
-                    // is when we are upcasting an existing object fat pointer
-                    // to use a different vtable. In that case, we want to
-                    // load out the original data pointer so we can repackage
-                    // it.
-                    (const_get_elt(llconst, &[abi::FAT_PTR_ADDR as u32]),
-                     Some(const_get_elt(llconst, &[abi::FAT_PTR_EXTRA as u32])))
-                } else {
-                    (llconst, None)
-                };
-
-                let unsized_ty = target.builtin_deref(true, ty::NoPreference)
-                    .expect("consts: unsizing got non-pointer target type").ty;
-                let ptr_ty = type_of::in_memory_type_of(cx, unsized_ty).ptr_to();
-                let base = ptrcast(base, ptr_ty);
-                let info = base::unsized_info(cx, pointee_ty, unsized_ty, old_info);
-
-                if old_info.is_none() {
-                    let prev_const = cx.const_unsized().borrow_mut()
-                                       .insert(base, llconst);
-                    assert!(prev_const.is_none() || prev_const == Some(llconst));
-                }
-                assert_eq!(abi::FAT_PTR_ADDR, 0);
-                assert_eq!(abi::FAT_PTR_EXTRA, 1);
-                llconst = C_struct(cx, &[base, info], false);
-            }
-        }
-        None => {}
-    };
-
-    let llty = type_of::sizing_type_of(cx, ety_adjusted);
-    let csize = machine::llsize_of_alloc(cx, val_ty(llconst));
-    let tsize = machine::llsize_of_alloc(cx, llty);
-    if csize != tsize {
-        cx.sess().abort_if_errors();
-        unsafe {
-            // FIXME these values could use some context
-            llvm::LLVMDumpValue(llconst);
-            llvm::LLVMDumpValue(C_undef(llty));
-        }
-        bug!("const {:?} of type {:?} has size {} instead of {}",
-             e, ety_adjusted,
-             csize, tsize);
-    }
-    Ok((llconst, ety_adjusted))
-}
-
-fn check_unary_expr_validity(cx: &CrateContext, e: &hir::Expr, t: Ty,
-                             te: ValueRef, trueconst: TrueConst) -> Result<(), ConstEvalFailure> {
-    // The only kind of unary expression that we check for validity
-    // here is `-expr`, to check if it "overflows" (e.g. `-i32::MIN`).
-    if let hir::ExprUnary(hir::UnNeg, ref inner_e) = e.node {
-
-        // An unfortunate special case: we parse e.g. -128 as a
-        // negation of the literal 128, which means if we're expecting
-        // a i8 (or if it was already suffixed, e.g. `-128_i8`), then
-        // 128 will have already overflowed to -128, and so then the
-        // constant evaluator thinks we're trying to negate -128.
-        //
-        // Catch this up front by looking for ExprLit directly,
-        // and just accepting it.
-        if let hir::ExprLit(_) = inner_e.node { return Ok(()); }
-        let cval = match to_const_int(te, t, cx.tcx()) {
-            Some(v) => v,
-            None => return Ok(()),
-        };
-        const_err(cx, e.span, (-cval).map_err(ErrKind::Math), trueconst)?;
-    }
-    Ok(())
-}
-
-pub fn to_const_int(value: ValueRef, t: Ty, tcx: TyCtxt) -> Option<ConstInt> {
-    match t.sty {
-        ty::TyInt(int_type) => const_to_opt_int(value).and_then(|input| match int_type {
-            ast::IntTy::I8 => {
-                assert_eq!(input as i8 as i64, input);
-                Some(ConstInt::I8(input as i8))
-            },
-            ast::IntTy::I16 => {
-                assert_eq!(input as i16 as i64, input);
-                Some(ConstInt::I16(input as i16))
-            },
-            ast::IntTy::I32 => {
-                assert_eq!(input as i32 as i64, input);
-                Some(ConstInt::I32(input as i32))
-            },
-            ast::IntTy::I64 => {
-                Some(ConstInt::I64(input))
-            },
-            ast::IntTy::Is => {
-                ConstIsize::new(input, tcx.sess.target.int_type)
-                    .ok().map(ConstInt::Isize)
-            },
-        }),
-        ty::TyUint(uint_type) => const_to_opt_uint(value).and_then(|input| match uint_type {
-            ast::UintTy::U8 => {
-                assert_eq!(input as u8 as u64, input);
-                Some(ConstInt::U8(input as u8))
-            },
-            ast::UintTy::U16 => {
-                assert_eq!(input as u16 as u64, input);
-                Some(ConstInt::U16(input as u16))
-            },
-            ast::UintTy::U32 => {
-                assert_eq!(input as u32 as u64, input);
-                Some(ConstInt::U32(input as u32))
-            },
-            ast::UintTy::U64 => {
-                Some(ConstInt::U64(input))
-            },
-            ast::UintTy::Us => {
-                ConstUsize::new(input, tcx.sess.target.uint_type)
-                    .ok().map(ConstInt::Usize)
-            },
-        }),
-        _ => None,
-    }
-}
-
-pub fn const_err(cx: &CrateContext,
-                 span: Span,
-                 result: Result<ConstInt, ErrKind>,
-                 trueconst: TrueConst)
-                 -> Result<ConstInt, ConstEvalFailure> {
-    match (result, trueconst) {
-        (Ok(x), _) => Ok(x),
-        (Err(err), TrueConst::Yes) => {
-            let err = ConstEvalErr{ span: span, kind: err };
-            report_const_eval_err(cx.tcx(), &err, span, "expression").emit();
-            Err(Compiletime(err))
-        },
-        (Err(err), TrueConst::No) => {
-            let err = ConstEvalErr{ span: span, kind: err };
-            let mut diag = cx.tcx().sess.struct_span_warn(
-                span, "this expression will panic at run-time");
-            note_const_eval_err(cx.tcx(), &err, span, "expression", &mut diag);
-            diag.emit();
-            Err(Runtime(err))
-        },
-    }
-}
-
-fn check_binary_expr_validity(cx: &CrateContext, e: &hir::Expr, t: Ty,
-                              te1: ValueRef, te2: ValueRef,
-                              trueconst: TrueConst) -> Result<(), ConstEvalFailure> {
-    let b = if let hir::ExprBinary(b, _, _) = e.node { b } else { bug!() };
-    let (lhs, rhs) = match (to_const_int(te1, t, cx.tcx()), to_const_int(te2, t, cx.tcx())) {
-        (Some(v1), Some(v2)) => (v1, v2),
-        _ => return Ok(()),
-    };
-    let result = match b.node {
-        hir::BiAdd => lhs + rhs,
-        hir::BiSub => lhs - rhs,
-        hir::BiMul => lhs * rhs,
-        hir::BiDiv => lhs / rhs,
-        hir::BiRem => lhs % rhs,
-        hir::BiShl => lhs << rhs,
-        hir::BiShr => lhs >> rhs,
-        _ => return Ok(()),
-    };
-    const_err(cx, e.span, result.map_err(ErrKind::Math), trueconst)?;
-    Ok(())
-}
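The `-128` corner case that `check_unary_expr_validity` guards against is worth a self-contained sketch. This is plain Rust with a hypothetical helper name, not rustc code: the literal is parsed as a negation applied to `128`, but in `i8` the operand has already wrapped, so a naive checked negation would report a bogus overflow.

use std::convert::TryFrom;

// `lit` is the *unnegated* literal value as parsed.
fn const_neg_i8(lit: i64) -> Result<i8, &'static str> {
    // Accept `-128` directly instead of negating an already-wrapped -128.
    if lit == 128 {
        return Ok(i8::MIN);
    }
    let v = i8::try_from(lit).map_err(|_| "literal out of range")?;
    v.checked_neg().ok_or("attempt to negate with overflow")
}

fn main() {
    assert_eq!(const_neg_i8(128), Ok(-128));
    assert_eq!(const_neg_i8(127), Ok(-127));
    assert!(const_neg_i8(129).is_err());
}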
-
-fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                   e: &hir::Expr,
-                                   ety: Ty<'tcx>,
-                                   param_substs: &'tcx Substs<'tcx>,
-                                   fn_args: FnArgMap,
-                                   trueconst: TrueConst)
-                                   -> Result<ValueRef, ConstEvalFailure>
-{
-    debug!("const_expr_unadjusted(e={:?}, ety={:?}, param_substs={:?})",
-           e,
-           ety,
-           param_substs);
-
-    let map_list = |exprs: &[P<hir::Expr>]| -> Result<Vec<ValueRef>, ConstEvalFailure> {
-        exprs.iter()
-             .map(|e| const_expr(cx, &e, param_substs, fn_args, trueconst).map(|(l, _)| l))
-             .collect::<Vec<Result<ValueRef, ConstEvalFailure>>>()
-             .into_iter()
-             .collect()
-        // this dance is necessary to eagerly run const_expr so all errors are reported
-    };
-    let _icx = push_ctxt("const_expr");
-    Ok(match e.node {
-        hir::ExprLit(ref lit) => const_lit(cx, e, &lit),
-        hir::ExprBinary(b, ref e1, ref e2) => {
-            /* Neither type is bottom, and we expect them to be unified
-             * already, so the following is safe. */
-            let (te1, ty) = const_expr(cx, &e1, param_substs, fn_args, trueconst)?;
-            debug!("const_expr_unadjusted: te1={:?}, ty={:?}",
-                   Value(te1), ty);
-            assert!(!ty.is_simd());
-            let is_float = ty.is_fp();
-            let signed = ty.is_signed();
-
-            let (te2, ty2) = const_expr(cx, &e2, param_substs, fn_args, trueconst)?;
-            debug!("const_expr_unadjusted: te2={:?}, ty={:?}",
-                   Value(te2), ty2);
-
-            check_binary_expr_validity(cx, e, ty, te1, te2, trueconst)?;
-
-            unsafe { match b.node {
-                hir::BiAdd if is_float => llvm::LLVMConstFAdd(te1, te2),
-                hir::BiAdd => llvm::LLVMConstAdd(te1, te2),
-
-                hir::BiSub if is_float => llvm::LLVMConstFSub(te1, te2),
-                hir::BiSub => llvm::LLVMConstSub(te1, te2),
-
-                hir::BiMul if is_float => llvm::LLVMConstFMul(te1, te2),
-                hir::BiMul => llvm::LLVMConstMul(te1, te2),
-
-                hir::BiDiv if is_float => llvm::LLVMConstFDiv(te1, te2),
-                hir::BiDiv if signed => llvm::LLVMConstSDiv(te1, te2),
-                hir::BiDiv => llvm::LLVMConstUDiv(te1, te2),
-
-                hir::BiRem if is_float => llvm::LLVMConstFRem(te1, te2),
-                hir::BiRem if signed => llvm::LLVMConstSRem(te1, te2),
-                hir::BiRem => llvm::LLVMConstURem(te1, te2),
-
-                hir::BiAnd => llvm::LLVMConstAnd(te1, te2),
-                hir::BiOr => llvm::LLVMConstOr(te1, te2),
-                hir::BiBitXor => llvm::LLVMConstXor(te1, te2),
-                hir::BiBitAnd => llvm::LLVMConstAnd(te1, te2),
-                hir::BiBitOr => llvm::LLVMConstOr(te1, te2),
-                hir::BiShl => {
-                    let te2 = base::cast_shift_const_rhs(b.node, te1, te2);
-                    llvm::LLVMConstShl(te1, te2)
-                },
-                hir::BiShr => {
-                    let te2 = base::cast_shift_const_rhs(b.node, te1, te2);
-                    if signed { llvm::LLVMConstAShr(te1, te2) }
-                    else      { llvm::LLVMConstLShr(te1, te2) }
-                },
-                hir::BiEq | hir::BiNe | hir::BiLt | hir::BiLe | hir::BiGt | hir::BiGe => {
-                    if is_float {
-                        let cmp = base::bin_op_to_fcmp_predicate(b.node);
-                        llvm::LLVMConstFCmp(cmp, te1, te2)
-                    } else {
-                        let cmp = base::bin_op_to_icmp_predicate(b.node, signed);
-                        llvm::LLVMConstICmp(cmp, te1, te2)
-                    }
-                },
-            } } // unsafe { match b.node {
-        },
-        hir::ExprUnary(u, ref inner_e) => {
-            let (te, ty) = const_expr(cx, &inner_e, param_substs, fn_args, trueconst)?;
-
-            check_unary_expr_validity(cx, e, ty, te, trueconst)?;
-
-            let is_float = ty.is_fp();
-            unsafe { match u {
-                hir::UnDeref => const_deref(cx, te, ty).0,
-                hir::UnNot => llvm::LLVMConstNot(te),
-                hir::UnNeg if is_float => llvm::LLVMConstFNeg(te),
-                hir::UnNeg => llvm::LLVMConstNeg(te),
-            } }
-        },
-        hir::ExprField(ref base, field) => {
-            let (bv, bt) = const_expr(cx, &base, param_substs, fn_args, trueconst)?;
-            let brepr = adt::represent_type(cx, bt);
-            let vinfo = VariantInfo::from_ty(cx.tcx(), bt, None);
-            let ix = vinfo.field_index(field.node);
-            adt::const_get_field(&brepr, bv, vinfo.discr, ix)
-        },
-        hir::ExprTupField(ref base, idx) => {
-            let (bv, bt) = const_expr(cx, &base, param_substs, fn_args, trueconst)?;
-            let brepr = adt::represent_type(cx, bt);
-            let vinfo = VariantInfo::from_ty(cx.tcx(), bt, None);
-            adt::const_get_field(&brepr, bv, vinfo.discr, idx.node)
-        },
-        hir::ExprIndex(ref base, ref index) => {
-            let (bv, bt) = const_expr(cx, &base, param_substs, fn_args, trueconst)?;
-            let iv = const_expr(cx, &index, param_substs, fn_args, TrueConst::Yes)?.0;
-            let iv = if let Some(iv) = const_to_opt_uint(iv) {
-                iv
-            } else {
-                span_bug!(index.span, "index is not an integer-constant expression");
-            };
-            let (arr, len) = match bt.sty {
-                ty::TyArray(_, u) => (bv, C_uint(cx, u)),
-                ty::TySlice(..) | ty::TyStr => {
-                    let e1 = const_get_elt(bv, &[0]);
-                    (load_const(cx, e1, bt), const_get_elt(bv, &[1]))
-                },
-                ty::TyRef(_, mt) => match mt.ty.sty {
-                    ty::TyArray(_, u) => {
-                        (load_const(cx, bv, mt.ty), C_uint(cx, u))
-                    },
-                    _ => span_bug!(base.span,
-                                   "index-expr base must be a vector \
-                                    or string type, found {:?}",
-                                   bt),
-                },
-                _ => span_bug!(base.span,
-                               "index-expr base must be a vector \
-                                or string type, found {:?}",
-                               bt),
-            };
-
-            let len = unsafe { llvm::LLVMConstIntGetZExtValue(len) as u64 };
-            let len = match bt.sty {
-                ty::TyBox(ty) | ty::TyRef(_, ty::TypeAndMut{ty, ..}) => match ty.sty {
-                    ty::TyStr => {
-                        assert!(len > 0);
-                        len - 1
-                    },
-                    _ => len,
-                },
-                _ => len,
-            };
-            if iv >= len {
-                // FIXME #3170: report this earlier on in the const-eval
-                // pass. Reporting here is a bit late.
-                const_err(cx, e.span, Err(ErrKind::IndexOutOfBounds {
-                    len: len,
-                    index: iv
-                }), trueconst)?;
-                C_undef(val_ty(arr).element_type())
-            } else {
-                const_get_elt(arr, &[iv as c_uint])
-            }
-        },
-        hir::ExprCast(ref base, _) => {
-            let t_cast = ety;
-            let llty = type_of::type_of(cx, t_cast);
-            let (v, t_expr) = const_expr(cx, &base, param_substs, fn_args, trueconst)?;
-            debug!("trans_const_cast({:?} as {:?})", t_expr, t_cast);
-            if expr::cast_is_noop(cx.tcx(), base, t_expr, t_cast) {
-                return Ok(v);
-            }
-            if type_is_fat_ptr(cx.tcx(), t_expr) {
-                // Fat pointer casts.
-                let t_cast_inner =
-                    t_cast.builtin_deref(true, ty::NoPreference).expect("cast to non-pointer").ty;
-                let ptr_ty = type_of::in_memory_type_of(cx, t_cast_inner).ptr_to();
-                let addr = ptrcast(const_get_elt(v, &[abi::FAT_PTR_ADDR as u32]),
-                                   ptr_ty);
-                if type_is_fat_ptr(cx.tcx(), t_cast) {
-                    let info = const_get_elt(v, &[abi::FAT_PTR_EXTRA as u32]);
-                    return Ok(C_struct(cx, &[addr, info], false))
-                } else {
-                    return Ok(addr);
-                }
-            }
-            unsafe { match (
-                CastTy::from_ty(t_expr).expect("bad input type for cast"),
-                CastTy::from_ty(t_cast).expect("bad output type for cast"),
-            ) {
-                (CastTy::Int(IntTy::CEnum), CastTy::Int(_)) => {
-                    let repr = adt::represent_type(cx, t_expr);
-                    let discr = adt::const_get_discrim(&repr, v);
-                    let iv = C_integral(cx.int_type(), discr.0, false);
-                    let s = adt::is_discr_signed(&repr) as Bool;
-                    llvm::LLVMConstIntCast(iv, llty.to_ref(), s)
-                },
-                (CastTy::Int(_), CastTy::Int(_)) => {
-                    let s = t_expr.is_signed() as Bool;
-                    llvm::LLVMConstIntCast(v, llty.to_ref(), s)
-                },
-                (CastTy::Int(_), CastTy::Float) => {
-                    if t_expr.is_signed() {
-                        llvm::LLVMConstSIToFP(v, llty.to_ref())
-                    } else {
-                        llvm::LLVMConstUIToFP(v, llty.to_ref())
-                    }
-                },
-                (CastTy::Float, CastTy::Float) => llvm::LLVMConstFPCast(v, llty.to_ref()),
-                (CastTy::Float, CastTy::Int(IntTy::I)) => llvm::LLVMConstFPToSI(v, llty.to_ref()),
-                (CastTy::Float, CastTy::Int(_)) => llvm::LLVMConstFPToUI(v, llty.to_ref()),
-                (CastTy::Ptr(_), CastTy::Ptr(_)) | (CastTy::FnPtr, CastTy::Ptr(_))
-                | (CastTy::RPtr(_), CastTy::Ptr(_)) => {
-                    ptrcast(v, llty)
-                },
-                (CastTy::FnPtr, CastTy::FnPtr) => ptrcast(v, llty), // isn't this a coercion?
-                (CastTy::Int(_), CastTy::Ptr(_)) => llvm::LLVMConstIntToPtr(v, llty.to_ref()),
-                (CastTy::Ptr(_), CastTy::Int(_)) | (CastTy::FnPtr, CastTy::Int(_)) => {
-                    llvm::LLVMConstPtrToInt(v, llty.to_ref())
-                },
-                _ => {
-                    span_bug!(e.span, "bad combination of types for cast")
-                },
-            } } // unsafe { match ( ... ) {
-        },
-        hir::ExprAddrOf(hir::MutImmutable, ref sub) => {
-            // If this is the address of some static, then we need to return
-            // the actual address of the static itself (short circuit the rest
-            // of const eval).
-            let mut cur = sub;
-            loop {
-                match cur.node {
-                    hir::ExprBlock(ref blk) => {
-                        if let Some(ref sub) = blk.expr {
-                            cur = sub;
-                        } else {
-                            break;
-                        }
-                    },
-                    _ => break,
-                }
-            }
-            if let Some(Def::Static(def_id, _)) = cx.tcx().expect_def_or_none(cur.id) {
-                get_static(cx, def_id).val
-            } else {
-                // If this isn't the address of a static, then keep going through
-                // normal constant evaluation.
-                let (v, ty) = const_expr(cx, &sub, param_substs, fn_args, trueconst)?;
-                addr_of(cx, v, type_of::align_of(cx, ty), "ref")
-            }
-        },
-        hir::ExprAddrOf(hir::MutMutable, ref sub) => {
-            let (v, ty) = const_expr(cx, &sub, param_substs, fn_args, trueconst)?;
-            addr_of_mut(cx, v, type_of::align_of(cx, ty), "ref_mut_slice")
-        },
-        hir::ExprTup(ref es) => {
-            let repr = adt::represent_type(cx, ety);
-            let vals = map_list(&es[..])?;
-            adt::trans_const(cx, &repr, Disr(0), &vals[..])
-        },
-        hir::ExprStruct(_, ref fs, ref base_opt) => {
-            let repr = adt::represent_type(cx, ety);
-
-            let base_val = match *base_opt {
-                Some(ref base) => Some(const_expr(
-                    cx,
-                    &base,
-                    param_substs,
-                    fn_args,
-                    trueconst,
-                )?),
-                None => None
-            };
-
-            let VariantInfo { discr, fields } = VariantInfo::of_node(cx.tcx(), ety, e.id);
-            let cs = fields.iter().enumerate().map(|(ix, &Field(f_name, _))| {
-                match (fs.iter().find(|f| f_name == f.name.node), base_val) {
-                    (Some(ref f), _) => {
-                        const_expr(cx, &f.expr, param_substs, fn_args, trueconst).map(|(l, _)| l)
-                    },
-                    (_, Some((bv, _))) => Ok(adt::const_get_field(&repr, bv, discr, ix)),
-                    (_, None) => span_bug!(e.span, "missing struct field"),
-                }
-            })
-            .collect::<Vec<Result<_, ConstEvalFailure>>>()
-            .into_iter()
-            .collect::<Result<Vec<_>,ConstEvalFailure>>();
-            let cs = cs?;
-            if ety.is_simd() {
-                C_vector(&cs[..])
-            } else {
-                adt::trans_const(cx, &repr, discr, &cs[..])
-            }
-        },
-        hir::ExprVec(ref es) => {
-            let unit_ty = ety.sequence_element_type(cx.tcx());
-            let llunitty = type_of::type_of(cx, unit_ty);
-            let vs = es.iter()
-                       .map(|e| const_expr(
-                           cx,
-                           &e,
-                           param_substs,
-                           fn_args,
-                           trueconst,
-                       ).map(|(l, _)| l))
-                       .collect::<Vec<Result<_, ConstEvalFailure>>>()
-                       .into_iter()
-                       .collect::<Result<Vec<_>, ConstEvalFailure>>();
-            let vs = vs?;
-            // If the vector contains enums, an LLVM array won't work.
-            if vs.iter().any(|vi| val_ty(*vi) != llunitty) {
-                C_struct(cx, &vs[..], false)
-            } else {
-                C_array(llunitty, &vs[..])
-            }
-        },
-        hir::ExprRepeat(ref elem, ref count) => {
-            let unit_ty = ety.sequence_element_type(cx.tcx());
-            let llunitty = type_of::type_of(cx, unit_ty);
-            let n = eval_length(cx.tcx(), count, "repeat count").unwrap();
-            let unit_val = const_expr(cx, &elem, param_substs, fn_args, trueconst)?.0;
-            let vs = vec![unit_val; n];
-            if val_ty(unit_val) != llunitty {
-                C_struct(cx, &vs[..], false)
-            } else {
-                C_array(llunitty, &vs[..])
-            }
-        },
-        hir::ExprPath(..) => {
-            match cx.tcx().expect_def(e.id) {
-                Def::Local(_, id) => {
-                    if let Some(val) = fn_args.and_then(|args| args.get(&id).cloned()) {
-                        val
-                    } else {
-                        span_bug!(e.span, "const fn argument not found")
-                    }
-                }
-                Def::Fn(..) | Def::Method(..) => C_nil(cx),
-                Def::Const(def_id) | Def::AssociatedConst(def_id) => {
-                    load_const(cx, get_const_val(cx, def_id, e, param_substs)?,
-                               ety)
-                }
-                Def::Variant(enum_did, variant_did) => {
-                    let vinfo = cx.tcx().lookup_adt_def(enum_did).variant_with_id(variant_did);
-                    match vinfo.kind {
-                        ty::VariantKind::Unit => {
-                            let repr = adt::represent_type(cx, ety);
-                            adt::trans_const(cx, &repr, Disr::from(vinfo.disr_val), &[])
-                        }
-                        ty::VariantKind::Tuple => C_nil(cx),
-                        ty::VariantKind::Struct => {
-                            span_bug!(e.span, "path-expr refers to a dict variant!")
-                        }
-                    }
-                }
-                // Unit struct or ctor.
-                Def::Struct(..) => C_null(type_of::type_of(cx, ety)),
-                _ => {
-                    span_bug!(e.span, "expected a const, fn, struct, \
-                                       or variant def")
-                }
-            }
-        },
-        hir::ExprCall(ref callee, ref args) => {
-            let mut callee = &**callee;
-            loop {
-                callee = match callee.node {
-                    hir::ExprBlock(ref block) => match block.expr {
-                        Some(ref tail) => &tail,
-                        None => break,
-                    },
-                    _ => break,
-                };
-            }
-            let arg_vals = map_list(args)?;
-            match cx.tcx().expect_def(callee.id) {
-                Def::Fn(did) | Def::Method(did) => {
-                    const_fn_call(
-                        cx,
-                        did,
-                        cx.tcx().node_id_item_substs(callee.id).substs,
-                        &arg_vals,
-                        param_substs,
-                        trueconst,
-                    )?
-                }
-                Def::Struct(..) => {
-                    if ety.is_simd() {
-                        C_vector(&arg_vals[..])
-                    } else {
-                        let repr = adt::represent_type(cx, ety);
-                        adt::trans_const(cx, &repr, Disr(0), &arg_vals[..])
-                    }
-                }
-                Def::Variant(enum_did, variant_did) => {
-                    let repr = adt::represent_type(cx, ety);
-                    let vinfo = cx.tcx().lookup_adt_def(enum_did).variant_with_id(variant_did);
-                    adt::trans_const(cx,
-                                     &repr,
-                                     Disr::from(vinfo.disr_val),
-                                     &arg_vals[..])
-                }
-                _ => span_bug!(e.span, "expected a struct, variant, or const fn def"),
-            }
-        },
-        hir::ExprMethodCall(_, _, ref args) => {
-            let arg_vals = map_list(args)?;
-            let method_call = ty::MethodCall::expr(e.id);
-            let method = cx.tcx().tables.borrow().method_map[&method_call];
-            const_fn_call(cx, method.def_id, method.substs,
-                          &arg_vals, param_substs, trueconst)?
-        },
-        hir::ExprType(ref e, _) => const_expr(cx, &e, param_substs, fn_args, trueconst)?.0,
-        hir::ExprBlock(ref block) => {
-            match block.expr {
-                Some(ref expr) => const_expr(
-                    cx,
-                    &expr,
-                    param_substs,
-                    fn_args,
-                    trueconst,
-                )?.0,
-                None => C_nil(cx),
-            }
-        },
-        hir::ExprClosure(_, ref decl, ref body, _) => {
-            match ety.sty {
-                ty::TyClosure(def_id, substs) => {
-                    closure::trans_closure_expr(closure::Dest::Ignore(cx),
-                                                decl,
-                                                body,
-                                                e.id,
-                                                def_id,
-                                                substs);
-                }
-                _ =>
-                    span_bug!(
-                        e.span,
-                        "bad type for closure expr: {:?}", ety)
-            }
-            C_null(type_of::type_of(cx, ety))
-        },
-        _ => span_bug!(e.span,
-                       "bad constant expression type in consts::const_expr"),
-    })
-}
-
-pub fn get_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, def_id: DefId)
-                            -> Datum<'tcx, Lvalue> {
-    let ty = ccx.tcx().lookup_item_type(def_id).ty;
-
+pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef {
     let instance = Instance::mono(ccx.shared(), def_id);
     if let Some(&g) = ccx.instances().borrow().get(&instance) {
-        return Datum::new(g, ty, Lvalue::new("static"));
+        return g;
     }

+    let ty = ccx.tcx().lookup_item_type(def_id).ty;
     let g = if let Some(id) = ccx.tcx().map.as_local_node_id(def_id) {
         let llty = type_of::type_of(ccx, ty);
@@ -1032,14 +105,10 @@ pub fn get_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, def_id: DefId)
             let defined_in_current_codegen_unit = ccx.codegen_unit()
                                                      .items()
                                                      .contains_key(&TransItem::Static(id));
-            if defined_in_current_codegen_unit {
-                if declare::get_declared_value(ccx, sym).is_none() {
-                    span_bug!(span, "trans: Static not properly pre-defined?");
-                }
-            } else {
-                if declare::get_declared_value(ccx, sym).is_some() {
-                    span_bug!(span, "trans: Conflicting symbol names for static?");
-                }
+            assert!(!defined_in_current_codegen_unit);
+
+            if declare::get_declared_value(ccx, sym).is_some() {
+                span_bug!(span, "trans: Conflicting symbol names for static?");
             }

             let g = declare::define_global(ccx, sym, llty).unwrap();
@@ -1136,34 +205,20 @@ pub fn get_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, def_id: DefId)
     ccx.instances().borrow_mut().insert(instance, g);
     ccx.statics().borrow_mut().insert(g, def_id);

-    Datum::new(g, ty, Lvalue::new("static"))
+    g
 }
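`get_static`'s fast path above is ordinary memoization: declare each static's global once, then reuse the cached handle. A minimal stand-alone sketch, with hypothetical `DefId`/`ValueRef` stand-ins instead of the real rustc/LLVM types:

use std::cell::RefCell;
use std::collections::HashMap;

type DefId = u32;    // stand-in for rustc's DefId
type ValueRef = u64; // stand-in for an LLVM ValueRef

struct Ctx {
    instances: RefCell<HashMap<DefId, ValueRef>>,
}

impl Ctx {
    fn get_static(&self, def_id: DefId) -> ValueRef {
        // Fast path: reuse the previously declared global.
        if let Some(&g) = self.instances.borrow().get(&def_id) {
            return g;
        }
        let g = declare_global(def_id); // the expensive declaration step
        self.instances.borrow_mut().insert(def_id, g);
        g
    }
}

fn declare_global(def_id: DefId) -> ValueRef {
    u64::from(def_id) * 8 // placeholder for an LLVM global declaration
}

fn main() {
    let ctx = Ctx { instances: RefCell::new(HashMap::new()) };
    assert_eq!(ctx.get_static(7), ctx.get_static(7)); // same handle both times
}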

 pub fn trans_static(ccx: &CrateContext,
                     m: hir::Mutability,
-                    expr: &hir::Expr,
                     id: ast::NodeId,
                     attrs: &[ast::Attribute])
                     -> Result<ValueRef, ConstEvalErr> {
     unsafe {
         let _icx = push_ctxt("trans_static");
         let def_id = ccx.tcx().map.local_def_id(id);
-        let datum = get_static(ccx, def_id);
+        let g = get_static(ccx, def_id);

-        let check_attrs = |attrs: &[ast::Attribute]| {
-            let default_to_mir = ccx.sess().opts.debugging_opts.orbit;
-            let invert = if default_to_mir { "rustc_no_mir" } else { "rustc_mir" };
-            default_to_mir ^ attrs.iter().any(|item| item.check_name(invert))
-        };
-        let use_mir = check_attrs(ccx.tcx().map.attrs(id));
-
-        let v = if use_mir {
-            ::mir::trans_static_initializer(ccx, def_id)
-        } else {
-            let empty_substs = Substs::empty(ccx.tcx());
-            const_expr(ccx, expr, empty_substs, None, TrueConst::Yes)
-                .map(|(v, _)| v)
-        }.map_err(|e| e.into_inner())?;
+        let v = ::mir::trans_static_initializer(ccx, def_id)?;

         // boolean SSA values are i1, but they have to be stored in i8 slots,
         // otherwise some LLVM optimization passes don't work as expected
@@ -1175,31 +230,32 @@ pub fn trans_static(ccx: &CrateContext,
             v
         };

-        let llty = type_of::type_of(ccx, datum.ty);
+        let ty = ccx.tcx().lookup_item_type(def_id).ty;
+        let llty = type_of::type_of(ccx, ty);
         let g = if val_llty == llty {
-            datum.val
+            g
         } else {
             // If we created the global with the wrong type,
             // correct the type.
             let empty_string = CString::new("").unwrap();
-            let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(datum.val));
+            let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(g));
             let name_string = CString::new(name_str_ref.to_bytes()).unwrap();
-            llvm::LLVMSetValueName(datum.val, empty_string.as_ptr());
+            llvm::LLVMSetValueName(g, empty_string.as_ptr());
             let new_g = llvm::LLVMRustGetOrInsertGlobal(
                 ccx.llmod(), name_string.as_ptr(), val_llty.to_ref());
             // To avoid breaking any invariants, we leave around the old
             // global for the moment; we'll replace all references to it
             // with the new global later. (See base::trans_crate.)
-            ccx.statics_to_rauw().borrow_mut().push((datum.val, new_g));
+            ccx.statics_to_rauw().borrow_mut().push((g, new_g));
             new_g
         };

-        llvm::LLVMSetAlignment(g, type_of::align_of(ccx, datum.ty));
+        llvm::LLVMSetAlignment(g, type_of::align_of(ccx, ty));
         llvm::LLVMSetInitializer(g, v);

         // As an optimization, all shared statics which do not have interior
         // mutability are placed into read-only memory.
         if m != hir::MutMutable {
-            let tcontents = datum.ty.type_contents(ccx.tcx());
+            let tcontents = ty.type_contents(ccx.tcx());
             if !tcontents.interior_unsafe() {
                 llvm::LLVMSetGlobalConstant(g, llvm::True);
             }
diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs
index c31dbf8943e08..0a295b251b31e 100644
--- a/src/librustc_trans/context.rs
+++ b/src/librustc_trans/context.rs
@@ -53,9 +53,7 @@ pub struct Stats {
     pub n_glues_created: Cell<usize>,
     pub n_null_glues: Cell<usize>,
     pub n_real_glues: Cell<usize>,
-    pub n_fallback_instantiations: Cell<usize>,
     pub n_fns: Cell<usize>,
-    pub n_monos: Cell<usize>,
     pub n_inlines: Cell<usize>,
     pub n_closures: Cell<usize>,
     pub n_llvm_insns: Cell<usize>,
@@ -79,7 +77,6 @@ pub struct SharedCrateContext<'a, 'tcx: 'a> {
     tcx: TyCtxt<'a, 'tcx, 'tcx>,
     stats: Stats,
     check_overflow: bool,
-    check_drop_flag_for_sanity: bool,
     mir_map: &'a MirMap<'tcx>,
     mir_cache: RefCell<DefIdMap<Rc<mir::Mir<'tcx>>>>,

@@ -104,7 +101,6 @@ pub struct LocalCrateContext<'tcx> {
     drop_glues: RefCell<FnvHashMap<DropGlueKind<'tcx>, (ValueRef, FnType)>>,
     /// Cache instances of monomorphic and polymorphic items
     instances: RefCell<FnvHashMap<Instance<'tcx>, ValueRef>>,
-    monomorphizing: RefCell<DefIdMap<usize>>,
     /// Cache generated vtables
     vtables: RefCell<FnvHashMap<ty::PolyTraitRef<'tcx>, ValueRef>>,
     /// Cache of constant strings,
@@ -424,8 +420,7 @@ impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> {
                symbol_hasher: Sha256,
                link_meta: LinkMeta,
                reachable: NodeSet,
-               check_overflow: bool,
-               check_drop_flag_for_sanity: bool)
+               check_overflow: bool)
                -> SharedCrateContext<'b, 'tcx> {
         let (metadata_llcx, metadata_llmod) = unsafe {
             create_context_and_module(&tcx.sess, "metadata")
@@ -490,9 +485,7 @@ impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> {
                 n_glues_created: Cell::new(0),
                 n_null_glues: Cell::new(0),
                 n_real_glues: Cell::new(0),
-                n_fallback_instantiations: Cell::new(0),
                 n_fns: Cell::new(0),
-                n_monos: Cell::new(0),
                 n_inlines: Cell::new(0),
                 n_closures: Cell::new(0),
                 n_llvm_insns: Cell::new(0),
@@ -500,7 +493,6 @@ impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> {
                 fn_stats: RefCell::new(Vec::new()),
             },
             check_overflow: check_overflow,
-            check_drop_flag_for_sanity: check_drop_flag_for_sanity,
             use_dll_storage_attrs: use_dll_storage_attrs,
             translation_items: RefCell::new(FnvHashSet()),
             trait_cache: RefCell::new(DepTrackingMap::new(tcx.dep_graph.clone())),
@@ -629,7 +621,6 @@ impl<'tcx> LocalCrateContext<'tcx> {
             fn_pointer_shims: RefCell::new(FnvHashMap()),
             drop_glues: RefCell::new(FnvHashMap()),
             instances: RefCell::new(FnvHashMap()),
-            monomorphizing: RefCell::new(DefIdMap()),
             vtables: RefCell::new(FnvHashMap()),
             const_cstr_cache: RefCell::new(FnvHashMap()),
             const_unsized: RefCell::new(FnvHashMap()),
@@ -833,10 +824,6 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
         &self.local().instances
     }

-    pub fn monomorphizing<'a>(&'a self) -> &'a RefCell<DefIdMap<usize>> {
-        &self.local().monomorphizing
-    }
-
     pub fn vtables<'a>(&'a self) -> &'a RefCell<FnvHashMap<ty::PolyTraitRef<'tcx>, ValueRef>> {
         &self.local().vtables
     }
@@ -964,13 +951,6 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
         self.shared.check_overflow
     }

-    pub fn check_drop_flag_for_sanity(&self) -> bool {
-        // This controls whether we emit a conditional llvm.debugtrap
-        // guarded on whether the dropflag is one of its (two) valid
-        // values.
-        self.shared.check_drop_flag_for_sanity
-    }
-
     pub fn use_dll_storage_attrs(&self) -> bool {
         self.shared.use_dll_storage_attrs()
     }
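The deleted `check_drop_flag_for_sanity` accessor referred to a guard that old trans could emit behind a -Z debugging switch: trap if an inline drop flag holds anything other than one of its two valid sentinel bytes. A rough model in plain Rust, with placeholder sentinel values rather than the compiler's actual encodings:

const DROP_FLAG_LIVE: u8 = 0x00;    // placeholder sentinel
const DROP_FLAG_DROPPED: u8 = 0x01; // placeholder sentinel

fn check_drop_flag(flag: u8) {
    // Old trans emitted the moral equivalent of this guard as a
    // conditional llvm.debugtrap; corruption meant memory unsafety.
    if flag != DROP_FLAG_LIVE && flag != DROP_FLAG_DROPPED {
        panic!("corrupt drop flag: {:#x}", flag);
    }
}

fn main() {
    check_drop_flag(DROP_FLAG_LIVE); // ok; any other byte would trap
}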
diff --git a/src/librustc_trans/controlflow.rs b/src/librustc_trans/controlflow.rs
deleted file mode 100644
index 8b3a8a2bfccfb..0000000000000
--- a/src/librustc_trans/controlflow.rs
+++ /dev/null
@@ -1,434 +0,0 @@
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use llvm::ValueRef;
-use rustc::hir::def::Def;
-use middle::lang_items::{PanicFnLangItem, PanicBoundsCheckFnLangItem};
-use rustc::ty::subst::Substs;
-use base::*;
-use basic_block::BasicBlock;
-use build::*;
-use callee::{Callee, ArgVals};
-use cleanup::CleanupMethods;
-use cleanup;
-use common::*;
-use consts;
-use debuginfo;
-use debuginfo::{DebugLoc, ToDebugLoc};
-use expr;
-use machine;
-
-use rustc::hir;
-
-use syntax::ast;
-use syntax::parse::token::InternedString;
-use syntax::parse::token;
-
-pub fn trans_stmt<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
-                              s: &hir::Stmt)
-                              -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_stmt");
-    let fcx = cx.fcx;
-    debug!("trans_stmt({:?})", s);
-
-    if cx.unreachable.get() {
-        return cx;
-    }
-
-    if cx.sess().asm_comments() {
-        add_span_comment(cx, s.span, &format!("{:?}", s));
-    }
-
-    let mut bcx = cx;
-
-    let id = s.node.id();
-    let cleanup_debug_loc =
-        debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), id, s.span, false);
-    fcx.push_ast_cleanup_scope(cleanup_debug_loc);
-
-    match s.node {
-        hir::StmtExpr(ref e, _) | hir::StmtSemi(ref e, _) => {
-            bcx = trans_stmt_semi(bcx, &e);
-        }
-        hir::StmtDecl(ref d, _) => {
-            match d.node {
-                hir::DeclLocal(ref local) => {
-                    bcx = init_local(bcx, &local);
-                    debuginfo::create_local_var_metadata(bcx, &local);
-                }
-                // Inner items are visited by `trans_item`/`trans_meth`.
-                hir::DeclItem(_) => {},
-            }
-        }
-    }
-
-    bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, s.node.id());
-
-    return bcx;
-}
-
-pub fn trans_stmt_semi<'blk, 'tcx>(cx: Block<'blk, 'tcx>, e: &hir::Expr)
-                                   -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_stmt_semi");
-
-    if cx.unreachable.get() {
-        return cx;
-    }
-
-    let ty = expr_ty(cx, e);
-    if cx.fcx.type_needs_drop(ty) {
-        expr::trans_to_lvalue(cx, e, "stmt").bcx
-    } else {
-        expr::trans_into(cx, e, expr::Ignore)
-    }
-}
-
-pub fn trans_block<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                               b: &hir::Block,
-                               mut dest: expr::Dest)
-                               -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_block");
-
-    if bcx.unreachable.get() {
-        return bcx;
-    }
-
-    let fcx = bcx.fcx;
-    let mut bcx = bcx;
-
-    let cleanup_debug_loc =
-        debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), b.id, b.span, true);
-    fcx.push_ast_cleanup_scope(cleanup_debug_loc);
-
-    for s in &b.stmts {
-        bcx = trans_stmt(bcx, s);
-    }
-
-    if dest != expr::Ignore {
-        let block_ty = node_id_type(bcx, b.id);
-
-        if b.expr.is_none() || type_is_zero_size(bcx.ccx(), block_ty) {
-            dest = expr::Ignore;
-        } else if b.expr.is_some() {
-            // If the block has an expression, but that expression isn't reachable,
-            // don't save into the destination given, ignore it.
-            if let Some(ref cfg) = bcx.fcx.cfg {
-                if !cfg.node_is_reachable(b.expr.as_ref().unwrap().id) {
-                    dest = expr::Ignore;
-                }
-            }
-        }
-    }
-
-    match b.expr {
-        Some(ref e) => {
-            if !bcx.unreachable.get() {
-                bcx = expr::trans_into(bcx, &e, dest);
-            }
-        }
-        None => {
-            assert!(dest == expr::Ignore || bcx.unreachable.get());
-        }
-    }
-
-    bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, b.id);
-
-    return bcx;
-}
-
-pub fn trans_if<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                            if_id: ast::NodeId,
-                            cond: &hir::Expr,
-                            thn: &hir::Block,
-                            els: Option<&hir::Expr>,
-                            dest: expr::Dest)
-                            -> Block<'blk, 'tcx> {
-    debug!("trans_if(bcx={}, if_id={}, cond={:?}, thn={}, dest={:?})",
-           bcx.to_str(), if_id, cond, thn.id, dest);
-    let _icx = push_ctxt("trans_if");
-
-    if bcx.unreachable.get() {
-        return bcx;
-    }
-
-    let mut bcx = bcx;
-
-    let cond_val = unpack_result!(bcx, expr::trans(bcx, cond).to_llbool());
-
-    // Drop branches that are known to be impossible
-    if let Some(cv) = const_to_opt_uint(cond_val) {
-        if cv == 1 {
-            // if true { .. } [else { .. }]
-            bcx = trans_block(bcx, &thn, dest);
-            DebugLoc::None.apply(bcx.fcx);
-        } else {
-            if let Some(elexpr) = els {
-                bcx = expr::trans_into(bcx, &elexpr, dest);
-                DebugLoc::None.apply(bcx.fcx);
-            }
-        }
-
-        return bcx;
-    }
-
-    let name = format!("then-block-{}-", thn.id);
-    let then_bcx_in = bcx.fcx.new_id_block(&name[..], thn.id);
-    let then_bcx_out = trans_block(then_bcx_in, &thn, dest);
-    DebugLoc::None.apply(bcx.fcx);
-
-    let cond_source_loc = cond.debug_loc();
-
-    let next_bcx;
-    match els {
-        Some(elexpr) => {
-            let else_bcx_in = bcx.fcx.new_id_block("else-block", elexpr.id);
-            let else_bcx_out = expr::trans_into(else_bcx_in, &elexpr, dest);
-            next_bcx = bcx.fcx.join_blocks(if_id,
-                                           &[then_bcx_out, else_bcx_out]);
-            CondBr(bcx, cond_val, then_bcx_in.llbb, else_bcx_in.llbb, cond_source_loc);
-        }
-
-        None => {
-            next_bcx = bcx.fcx.new_id_block("next-block", if_id);
-            Br(then_bcx_out, next_bcx.llbb, DebugLoc::None);
-            CondBr(bcx, cond_val, then_bcx_in.llbb, next_bcx.llbb, cond_source_loc);
-        }
-    }
-
-    // Clear the source location because it is still set to whatever has been translated
-    // right before.
-    DebugLoc::None.apply(next_bcx.fcx);
-
-    next_bcx
-}
-
-pub fn trans_while<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                               loop_expr: &hir::Expr,
-                               cond: &hir::Expr,
-                               body: &hir::Block)
-                               -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_while");
-
-    if bcx.unreachable.get() {
-        return bcx;
-    }
-
-    let fcx = bcx.fcx;
-
-    //            bcx
-    //             |
-    //          cond_bcx_in  <--------+
-    //             |                  |
-    //          cond_bcx_out          |
-    //           |      |             |
-    //           |    body_bcx_in     |
-    //    cleanup_blk    |            |
-    //           |    body_bcx_out ---+
-    //      next_bcx_in
-
-    let next_bcx_in = fcx.new_id_block("while_exit", loop_expr.id);
-    let cond_bcx_in = fcx.new_id_block("while_cond", cond.id);
-    let body_bcx_in = fcx.new_id_block("while_body", body.id);
-
-    fcx.push_loop_cleanup_scope(loop_expr.id, [next_bcx_in, cond_bcx_in]);
-
-    Br(bcx, cond_bcx_in.llbb, loop_expr.debug_loc());
-
-    // compile the block where we will handle loop cleanups
-    let cleanup_llbb = fcx.normal_exit_block(loop_expr.id, cleanup::EXIT_BREAK);
-
-    // compile the condition
-    let Result {bcx: cond_bcx_out, val: cond_val} =
-        expr::trans(cond_bcx_in, cond).to_llbool();
-
-    CondBr(cond_bcx_out, cond_val, body_bcx_in.llbb, cleanup_llbb, cond.debug_loc());
-
-    // loop body:
-    let body_bcx_out = trans_block(body_bcx_in, body, expr::Ignore);
-    Br(body_bcx_out, cond_bcx_in.llbb, DebugLoc::None);
-
-    fcx.pop_loop_cleanup_scope(loop_expr.id);
-    return next_bcx_in;
-}
-
-pub fn trans_loop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                              loop_expr: &hir::Expr,
-                              body: &hir::Block)
-                              -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_loop");
-
-    if bcx.unreachable.get() {
-        return bcx;
-    }
-
-    let fcx = bcx.fcx;
-
-    //            bcx
-    //             |
-    //          body_bcx_in
-    //             |
-    //          body_bcx_out
-    //
-    //          next_bcx
-    //
-    // Links between body_bcx_in and next_bcx are created by
-    // break statements.
-
-    let next_bcx_in = bcx.fcx.new_id_block("loop_exit", loop_expr.id);
-    let body_bcx_in = bcx.fcx.new_id_block("loop_body", body.id);
-
-    fcx.push_loop_cleanup_scope(loop_expr.id, [next_bcx_in, body_bcx_in]);
-
-    Br(bcx, body_bcx_in.llbb, loop_expr.debug_loc());
-    let body_bcx_out = trans_block(body_bcx_in, body, expr::Ignore);
-    Br(body_bcx_out, body_bcx_in.llbb, DebugLoc::None);
-
-    fcx.pop_loop_cleanup_scope(loop_expr.id);
-
-    // If there are no predecessors for the next block, we just translated an endless loop and the
-    // next block is unreachable
-    if BasicBlock(next_bcx_in.llbb).pred_iter().next().is_none() {
-        Unreachable(next_bcx_in);
-    }
-
-    return next_bcx_in;
-}
-
-pub fn trans_break_cont<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                    expr: &hir::Expr,
-                                    opt_label: Option<ast::Name>,
-                                    exit: usize)
-                                    -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_break_cont");
-
-    if bcx.unreachable.get() {
-        return bcx;
-    }
-
-    let fcx = bcx.fcx;
-
-    // Locate loop that we will break to
-    let loop_id = match opt_label {
-        None => fcx.top_loop_scope(),
-        Some(_) => {
-            match bcx.tcx().expect_def(expr.id) {
-                Def::Label(loop_id) => loop_id,
-                r => {
-                    bug!("{:?} in def-map for label", r)
-                }
-            }
-        }
-    };
-
-    // Generate appropriate cleanup code and branch
-    let cleanup_llbb = fcx.normal_exit_block(loop_id, exit);
-    Br(bcx, cleanup_llbb, expr.debug_loc());
-    Unreachable(bcx); // anything afterwards should be ignored
-    return bcx;
-}
-
-pub fn trans_break<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                               expr: &hir::Expr,
-                               label_opt: Option<ast::Name>)
-                               -> Block<'blk, 'tcx> {
-    return trans_break_cont(bcx, expr, label_opt, cleanup::EXIT_BREAK);
-}
-
-pub fn trans_cont<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                              expr: &hir::Expr,
-                              label_opt: Option<ast::Name>)
-                              -> Block<'blk, 'tcx> {
-    return trans_break_cont(bcx, expr, label_opt, cleanup::EXIT_LOOP);
-}
-
-pub fn trans_ret<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                             return_expr: &hir::Expr,
-                             retval_expr: Option<&hir::Expr>)
-                             -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_ret");
-
-    if bcx.unreachable.get() {
-        return bcx;
-    }
-
-    let fcx = bcx.fcx;
-    let mut bcx = bcx;
-    if let Some(x) = retval_expr {
-        let dest = if fcx.llretslotptr.get().is_some() {
-            expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot"))
-        } else {
-            expr::Ignore
-        };
-        bcx = expr::trans_into(bcx, &x, dest);
-        match dest {
-            expr::SaveIn(slot) if fcx.needs_ret_allocas => {
-                Store(bcx, slot, fcx.llretslotptr.get().unwrap());
-            }
-            _ => {}
-        }
-    }
-    let cleanup_llbb = fcx.return_exit_block();
-    Br(bcx, cleanup_llbb, return_expr.debug_loc());
-    Unreachable(bcx);
-    return bcx;
-}
-
-pub fn trans_fail<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                              call_info: NodeIdAndSpan,
-                              fail_str: InternedString)
-                              -> Block<'blk, 'tcx> {
-    let ccx = bcx.ccx();
-    let _icx = push_ctxt("trans_fail_value");
-
-    if bcx.unreachable.get() {
-        return bcx;
-    }
-
-    let v_str = C_str_slice(ccx, fail_str);
-    let loc = bcx.sess().codemap().lookup_char_pos(call_info.span.lo);
-    let filename = token::intern_and_get_ident(&loc.file.name);
-    let filename = C_str_slice(ccx, filename);
-    let line = C_u32(ccx, loc.line as u32);
-    let expr_file_line_const = C_struct(ccx, &[v_str, filename, line], false);
-    let align = machine::llalign_of_min(ccx, val_ty(expr_file_line_const));
-    let expr_file_line = consts::addr_of(ccx, expr_file_line_const, align, "panic_loc");
-    let args = vec!(expr_file_line);
-    let did = langcall(bcx.tcx(), Some(call_info.span), "", PanicFnLangItem);
-    Callee::def(ccx, did, Substs::empty(ccx.tcx()))
-        .call(bcx, call_info.debug_loc(), ArgVals(&args), None).bcx
-}
-
-pub fn trans_fail_bounds_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                           call_info: NodeIdAndSpan,
-                                           index: ValueRef,
-                                           len: ValueRef)
-                                           -> Block<'blk, 'tcx> {
-    let ccx = bcx.ccx();
-    let _icx = push_ctxt("trans_fail_bounds_check");
-
-    if bcx.unreachable.get() {
-        return bcx;
-    }
-
-    // Extract the file/line from the span
-    let loc = bcx.sess().codemap().lookup_char_pos(call_info.span.lo);
-    let filename = token::intern_and_get_ident(&loc.file.name);
-
-    // Invoke the lang item
-    let filename = C_str_slice(ccx, filename);
-    let line = C_u32(ccx, loc.line as u32);
-    let file_line_const = C_struct(ccx, &[filename, line], false);
-    let align = machine::llalign_of_min(ccx, val_ty(file_line_const));
-    let file_line = consts::addr_of(ccx, file_line_const, align, "panic_bounds_check_loc");
-    let args = vec!(file_line, index, len);
-    let did = langcall(bcx.tcx(), Some(call_info.span), "", PanicBoundsCheckFnLangItem);
-    Callee::def(ccx, did, Substs::empty(ccx.tcx()))
-        .call(bcx, call_info.debug_loc(), ArgVals(&args), None).bcx
-}
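The datum module deleted below encoded cleanup state in the type system. A toy model of that discipline (not the removed rustc API): marker types distinguish lvalues (cleanup scheduled) from rvalues (none yet), and consuming methods enforce single use.

struct Lvalue; // cleanup scheduled, value lives in memory
struct Rvalue; // fresh value, no cleanup scheduled yet

struct Datum<K> {
    val: u64, // stand-in for an LLVM ValueRef
    kind: K,
}

impl Datum<Rvalue> {
    // Consuming `self` models the affine discipline the docs describe:
    // once an rvalue has landed somewhere, the original datum is gone.
    fn to_lvalue(self) -> Datum<Lvalue> {
        Datum { val: self.val, kind: Lvalue }
    }
}

fn main() {
    let r = Datum { val: 42, kind: Rvalue };
    let l = r.to_lvalue();
    assert_eq!(l.val, 42); // `r` can no longer be used here
}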
the Rust type, but also a *kind*. The kind indicates whether the datum -//! has cleanup scheduled (lvalue) or not (rvalue) and -- in the case of -//! rvalues -- whether or not the value is "by ref" or "by value". -//! -//! The datum API is designed to try and help you avoid memory errors like -//! forgetting to arrange cleanup or duplicating a value. The type of the -//! datum incorporates the kind, and thus reflects whether it has cleanup -//! scheduled: -//! -//! - `Datum` -- by ref, cleanup scheduled -//! - `Datum` -- by value or by ref, no cleanup scheduled -//! - `Datum` -- either `Datum` or `Datum` -//! -//! Rvalue and expr datums are noncopyable, and most of the methods on -//! datums consume the datum itself (with some notable exceptions). This -//! reflects the fact that datums may represent affine values which ought -//! to be consumed exactly once, and if you were to try to (for example) -//! store an affine value multiple times, you would be duplicating it, -//! which would certainly be a bug. -//! -//! Some of the datum methods, however, are designed to work only on -//! copyable values such as ints or pointers. Those methods may borrow the -//! datum (`&self`) rather than consume it, but they always include -//! assertions on the type of the value represented to check that this -//! makes sense. An example is `shallow_copy()`, which duplicates -//! a datum value. -//! -//! Translating an expression always yields a `Datum` result, but -//! the methods `to_[lr]value_datum()` can be used to coerce a -//! `Datum` into a `Datum` or `Datum` as -//! needed. Coercing to an lvalue is fairly common, and generally occurs -//! whenever it is necessary to inspect a value and pull out its -//! subcomponents (for example, a match, or indexing expression). Coercing -//! to an rvalue is more unusual; it occurs when moving values from place -//! to place, such as in an assignment expression or parameter passing. -//! -//! ### Lvalues in detail -//! -//! An lvalue datum is one for which cleanup has been scheduled. Lvalue -//! datums are always located in memory, and thus the `ValueRef` for an -//! LLVM value is always a pointer to the actual Rust value. This means -//! that if the Datum has a Rust type of `int`, then the LLVM type of the -//! `ValueRef` will be `int*` (pointer to int). -//! -//! Because lvalues already have cleanups scheduled, the memory must be -//! zeroed to prevent the cleanup from taking place (presuming that the -//! Rust type needs drop in the first place, otherwise it doesn't -//! matter). The Datum code automatically performs this zeroing when the -//! value is stored to a new location, for example. -//! -//! Lvalues usually result from evaluating lvalue expressions. For -//! example, evaluating a local variable `x` yields an lvalue, as does a -//! reference to a field like `x.f` or an index `x[i]`. -//! -//! Lvalue datums can also arise by *converting* an rvalue into an lvalue. -//! This is done with the `to_lvalue_datum` method defined on -//! `Datum`. Basically this method just schedules cleanup if the -//! datum is an rvalue, possibly storing the value into a stack slot first -//! if needed. Converting rvalues into lvalues occurs in constructs like -//! `&foo()` or `match foo() { ref x => ... }`, where the user is -//! implicitly requesting a temporary. -//! -//! ### Rvalues in detail -//! -//! Rvalues datums are values with no cleanup scheduled. One must be -//! careful with rvalue datums to ensure that cleanup is properly -//! 
arranged, usually by converting to an lvalue datum or by invoking the -//! `add_clean` method. -//! -//! ### Scratch datums -//! -//! Sometimes you need some temporary scratch space. The functions -//! `[lr]value_scratch_datum()` can be used to get temporary stack -//! space. As their name suggests, they yield lvalues and rvalues -//! respectively. That is, the slot from `lvalue_scratch_datum` will have -//! cleanup arranged, and the slot from `rvalue_scratch_datum` does not. - -pub use self::Expr::*; -pub use self::RvalueMode::*; - -use llvm::ValueRef; -use adt; -use base::*; -use build::{Load, Store}; -use common::*; -use cleanup; -use cleanup::{CleanupMethods, DropHintDatum, DropHintMethods}; -use expr; -use tvec; -use value::Value; -use rustc::ty::Ty; - -use std::fmt; -use syntax::ast; -use syntax_pos::DUMMY_SP; - -/// A `Datum` encapsulates the result of evaluating an expression. It -/// describes where the value is stored, what Rust type the value has, -/// whether it is addressed by reference, and so forth. Please refer -/// the section on datums in `README.md` for more details. -#[derive(Clone, Copy)] -pub struct Datum<'tcx, K> { - /// The llvm value. This is either a pointer to the Rust value or - /// the value itself, depending on `kind` below. - pub val: ValueRef, - - /// The rust type of the value. - pub ty: Ty<'tcx>, - - /// Indicates whether this is by-ref or by-value. - pub kind: K, -} - -impl<'tcx, K: fmt::Debug> fmt::Debug for Datum<'tcx, K> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Datum({:?}, {:?}, {:?})", - Value(self.val), self.ty, self.kind) - } -} - -pub struct DatumBlock<'blk, 'tcx: 'blk, K> { - pub bcx: Block<'blk, 'tcx>, - pub datum: Datum<'tcx, K>, -} - -#[derive(Debug)] -pub enum Expr { - /// a fresh value that was produced and which has no cleanup yet - /// because it has not yet "landed" into its permanent home - RvalueExpr(Rvalue), - - /// `val` is a pointer into memory for which a cleanup is scheduled - /// (and thus has type *T). If you move out of an Lvalue, you must - /// zero out the memory (FIXME #5016). - LvalueExpr(Lvalue), -} - -#[derive(Copy, Clone, PartialEq, Eq, Debug)] -pub enum DropFlagInfo { - DontZeroJustUse(ast::NodeId), - ZeroAndMaintain(ast::NodeId), - None, -} - -impl DropFlagInfo { - pub fn must_zero(&self) -> bool { - match *self { - DropFlagInfo::DontZeroJustUse(..) => false, - DropFlagInfo::ZeroAndMaintain(..) => true, - DropFlagInfo::None => true, - } - } - - pub fn hint_datum<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>) - -> Option> { - let id = match *self { - DropFlagInfo::None => return None, - DropFlagInfo::DontZeroJustUse(id) | - DropFlagInfo::ZeroAndMaintain(id) => id, - }; - - let hints = bcx.fcx.lldropflag_hints.borrow(); - let retval = hints.hint_datum(id); - assert!(retval.is_some(), "An id (={}) means must have a hint", id); - retval - } -} - -// FIXME: having Lvalue be `Copy` is a bit of a footgun, since clients -// may not realize that subparts of an Lvalue can have a subset of -// drop-flags associated with them, while this as written will just -// memcpy the drop_flag_info. But, it is an easier way to get `_match` -// off the ground to just let this be `Copy` for now. -#[derive(Copy, Clone, Debug)] -pub struct Lvalue { - pub source: &'static str, - pub drop_flag_info: DropFlagInfo -} - -#[derive(Debug)] -pub struct Rvalue { - pub mode: RvalueMode -} - -/// Classifies what action we should take when a value is moved away -/// with respect to its drop-flag. 
-/// -/// Long term there will be no need for this classification: all flags -/// (which will be stored on the stack frame) will have the same -/// interpretation and maintenance code associated with them. -#[derive(Copy, Clone, Debug)] -pub enum HintKind { - /// When the value is moved, set the drop-flag to "dropped" - /// (i.e. "zero the flag", even when the specific representation - /// is not literally 0) and when it is reinitialized, set the - /// drop-flag back to "initialized". - ZeroAndMaintain, - - /// When the value is moved, do not set the drop-flag to "dropped" - /// However, continue to read the drop-flag in deciding whether to - /// drop. (In essence, the path/fragment in question will never - /// need to be dropped at the points where it is moved away by - /// this code, but we are defending against the scenario where - /// some *other* code could move away (or drop) the value and thus - /// zero-the-flag, which is why we will still read from it. - DontZeroJustUse, -} - -impl Lvalue { // Constructors for various Lvalues. - pub fn new<'blk, 'tcx>(source: &'static str) -> Lvalue { - debug!("Lvalue at {} no drop flag info", source); - Lvalue { source: source, drop_flag_info: DropFlagInfo::None } - } - - pub fn new_dropflag_hint(source: &'static str) -> Lvalue { - debug!("Lvalue at {} is drop flag hint", source); - Lvalue { source: source, drop_flag_info: DropFlagInfo::None } - } - - pub fn new_with_hint<'blk, 'tcx>(source: &'static str, - bcx: Block<'blk, 'tcx>, - id: ast::NodeId, - k: HintKind) -> Lvalue { - let (opt_id, info) = { - let hint_available = Lvalue::has_dropflag_hint(bcx, id) && - bcx.tcx().sess.nonzeroing_move_hints(); - let info = match k { - HintKind::ZeroAndMaintain if hint_available => - DropFlagInfo::ZeroAndMaintain(id), - HintKind::DontZeroJustUse if hint_available => - DropFlagInfo::DontZeroJustUse(id), - _ => - DropFlagInfo::None, - }; - (Some(id), info) - }; - debug!("Lvalue at {}, id: {:?} info: {:?}", source, opt_id, info); - Lvalue { source: source, drop_flag_info: info } - } -} // end Lvalue constructor methods. - -impl Lvalue { - fn has_dropflag_hint<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - id: ast::NodeId) -> bool { - let hints = bcx.fcx.lldropflag_hints.borrow(); - hints.has_hint(id) - } - pub fn dropflag_hint<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>) - -> Option> { - self.drop_flag_info.hint_datum(bcx) - } -} - -impl Rvalue { - pub fn new(m: RvalueMode) -> Rvalue { - Rvalue { mode: m } - } -} - -// Make Datum linear for more type safety. -impl Drop for Rvalue { - fn drop(&mut self) { } -} - -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] -pub enum RvalueMode { - /// `val` is a pointer to the actual value (and thus has type *T) - ByRef, - - /// `val` is the actual value (*only used for immediates* like ints, ptrs) - ByValue, -} - -pub fn immediate_rvalue<'tcx>(val: ValueRef, ty: Ty<'tcx>) -> Datum<'tcx, Rvalue> { - return Datum::new(val, ty, Rvalue::new(ByValue)); -} - -pub fn immediate_rvalue_bcx<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - val: ValueRef, - ty: Ty<'tcx>) - -> DatumBlock<'blk, 'tcx, Rvalue> { - return DatumBlock::new(bcx, immediate_rvalue(val, ty)) -} - -/// Allocates temporary space on the stack using alloca() and returns a by-ref Datum pointing to -/// it. The memory will be dropped upon exit from `scope`. The callback `populate` should -/// initialize the memory. 
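The "Make Datum linear" trick above deserves a closer look: an empty `Drop` impl forbids `Copy`, so the compiler tracks every move of an `Rvalue`, and consuming methods then enforce exactly-once use. A minimal reproduction with hypothetical simplified types:

```rust
// Hypothetical miniature of the datum kinds; not the removed rustc API.
struct Rvalue;
impl Drop for Rvalue { fn drop(&mut self) {} } // non-Copy => moves tracked

struct Datum<K> { val: u64, kind: K }

impl Datum<Rvalue> {
    // Consuming `self` models an affine value: once stored, this datum
    // can never be stored (i.e. duplicated) a second time.
    fn store_to(self, dst: &mut u64) { *dst = self.val; }
}

fn main() {
    let d = Datum { val: 22, kind: Rvalue };
    let mut slot = 0;
    d.store_to(&mut slot);
    // d.store_to(&mut slot); // error[E0382]: use of moved value: `d`
    assert_eq!(slot, 22);
}
```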
-/// -/// The flag `zero` indicates how the temporary space itself should be -/// initialized at the outset of the function; the only time that -/// `InitAlloca::Uninit` is a valid value for `zero` is when the -/// caller can prove that either (1.) the code injected by `populate` -/// onto `bcx` always dominates the end of `scope`, or (2.) the data -/// being allocated has no associated destructor. -pub fn lvalue_scratch_datum<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, - ty: Ty<'tcx>, - name: &str, - zero: InitAlloca, - scope: cleanup::ScopeId, - populate: F) - -> DatumBlock<'blk, 'tcx, Lvalue> where - F: FnOnce(Block<'blk, 'tcx>, ValueRef) -> Block<'blk, 'tcx>, -{ - // Very subtle: potentially initialize the scratch memory at point where it is alloca'ed. - // (See discussion at Issue 30530.) - let scratch = alloc_ty_init(bcx, ty, zero, name); - debug!("lvalue_scratch_datum scope={:?} scratch={:?} ty={:?}", - scope, Value(scratch), ty); - - // Subtle. Populate the scratch memory *before* scheduling cleanup. - let bcx = populate(bcx, scratch); - bcx.fcx.schedule_drop_mem(scope, scratch, ty, None); - - DatumBlock::new(bcx, Datum::new(scratch, ty, Lvalue::new("datum::lvalue_scratch_datum"))) -} - -/// Allocates temporary space on the stack using alloca() and returns a by-ref Datum pointing to -/// it. If `zero` is true, the space will be zeroed when it is allocated; this is normally not -/// necessary, but in the case of automatic rooting in match statements it is possible to have -/// temporaries that may not get initialized if a certain arm is not taken, so we must zero them. -/// You must arrange any cleanups etc yourself! -pub fn rvalue_scratch_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - ty: Ty<'tcx>, - name: &str) - -> Datum<'tcx, Rvalue> { - let scratch = alloc_ty(bcx, ty, name); - call_lifetime_start(bcx, scratch); - Datum::new(scratch, ty, Rvalue::new(ByRef)) -} - -/// Indicates the "appropriate" mode for this value, which is either by ref or by value, depending -/// on whether type is immediate or not. -pub fn appropriate_rvalue_mode<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - ty: Ty<'tcx>) -> RvalueMode { - if type_is_immediate(ccx, ty) { - ByValue - } else { - ByRef - } -} - -fn add_rvalue_clean<'a, 'tcx>(mode: RvalueMode, - fcx: &FunctionContext<'a, 'tcx>, - scope: cleanup::ScopeId, - val: ValueRef, - ty: Ty<'tcx>) { - debug!("add_rvalue_clean scope={:?} val={:?} ty={:?}", - scope, Value(val), ty); - match mode { - ByValue => { fcx.schedule_drop_immediate(scope, val, ty); } - ByRef => { - fcx.schedule_lifetime_end(scope, val); - fcx.schedule_drop_mem(scope, val, ty, None); - } - } -} - -pub trait KindOps { - - /// Take appropriate action after the value in `datum` has been - /// stored to a new location. - fn post_store<'blk, 'tcx>(&self, - bcx: Block<'blk, 'tcx>, - val: ValueRef, - ty: Ty<'tcx>) - -> Block<'blk, 'tcx>; - - /// True if this mode is a reference mode, meaning that the datum's - /// val field is a pointer to the actual value - fn is_by_ref(&self) -> bool; - - /// Converts to an Expr kind - fn to_expr_kind(self) -> Expr; - -} - -impl KindOps for Rvalue { - fn post_store<'blk, 'tcx>(&self, - bcx: Block<'blk, 'tcx>, - _val: ValueRef, - _ty: Ty<'tcx>) - -> Block<'blk, 'tcx> { - // No cleanup is scheduled for an rvalue, so we don't have - // to do anything after a move to cancel or duplicate it. 
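The mode choice made by `appropriate_rvalue_mode` above can be pictured with a hypothetical stand-in for `type_is_immediate` (the real predicate is more nuanced, looking at LLVM types and sizes):

```rust
// Sketch only: register-sized scalar-ish values travel ByValue,
// everything else travels behind a pointer (ByRef).
#[derive(Debug, PartialEq)]
enum RvalueMode { ByRef, ByValue }

fn appropriate_rvalue_mode(size: usize, is_aggregate: bool) -> RvalueMode {
    if !is_aggregate && size <= std::mem::size_of::<usize>() {
        RvalueMode::ByValue
    } else {
        RvalueMode::ByRef
    }
}

fn main() {
    assert_eq!(appropriate_rvalue_mode(8, false), RvalueMode::ByValue); // e.g. u64
    assert_eq!(appropriate_rvalue_mode(24, true), RvalueMode::ByRef);   // e.g. a struct
}
```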
- if self.is_by_ref() { - call_lifetime_end(bcx, _val); - } - bcx - } - - fn is_by_ref(&self) -> bool { - self.mode == ByRef - } - - fn to_expr_kind(self) -> Expr { - RvalueExpr(self) - } -} - -impl KindOps for Lvalue { - /// If an lvalue is moved, we must zero out the memory in which it resides so as to cancel - /// cleanup. If an @T lvalue is copied, we must increment the reference count. - fn post_store<'blk, 'tcx>(&self, - bcx: Block<'blk, 'tcx>, - val: ValueRef, - ty: Ty<'tcx>) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("::post_store"); - if bcx.fcx.type_needs_drop(ty) { - // cancel cleanup of affine values: - // 1. if it has drop-hint, mark as moved; then code - // aware of drop-hint won't bother calling the - // drop-glue itself. - if let Some(hint_datum) = self.drop_flag_info.hint_datum(bcx) { - let moved_hint_byte = adt::DTOR_MOVED_HINT; - let hint_llval = hint_datum.to_value().value(); - Store(bcx, C_u8(bcx.fcx.ccx, moved_hint_byte), hint_llval); - } - // 2. if the drop info says its necessary, drop-fill the memory. - if self.drop_flag_info.must_zero() { - let () = drop_done_fill_mem(bcx, val, ty); - } - bcx - } else { - // FIXME (#5016) would be nice to assert this, but we have - // to allow for e.g. DontZeroJustUse flags, for now. - // - // (The dropflag hint construction should be taking - // !type_needs_drop into account; earlier analysis phases - // may not have all the info they need to include such - // information properly, I think; in particular the - // fragments analysis works on a non-monomorphized view of - // the code.) - // - // assert_eq!(self.drop_flag_info, DropFlagInfo::None); - bcx - } - } - - fn is_by_ref(&self) -> bool { - true - } - - fn to_expr_kind(self) -> Expr { - LvalueExpr(self) - } -} - -impl KindOps for Expr { - fn post_store<'blk, 'tcx>(&self, - bcx: Block<'blk, 'tcx>, - val: ValueRef, - ty: Ty<'tcx>) - -> Block<'blk, 'tcx> { - match *self { - LvalueExpr(ref l) => l.post_store(bcx, val, ty), - RvalueExpr(ref r) => r.post_store(bcx, val, ty), - } - } - - fn is_by_ref(&self) -> bool { - match *self { - LvalueExpr(ref l) => l.is_by_ref(), - RvalueExpr(ref r) => r.is_by_ref() - } - } - - fn to_expr_kind(self) -> Expr { - self - } -} - -impl<'tcx> Datum<'tcx, Rvalue> { - /// Schedules a cleanup for this datum in the given scope. That means that this datum is no - /// longer an rvalue datum; hence, this function consumes the datum and returns the contained - /// ValueRef. - pub fn add_clean<'a>(self, - fcx: &FunctionContext<'a, 'tcx>, - scope: cleanup::ScopeId) - -> ValueRef { - add_rvalue_clean(self.kind.mode, fcx, scope, self.val, self.ty); - self.val - } - - /// Returns an lvalue datum (that is, a by ref datum with cleanup scheduled). If `self` is not - /// already an lvalue, cleanup will be scheduled in the temporary scope for `expr_id`. - pub fn to_lvalue_datum_in_scope<'blk>(self, - bcx: Block<'blk, 'tcx>, - name: &str, - scope: cleanup::ScopeId) - -> DatumBlock<'blk, 'tcx, Lvalue> { - let fcx = bcx.fcx; - - match self.kind.mode { - ByRef => { - add_rvalue_clean(ByRef, fcx, scope, self.val, self.ty); - DatumBlock::new(bcx, Datum::new( - self.val, - self.ty, - Lvalue::new("datum::to_lvalue_datum_in_scope"))) - } - - ByValue => { - lvalue_scratch_datum( - bcx, self.ty, name, InitAlloca::Dropped, scope, - |bcx, llval| { - debug!("populate call for Datum::to_lvalue_datum_in_scope \ - self.ty={:?}", self.ty); - // do not call_lifetime_start here; the - // `InitAlloc::Dropped` will start scratch - // value's lifetime at open of function body. 
- let bcx = self.store_to(bcx, llval); - bcx.fcx.schedule_lifetime_end(scope, llval); - bcx - }) - } - } - } - - pub fn to_ref_datum<'blk>(self, bcx: Block<'blk, 'tcx>) - -> DatumBlock<'blk, 'tcx, Rvalue> { - let mut bcx = bcx; - match self.kind.mode { - ByRef => DatumBlock::new(bcx, self), - ByValue => { - let scratch = rvalue_scratch_datum(bcx, self.ty, "to_ref"); - bcx = self.store_to(bcx, scratch.val); - DatumBlock::new(bcx, scratch) - } - } - } - - pub fn to_appropriate_datum<'blk>(self, bcx: Block<'blk, 'tcx>) - -> DatumBlock<'blk, 'tcx, Rvalue> { - match self.appropriate_rvalue_mode(bcx.ccx()) { - ByRef => { - self.to_ref_datum(bcx) - } - ByValue => { - match self.kind.mode { - ByValue => DatumBlock::new(bcx, self), - ByRef => { - let llval = load_ty(bcx, self.val, self.ty); - call_lifetime_end(bcx, self.val); - DatumBlock::new(bcx, Datum::new(llval, self.ty, Rvalue::new(ByValue))) - } - } - } - } - } -} - -/// Methods suitable for "expr" datums that could be either lvalues or -/// rvalues. These include coercions into lvalues/rvalues but also a number -/// of more general operations. (Some of those operations could be moved to -/// the more general `impl Datum`, but it's convenient to have them -/// here since we can `match self.kind` rather than having to implement -/// generic methods in `KindOps`.) -impl<'tcx> Datum<'tcx, Expr> { - fn match_kind(self, if_lvalue: F, if_rvalue: G) -> R where - F: FnOnce(Datum<'tcx, Lvalue>) -> R, - G: FnOnce(Datum<'tcx, Rvalue>) -> R, - { - let Datum { val, ty, kind } = self; - match kind { - LvalueExpr(l) => if_lvalue(Datum::new(val, ty, l)), - RvalueExpr(r) => if_rvalue(Datum::new(val, ty, r)), - } - } - - /// Asserts that this datum *is* an lvalue and returns it. - #[allow(dead_code)] // potentially useful - pub fn assert_lvalue(self) -> Datum<'tcx, Lvalue> { - self.match_kind( - |d| d, - |_| bug!("assert_lvalue given rvalue")) - } - - pub fn store_to_dest<'blk>(self, - bcx: Block<'blk, 'tcx>, - dest: expr::Dest, - expr_id: ast::NodeId) - -> Block<'blk, 'tcx> { - match dest { - expr::Ignore => { - self.add_clean_if_rvalue(bcx, expr_id); - bcx - } - expr::SaveIn(addr) => { - self.store_to(bcx, addr) - } - } - } - - /// Arranges cleanup for `self` if it is an rvalue. Use when you are done working with a value - /// that may need drop. - pub fn add_clean_if_rvalue<'blk>(self, - bcx: Block<'blk, 'tcx>, - expr_id: ast::NodeId) { - self.match_kind( - |_| { /* Nothing to do, cleanup already arranged */ }, - |r| { - let scope = cleanup::temporary_scope(bcx.tcx(), expr_id); - r.add_clean(bcx.fcx, scope); - }) - } - - pub fn to_lvalue_datum<'blk>(self, - bcx: Block<'blk, 'tcx>, - name: &str, - expr_id: ast::NodeId) - -> DatumBlock<'blk, 'tcx, Lvalue> { - debug!("to_lvalue_datum self: {:?}", self); - - self.match_kind( - |l| DatumBlock::new(bcx, l), - |r| { - let scope = cleanup::temporary_scope(bcx.tcx(), expr_id); - r.to_lvalue_datum_in_scope(bcx, name, scope) - }) - } - - /// Ensures that we have an rvalue datum (that is, a datum with no cleanup scheduled). 
- pub fn to_rvalue_datum<'blk>(self, - bcx: Block<'blk, 'tcx>, - name: &'static str) - -> DatumBlock<'blk, 'tcx, Rvalue> { - self.match_kind( - |l| { - let mut bcx = bcx; - match l.appropriate_rvalue_mode(bcx.ccx()) { - ByRef => { - let scratch = rvalue_scratch_datum(bcx, l.ty, name); - bcx = l.store_to(bcx, scratch.val); - DatumBlock::new(bcx, scratch) - } - ByValue => { - let v = load_ty(bcx, l.val, l.ty); - bcx = l.kind.post_store(bcx, l.val, l.ty); - DatumBlock::new(bcx, Datum::new(v, l.ty, Rvalue::new(ByValue))) - } - } - }, - |r| DatumBlock::new(bcx, r)) - } - -} - -/// Methods suitable only for lvalues. These include the various -/// operations to extract components out of compound data structures, -/// such as extracting the field from a struct or a particular element -/// from an array. -impl<'tcx> Datum<'tcx, Lvalue> { - /// Converts a datum into a by-ref value. The datum type must be one which is always passed by - /// reference. - pub fn to_llref(self) -> ValueRef { - self.val - } - - // Extracts a component of a compound data structure (e.g., a field from a - // struct). Note that if self is an opened, unsized type then the returned - // datum may also be unsized _without the size information_. It is the - // callers responsibility to package the result in some way to make a valid - // datum in that case (e.g., by making a fat pointer or opened pair). - pub fn get_element<'blk, F>(&self, bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>, - gep: F) - -> Datum<'tcx, Lvalue> where - F: FnOnce(adt::MaybeSizedValue) -> ValueRef, - { - let val = if type_is_sized(bcx.tcx(), self.ty) { - let val = adt::MaybeSizedValue::sized(self.val); - gep(val) - } else { - let val = adt::MaybeSizedValue::unsized_( - Load(bcx, expr::get_dataptr(bcx, self.val)), - Load(bcx, expr::get_meta(bcx, self.val))); - gep(val) - }; - Datum { - val: val, - kind: Lvalue::new("Datum::get_element"), - ty: ty, - } - } - - pub fn get_vec_base_and_len<'blk>(&self, bcx: Block<'blk, 'tcx>) - -> (ValueRef, ValueRef) { - //! Converts a vector into the slice pair. - - tvec::get_base_and_len(bcx, self.val, self.ty) - } -} - -/// Generic methods applicable to any sort of datum. -impl<'tcx, K: KindOps + fmt::Debug> Datum<'tcx, K> { - pub fn new(val: ValueRef, ty: Ty<'tcx>, kind: K) -> Datum<'tcx, K> { - Datum { val: val, ty: ty, kind: kind } - } - - pub fn to_expr_datum(self) -> Datum<'tcx, Expr> { - let Datum { val, ty, kind } = self; - Datum { val: val, ty: ty, kind: kind.to_expr_kind() } - } - - /// Moves or copies this value into a new home, as appropriate depending on the type of the - /// datum. This method consumes the datum, since it would be incorrect to go on using the datum - /// if the value represented is affine (and hence the value is moved). - pub fn store_to<'blk>(self, - bcx: Block<'blk, 'tcx>, - dst: ValueRef) - -> Block<'blk, 'tcx> { - self.shallow_copy_raw(bcx, dst); - - self.kind.post_store(bcx, self.val, self.ty) - } - - /// Helper function that performs a shallow copy of this value into `dst`, which should be a - /// pointer to a memory location suitable for `self.ty`. `dst` should contain uninitialized - /// memory (either newly allocated, zeroed, or dropped). - /// - /// This function is private to datums because it leaves memory in an unstable state, where the - /// source value has been copied but not zeroed. Public methods are `store_to` (if you no - /// longer need the source value) or `shallow_copy` (if you wish the source value to remain - /// valid). 
- fn shallow_copy_raw<'blk>(&self, - bcx: Block<'blk, 'tcx>, - dst: ValueRef) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("copy_to_no_check"); - - if type_is_zero_size(bcx.ccx(), self.ty) { - return bcx; - } - - if self.kind.is_by_ref() { - memcpy_ty(bcx, dst, self.val, self.ty); - } else { - store_ty(bcx, self.val, dst, self.ty); - } - - return bcx; - } - - /// Copies the value into a new location. This function always preserves the existing datum as - /// a valid value. Therefore, it does not consume `self` and, also, cannot be applied to affine - /// values (since they must never be duplicated). - pub fn shallow_copy<'blk>(&self, - bcx: Block<'blk, 'tcx>, - dst: ValueRef) - -> Block<'blk, 'tcx> { - /*! - * Copies the value into a new location. This function always - * preserves the existing datum as a valid value. Therefore, - * it does not consume `self` and, also, cannot be applied to - * affine values (since they must never be duplicated). - */ - - assert!(!self.ty.moves_by_default(bcx.tcx(), - &bcx.tcx().empty_parameter_environment(), DUMMY_SP)); - self.shallow_copy_raw(bcx, dst) - } - - /// See the `appropriate_rvalue_mode()` function - pub fn appropriate_rvalue_mode<'a>(&self, ccx: &CrateContext<'a, 'tcx>) - -> RvalueMode { - appropriate_rvalue_mode(ccx, self.ty) - } - - /// Converts `self` into a by-value `ValueRef`. Consumes this datum (i.e., absolves you of - /// responsibility to cleanup the value). For this to work, the value must be something - /// scalar-ish (like an int or a pointer) which (1) does not require drop glue and (2) is - /// naturally passed around by value, and not by reference. - pub fn to_llscalarish<'blk>(self, bcx: Block<'blk, 'tcx>) -> ValueRef { - assert!(!bcx.fcx.type_needs_drop(self.ty)); - assert!(self.appropriate_rvalue_mode(bcx.ccx()) == ByValue); - if self.kind.is_by_ref() { - load_ty(bcx, self.val, self.ty) - } else { - self.val - } - } - - pub fn to_llbool<'blk>(self, bcx: Block<'blk, 'tcx>) -> ValueRef { - assert!(self.ty.is_bool()); - self.to_llscalarish(bcx) - } -} - -impl<'blk, 'tcx, K> DatumBlock<'blk, 'tcx, K> { - pub fn new(bcx: Block<'blk, 'tcx>, datum: Datum<'tcx, K>) - -> DatumBlock<'blk, 'tcx, K> { - DatumBlock { bcx: bcx, datum: datum } - } -} - -impl<'blk, 'tcx, K: KindOps + fmt::Debug> DatumBlock<'blk, 'tcx, K> { - pub fn to_expr_datumblock(self) -> DatumBlock<'blk, 'tcx, Expr> { - DatumBlock::new(self.bcx, self.datum.to_expr_datum()) - } -} - -impl<'blk, 'tcx> DatumBlock<'blk, 'tcx, Expr> { - pub fn store_to_dest(self, - dest: expr::Dest, - expr_id: ast::NodeId) -> Block<'blk, 'tcx> { - let DatumBlock { bcx, datum } = self; - datum.store_to_dest(bcx, dest, expr_id) - } - - pub fn to_llbool(self) -> Result<'blk, 'tcx> { - let DatumBlock { datum, bcx } = self; - Result::new(bcx, datum.to_llbool(bcx)) - } -} diff --git a/src/librustc_trans/debuginfo/create_scope_map.rs b/src/librustc_trans/debuginfo/create_scope_map.rs index fe6a48d4c559d..58cf85747374a 100644 --- a/src/librustc_trans/debuginfo/create_scope_map.rs +++ b/src/librustc_trans/debuginfo/create_scope_map.rs @@ -15,58 +15,15 @@ use super::utils::{DIB, span_start}; use llvm; use llvm::debuginfo::{DIScope, DISubprogram}; use common::{CrateContext, FunctionContext}; -use rustc::hir::pat_util; use rustc::mir::repr::{Mir, VisibilityScope}; -use rustc::util::nodemap::NodeMap; use libc::c_uint; use std::ptr; -use syntax_pos::{Span, Pos}; -use syntax::{ast, codemap}; +use syntax_pos::Pos; use rustc_data_structures::bitvec::BitVector; use rustc_data_structures::indexed_vec::{Idx, 
IndexVec}; -use rustc::hir::{self, PatKind}; - -// This procedure builds the *scope map* for a given function, which maps any -// given ast::NodeId in the function's AST to the correct DIScope metadata instance. -// -// This builder procedure walks the AST in execution order and keeps track of -// what belongs to which scope, creating DIScope DIEs along the way, and -// introducing *artificial* lexical scope descriptors where necessary. These -// artificial scopes allow GDB to correctly handle name shadowing. -pub fn create_scope_map(cx: &CrateContext, - args: &[hir::Arg], - fn_entry_block: &hir::Block, - fn_metadata: DISubprogram, - fn_ast_id: ast::NodeId) - -> NodeMap { - let mut scope_map = NodeMap(); - let mut scope_stack = vec!(ScopeStackEntry { scope_metadata: fn_metadata, name: None }); - scope_map.insert(fn_ast_id, fn_metadata); - - // Push argument identifiers onto the stack so arguments integrate nicely - // with variable shadowing. - for arg in args { - pat_util::pat_bindings(&arg.pat, |_, node_id, _, path1| { - scope_stack.push(ScopeStackEntry { scope_metadata: fn_metadata, - name: Some(path1.node) }); - scope_map.insert(node_id, fn_metadata); - }) - } - - // Clang creates a separate scope for function bodies, so let's do this too. - with_new_scope(cx, - fn_entry_block.span, - &mut scope_stack, - &mut scope_map, - |cx, scope_stack, scope_map| { - walk_block(cx, fn_entry_block, scope_stack, scope_map); - }); - - return scope_map; -} /// Produce DIScope DIEs for each MIR Scope which has variables defined in it. /// If debuginfo is disabled, the returned vector is empty. @@ -141,405 +98,3 @@ fn make_mir_scope(ccx: &CrateContext, loc.col.to_usize() as c_uint) }; } - -// local helper functions for walking the AST. -fn with_new_scope(cx: &CrateContext, - scope_span: Span, - scope_stack: &mut Vec , - scope_map: &mut NodeMap, - inner_walk: F) where - F: FnOnce(&CrateContext, &mut Vec, &mut NodeMap), -{ - // Create a new lexical scope and push it onto the stack - let loc = span_start(cx, scope_span); - let file_metadata = file_metadata(cx, &loc.file.name, &loc.file.abs_path); - let parent_scope = scope_stack.last().unwrap().scope_metadata; - - let scope_metadata = unsafe { - llvm::LLVMRustDIBuilderCreateLexicalBlock( - DIB(cx), - parent_scope, - file_metadata, - loc.line as c_uint, - loc.col.to_usize() as c_uint) - }; - - scope_stack.push(ScopeStackEntry { scope_metadata: scope_metadata, name: None }); - - inner_walk(cx, scope_stack, scope_map); - - // pop artificial scopes - while scope_stack.last().unwrap().name.is_some() { - scope_stack.pop(); - } - - if scope_stack.last().unwrap().scope_metadata != scope_metadata { - span_bug!(scope_span, "debuginfo: Inconsistency in scope management."); - } - - scope_stack.pop(); -} - -struct ScopeStackEntry { - scope_metadata: DIScope, - name: Option -} - -fn walk_block(cx: &CrateContext, - block: &hir::Block, - scope_stack: &mut Vec , - scope_map: &mut NodeMap) { - scope_map.insert(block.id, scope_stack.last().unwrap().scope_metadata); - - // The interesting things here are statements and the concluding expression. 
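The removed `create_scope_map` walk is essentially a stack discipline: open a scope per lexical block, record every visited node against the innermost open scope, and close the scope on the way out (artificial scopes for shadowed bindings, handled further below, follow the same pattern). A minimal model with a hypothetical tree type:

```rust
use std::collections::HashMap;

enum Node { Leaf(u32), Block(Vec<Node>) }

// Map each node id to the innermost scope id open when it is visited.
fn walk(n: &Node, stack: &mut Vec<u32>, map: &mut HashMap<u32, u32>, next_scope: &mut u32) {
    match n {
        Node::Leaf(id) => { map.insert(*id, *stack.last().unwrap()); }
        Node::Block(kids) => {
            *next_scope += 1;
            stack.push(*next_scope);                       // open a lexical scope
            for k in kids { walk(k, stack, map, next_scope); }
            stack.pop();                                   // close it on exit
        }
    }
}

fn main() {
    let tree = Node::Block(vec![Node::Leaf(1), Node::Block(vec![Node::Leaf(2)])]);
    let (mut stack, mut map, mut next) = (vec![0], HashMap::new(), 0);
    walk(&tree, &mut stack, &mut map, &mut next);
    assert_eq!(map[&1], 1); // outer block's scope
    assert_eq!(map[&2], 2); // nested block's scope
}
```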
- for statement in &block.stmts { - scope_map.insert(statement.node.id(), - scope_stack.last().unwrap().scope_metadata); - - match statement.node { - hir::StmtDecl(ref decl, _) => - walk_decl(cx, &decl, scope_stack, scope_map), - hir::StmtExpr(ref exp, _) | - hir::StmtSemi(ref exp, _) => - walk_expr(cx, &exp, scope_stack, scope_map), - } - } - - if let Some(ref exp) = block.expr { - walk_expr(cx, &exp, scope_stack, scope_map); - } -} - -fn walk_decl(cx: &CrateContext, - decl: &hir::Decl, - scope_stack: &mut Vec , - scope_map: &mut NodeMap) { - match *decl { - codemap::Spanned { node: hir::DeclLocal(ref local), .. } => { - scope_map.insert(local.id, scope_stack.last().unwrap().scope_metadata); - - walk_pattern(cx, &local.pat, scope_stack, scope_map); - - if let Some(ref exp) = local.init { - walk_expr(cx, &exp, scope_stack, scope_map); - } - } - _ => () - } -} - -fn walk_pattern(cx: &CrateContext, - pat: &hir::Pat, - scope_stack: &mut Vec , - scope_map: &mut NodeMap) { - // Unfortunately, we cannot just use pat_util::pat_bindings() or - // ast_util::walk_pat() here because we have to visit *all* nodes in - // order to put them into the scope map. The above functions don't do that. - match pat.node { - PatKind::Binding(_, ref path1, ref sub_pat_opt) => { - // LLVM does not properly generate 'DW_AT_start_scope' fields - // for variable DIEs. For this reason we have to introduce - // an artificial scope at bindings whenever a variable with - // the same name is declared in *any* parent scope. - // - // Otherwise the following error occurs: - // - // let x = 10; - // - // do_something(); // 'gdb print x' correctly prints 10 - // - // { - // do_something(); // 'gdb print x' prints 0, because it - // // already reads the uninitialized 'x' - // // from the next line... - // let x = 100; - // do_something(); // 'gdb print x' correctly prints 100 - // } - - // Is there already a binding with that name? - // N.B.: this comparison must be UNhygienic... because - // gdb knows nothing about the context, so any two - // variables with the same name will cause the problem. - let name = path1.node; - let need_new_scope = scope_stack - .iter() - .any(|entry| entry.name == Some(name)); - - if need_new_scope { - // Create a new lexical scope and push it onto the stack - let loc = span_start(cx, pat.span); - let file_metadata = file_metadata(cx, &loc.file.name, &loc.file.abs_path); - let parent_scope = scope_stack.last().unwrap().scope_metadata; - - let scope_metadata = unsafe { - llvm::LLVMRustDIBuilderCreateLexicalBlock( - DIB(cx), - parent_scope, - file_metadata, - loc.line as c_uint, - loc.col.to_usize() as c_uint) - }; - - scope_stack.push(ScopeStackEntry { - scope_metadata: scope_metadata, - name: Some(name) - }); - - } else { - // Push a new entry anyway so the name can be found - let prev_metadata = scope_stack.last().unwrap().scope_metadata; - scope_stack.push(ScopeStackEntry { - scope_metadata: prev_metadata, - name: Some(name) - }); - } - - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - - if let Some(ref sub_pat) = *sub_pat_opt { - walk_pattern(cx, &sub_pat, scope_stack, scope_map); - } - } - - PatKind::Wild => { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - } - - PatKind::TupleStruct(_, ref sub_pats, _) => { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - - for p in sub_pats { - walk_pattern(cx, &p, scope_stack, scope_map); - } - } - - PatKind::Path(..) 
=> { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - } - - PatKind::Struct(_, ref field_pats, _) => { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - - for &codemap::Spanned { - node: hir::FieldPat { pat: ref sub_pat, .. }, - .. - } in field_pats { - walk_pattern(cx, &sub_pat, scope_stack, scope_map); - } - } - - PatKind::Tuple(ref sub_pats, _) => { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - - for sub_pat in sub_pats { - walk_pattern(cx, &sub_pat, scope_stack, scope_map); - } - } - - PatKind::Box(ref sub_pat) | PatKind::Ref(ref sub_pat, _) => { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - walk_pattern(cx, &sub_pat, scope_stack, scope_map); - } - - PatKind::Lit(ref exp) => { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - walk_expr(cx, &exp, scope_stack, scope_map); - } - - PatKind::Range(ref exp1, ref exp2) => { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - walk_expr(cx, &exp1, scope_stack, scope_map); - walk_expr(cx, &exp2, scope_stack, scope_map); - } - - PatKind::Vec(ref front_sub_pats, ref middle_sub_pats, ref back_sub_pats) => { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - - for sub_pat in front_sub_pats { - walk_pattern(cx, &sub_pat, scope_stack, scope_map); - } - - if let Some(ref sub_pat) = *middle_sub_pats { - walk_pattern(cx, &sub_pat, scope_stack, scope_map); - } - - for sub_pat in back_sub_pats { - walk_pattern(cx, &sub_pat, scope_stack, scope_map); - } - } - } -} - -fn walk_expr(cx: &CrateContext, - exp: &hir::Expr, - scope_stack: &mut Vec , - scope_map: &mut NodeMap) { - - scope_map.insert(exp.id, scope_stack.last().unwrap().scope_metadata); - - match exp.node { - hir::ExprLit(_) | - hir::ExprBreak(_) | - hir::ExprAgain(_) | - hir::ExprPath(..) 
=> {} - - hir::ExprCast(ref sub_exp, _) | - hir::ExprType(ref sub_exp, _) | - hir::ExprAddrOf(_, ref sub_exp) | - hir::ExprField(ref sub_exp, _) | - hir::ExprTupField(ref sub_exp, _) => - walk_expr(cx, &sub_exp, scope_stack, scope_map), - - hir::ExprBox(ref sub_expr) => { - walk_expr(cx, &sub_expr, scope_stack, scope_map); - } - - hir::ExprRet(ref exp_opt) => match *exp_opt { - Some(ref sub_exp) => walk_expr(cx, &sub_exp, scope_stack, scope_map), - None => () - }, - - hir::ExprUnary(_, ref sub_exp) => { - walk_expr(cx, &sub_exp, scope_stack, scope_map); - } - - hir::ExprAssignOp(_, ref lhs, ref rhs) | - hir::ExprIndex(ref lhs, ref rhs) | - hir::ExprBinary(_, ref lhs, ref rhs) => { - walk_expr(cx, &lhs, scope_stack, scope_map); - walk_expr(cx, &rhs, scope_stack, scope_map); - } - - hir::ExprVec(ref init_expressions) | - hir::ExprTup(ref init_expressions) => { - for ie in init_expressions { - walk_expr(cx, &ie, scope_stack, scope_map); - } - } - - hir::ExprAssign(ref sub_exp1, ref sub_exp2) | - hir::ExprRepeat(ref sub_exp1, ref sub_exp2) => { - walk_expr(cx, &sub_exp1, scope_stack, scope_map); - walk_expr(cx, &sub_exp2, scope_stack, scope_map); - } - - hir::ExprIf(ref cond_exp, ref then_block, ref opt_else_exp) => { - walk_expr(cx, &cond_exp, scope_stack, scope_map); - - with_new_scope(cx, - then_block.span, - scope_stack, - scope_map, - |cx, scope_stack, scope_map| { - walk_block(cx, &then_block, scope_stack, scope_map); - }); - - match *opt_else_exp { - Some(ref else_exp) => - walk_expr(cx, &else_exp, scope_stack, scope_map), - _ => () - } - } - - hir::ExprWhile(ref cond_exp, ref loop_body, _) => { - walk_expr(cx, &cond_exp, scope_stack, scope_map); - - with_new_scope(cx, - loop_body.span, - scope_stack, - scope_map, - |cx, scope_stack, scope_map| { - walk_block(cx, &loop_body, scope_stack, scope_map); - }) - } - - hir::ExprLoop(ref block, _) | - hir::ExprBlock(ref block) => { - with_new_scope(cx, - block.span, - scope_stack, - scope_map, - |cx, scope_stack, scope_map| { - walk_block(cx, &block, scope_stack, scope_map); - }) - } - - hir::ExprClosure(_, ref decl, ref block, _) => { - with_new_scope(cx, - block.span, - scope_stack, - scope_map, - |cx, scope_stack, scope_map| { - for &hir::Arg { pat: ref pattern, .. } in &decl.inputs { - walk_pattern(cx, &pattern, scope_stack, scope_map); - } - - walk_block(cx, &block, scope_stack, scope_map); - }) - } - - hir::ExprCall(ref fn_exp, ref args) => { - walk_expr(cx, &fn_exp, scope_stack, scope_map); - - for arg_exp in args { - walk_expr(cx, &arg_exp, scope_stack, scope_map); - } - } - - hir::ExprMethodCall(_, _, ref args) => { - for arg_exp in args { - walk_expr(cx, &arg_exp, scope_stack, scope_map); - } - } - - hir::ExprMatch(ref discriminant_exp, ref arms, _) => { - walk_expr(cx, &discriminant_exp, scope_stack, scope_map); - - // For each arm we have to first walk the pattern as these might - // introduce new artificial scopes. It should be sufficient to - // walk only one pattern per arm, as they all must contain the - // same binding names. - - for arm_ref in arms { - let arm_span = arm_ref.pats[0].span; - - with_new_scope(cx, - arm_span, - scope_stack, - scope_map, - |cx, scope_stack, scope_map| { - for pat in &arm_ref.pats { - walk_pattern(cx, &pat, scope_stack, scope_map); - } - - if let Some(ref guard_exp) = arm_ref.guard { - walk_expr(cx, &guard_exp, scope_stack, scope_map) - } - - walk_expr(cx, &arm_ref.body, scope_stack, scope_map); - }) - } - } - - hir::ExprStruct(_, ref fields, ref base_exp) => { - for &hir::Field { expr: ref exp, .. 
} in fields { - walk_expr(cx, &exp, scope_stack, scope_map); - } - - match *base_exp { - Some(ref exp) => walk_expr(cx, &exp, scope_stack, scope_map), - None => () - } - } - - hir::ExprInlineAsm(_, ref outputs, ref inputs) => { - for output in outputs { - walk_expr(cx, output, scope_stack, scope_map); - } - - for input in inputs { - walk_expr(cx, input, scope_stack, scope_map); - } - } - } -} diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index f505efb1ab2f9..ba91b44343868 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -14,11 +14,10 @@ use self::MemberDescriptionFactory::*; use self::EnumDiscriminantInfo::*; use super::utils::{debug_context, DIB, span_start, bytes_to_bits, size_and_align_of, - get_namespace_and_span_for_item, create_DIArray, - fn_should_be_ignored, is_node_local_to_unit}; + get_namespace_and_span_for_item, create_DIArray, is_node_local_to_unit}; use super::namespace::mangled_name_of_item; use super::type_names::{compute_debuginfo_type_name, push_debuginfo_type_name}; -use super::{declare_local, VariableKind, VariableAccess, CrateDebugContext}; +use super::{CrateDebugContext}; use context::SharedCrateContext; use session::Session; @@ -26,16 +25,13 @@ use llvm::{self, ValueRef}; use llvm::debuginfo::{DIType, DIFile, DIScope, DIDescriptor, DICompositeType}; use rustc::hir::def_id::DefId; -use rustc::hir::pat_util; use rustc::ty::subst::Substs; -use rustc::hir::map as hir_map; -use rustc::hir::{self, PatKind}; +use rustc::hir; use {type_of, adt, machine, monomorphize}; -use common::{self, CrateContext, FunctionContext, Block}; -use _match::{BindingInfo, TransBindingMode}; +use common::CrateContext; use type_::Type; use rustc::ty::{self, Ty}; -use session::config::{self, FullDebugInfo}; +use session::config; use util::nodemap::FnvHashMap; use util::common::path2cstr; @@ -886,26 +882,6 @@ fn file_metadata_(cx: &CrateContext, key: &str, file_name: &str, work_dir: &str) file_metadata } -/// Finds the scope metadata node for the given AST node. -pub fn scope_metadata(fcx: &FunctionContext, - node_id: ast::NodeId, - error_reporting_span: Span) - -> DIScope { - let scope_map = &fcx.debug_context - .get_ref(error_reporting_span) - .scope_map; - match scope_map.borrow().get(&node_id).cloned() { - Some(scope_metadata) => scope_metadata, - None => { - let node = fcx.ccx.tcx().map.get(node_id); - - span_bug!(error_reporting_span, - "debuginfo: Could not find scope info for node {:?}", - node); - } - } -} - fn basic_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> DIType { @@ -1251,7 +1227,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { -> Vec { let adt = &self.enum_type.ty_adt_def().unwrap(); match *self.type_rep { - adt::General(_, ref struct_defs, _) => { + adt::General(_, ref struct_defs) => { let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata .expect("")); struct_defs @@ -1285,7 +1261,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { } }).collect() }, - adt::Univariant(ref struct_def, _) => { + adt::Univariant(ref struct_def) => { assert!(adt.variants.len() <= 1); if adt.variants.is_empty() { @@ -1635,7 +1611,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, adt::RawNullablePointer { .. } | adt::StructWrappedNullablePointer { .. } | adt::Univariant(..) 
=> None, - adt::General(inttype, _, _) => Some(discriminant_type_metadata(inttype)), + adt::General(inttype, _) => Some(discriminant_type_metadata(inttype)), }; let enum_llvm_type = type_of::type_of(cx, enum_type); @@ -1863,226 +1839,3 @@ pub fn create_global_var_metadata(cx: &CrateContext, ptr::null_mut()); } } - -/// Creates debug information for the given local variable. -/// -/// This function assumes that there's a datum for each pattern component of the -/// local in `bcx.fcx.lllocals`. -/// Adds the created metadata nodes directly to the crate's IR. -pub fn create_local_var_metadata(bcx: Block, local: &hir::Local) { - if bcx.unreachable.get() || - fn_should_be_ignored(bcx.fcx) || - bcx.sess().opts.debuginfo != FullDebugInfo { - return; - } - - let locals = bcx.fcx.lllocals.borrow(); - pat_util::pat_bindings(&local.pat, |_, node_id, span, var_name| { - let datum = match locals.get(&node_id) { - Some(datum) => datum, - None => { - span_bug!(span, - "no entry in lllocals table for {}", - node_id); - } - }; - - if unsafe { llvm::LLVMIsAAllocaInst(datum.val) } == ptr::null_mut() { - span_bug!(span, "debuginfo::create_local_var_metadata() - \ - Referenced variable location is not an alloca!"); - } - - let scope_metadata = scope_metadata(bcx.fcx, node_id, span); - - declare_local(bcx, - var_name.node, - datum.ty, - scope_metadata, - VariableAccess::DirectVariable { alloca: datum.val }, - VariableKind::LocalVariable, - span); - }) -} - -/// Creates debug information for a variable captured in a closure. -/// -/// Adds the created metadata nodes directly to the crate's IR. -pub fn create_captured_var_metadata<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - node_id: ast::NodeId, - env_pointer: ValueRef, - env_index: usize, - captured_by_ref: bool, - span: Span) { - if bcx.unreachable.get() || - fn_should_be_ignored(bcx.fcx) || - bcx.sess().opts.debuginfo != FullDebugInfo { - return; - } - - let cx = bcx.ccx(); - - let ast_item = cx.tcx().map.find(node_id); - - let variable_name = match ast_item { - None => { - span_bug!(span, "debuginfo::create_captured_var_metadata: node not found"); - } - Some(hir_map::NodeLocal(pat)) => { - match pat.node { - PatKind::Binding(_, ref path1, _) => { - path1.node - } - _ => { - span_bug!(span, - "debuginfo::create_captured_var_metadata() - \ - Captured var-id refers to unexpected \ - hir_map variant: {:?}", - ast_item); - } - } - } - _ => { - span_bug!(span, - "debuginfo::create_captured_var_metadata() - \ - Captured var-id refers to unexpected \ - hir_map variant: {:?}", - ast_item); - } - }; - - let variable_type = common::node_id_type(bcx, node_id); - let scope_metadata = bcx.fcx.debug_context.get_ref(span).fn_metadata; - - // env_pointer is the alloca containing the pointer to the environment, - // so it's type is **EnvironmentType. In order to find out the type of - // the environment we have to "dereference" two times. 
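The comment above compresses a lot. As a rough model of the same address computation, using plain pointers in place of the DWARF operations and a hypothetical environment layout:

```rust
// Start from the `**Env` alloca, dereference once to reach the captured
// environment, add the field's byte offset, and dereference again only
// for by-ref captures (matching the optional trailing deref op above).
unsafe fn captured_var_addr(env_alloca: *const *const u8,
                            byte_offset: usize,
                            captured_by_ref: bool) -> *const u8 {
    let env = *env_alloca;            // first deref
    let field = env.add(byte_offset); // plus <offset>
    if captured_by_ref {
        *(field as *const *const u8)  // second deref for by-ref captures
    } else {
        field
    }
}
```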
- let llvm_env_data_type = common::val_ty(env_pointer).element_type() - .element_type(); - let byte_offset_of_var_in_env = machine::llelement_offset(cx, - llvm_env_data_type, - env_index); - - let address_operations = unsafe { - [llvm::LLVMRustDIBuilderCreateOpDeref(), - llvm::LLVMRustDIBuilderCreateOpPlus(), - byte_offset_of_var_in_env as i64, - llvm::LLVMRustDIBuilderCreateOpDeref()] - }; - - let address_op_count = if captured_by_ref { - address_operations.len() - } else { - address_operations.len() - 1 - }; - - let variable_access = VariableAccess::IndirectVariable { - alloca: env_pointer, - address_operations: &address_operations[..address_op_count] - }; - - declare_local(bcx, - variable_name, - variable_type, - scope_metadata, - variable_access, - VariableKind::CapturedVariable, - span); -} - -/// Creates debug information for a local variable introduced in the head of a -/// match-statement arm. -/// -/// Adds the created metadata nodes directly to the crate's IR. -pub fn create_match_binding_metadata<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - variable_name: ast::Name, - binding: BindingInfo<'tcx>) { - if bcx.unreachable.get() || - fn_should_be_ignored(bcx.fcx) || - bcx.sess().opts.debuginfo != FullDebugInfo { - return; - } - - let scope_metadata = scope_metadata(bcx.fcx, binding.id, binding.span); - let aops = unsafe { - [llvm::LLVMRustDIBuilderCreateOpDeref()] - }; - // Regardless of the actual type (`T`) we're always passed the stack slot - // (alloca) for the binding. For ByRef bindings that's a `T*` but for ByMove - // bindings we actually have `T**`. So to get the actual variable we need to - // dereference once more. For ByCopy we just use the stack slot we created - // for the binding. - let var_access = match binding.trmode { - TransBindingMode::TrByCopy(llbinding) | - TransBindingMode::TrByMoveIntoCopy(llbinding) => VariableAccess::DirectVariable { - alloca: llbinding - }, - TransBindingMode::TrByMoveRef => VariableAccess::IndirectVariable { - alloca: binding.llmatch, - address_operations: &aops - }, - TransBindingMode::TrByRef => VariableAccess::DirectVariable { - alloca: binding.llmatch - } - }; - - declare_local(bcx, - variable_name, - binding.ty, - scope_metadata, - var_access, - VariableKind::LocalVariable, - binding.span); -} - -/// Creates debug information for the given function argument. -/// -/// This function assumes that there's a datum for each pattern component of the -/// argument in `bcx.fcx.lllocals`. -/// Adds the created metadata nodes directly to the crate's IR. 
-pub fn create_argument_metadata(bcx: Block, arg: &hir::Arg) { - if bcx.unreachable.get() || - fn_should_be_ignored(bcx.fcx) || - bcx.sess().opts.debuginfo != FullDebugInfo { - return; - } - - let scope_metadata = bcx - .fcx - .debug_context - .get_ref(arg.pat.span) - .fn_metadata; - let locals = bcx.fcx.lllocals.borrow(); - - pat_util::pat_bindings(&arg.pat, |_, node_id, span, var_name| { - let datum = match locals.get(&node_id) { - Some(v) => v, - None => { - span_bug!(span, "no entry in lllocals table for {}", node_id); - } - }; - - if unsafe { llvm::LLVMIsAAllocaInst(datum.val) } == ptr::null_mut() { - span_bug!(span, "debuginfo::create_argument_metadata() - \ - Referenced variable location is not an alloca!"); - } - - let argument_index = { - let counter = &bcx - .fcx - .debug_context - .get_ref(span) - .argument_counter; - let argument_index = counter.get(); - counter.set(argument_index + 1); - argument_index - }; - - declare_local(bcx, - var_name.node, - datum.ty, - scope_metadata, - VariableAccess::DirectVariable { alloca: datum.val }, - VariableKind::ArgumentVariable(argument_index), - span); - }) -} diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs index 1ee000992b9c5..cbf423b0739a3 100644 --- a/src/librustc_trans/debuginfo/mod.rs +++ b/src/librustc_trans/debuginfo/mod.rs @@ -27,15 +27,14 @@ use llvm::debuginfo::{DIFile, DIType, DIScope, DIBuilderRef, DISubprogram, DIArr use rustc::hir::def_id::DefId; use rustc::hir::map::DefPathData; use rustc::ty::subst::Substs; -use rustc::hir; use abi::Abi; -use common::{NodeIdAndSpan, CrateContext, FunctionContext, Block, BlockAndBuilder}; -use inline; +use common::{CrateContext, FunctionContext, Block, BlockAndBuilder}; use monomorphize::{self, Instance}; use rustc::ty::{self, Ty}; +use rustc::mir::repr as mir; use session::config::{self, FullDebugInfo, LimitedDebugInfo, NoDebugInfo}; -use util::nodemap::{DefIdMap, NodeMap, FnvHashMap, FnvHashSet}; +use util::nodemap::{DefIdMap, FnvHashMap, FnvHashSet}; use libc::c_uint; use std::cell::{Cell, RefCell}; @@ -56,13 +55,7 @@ mod source_loc; pub use self::create_scope_map::create_mir_scopes; pub use self::source_loc::start_emitting_source_locations; -pub use self::source_loc::get_cleanup_debug_loc_for_ast_node; -pub use self::source_loc::with_source_location_override; -pub use self::metadata::create_match_binding_metadata; -pub use self::metadata::create_argument_metadata; -pub use self::metadata::create_captured_var_metadata; pub use self::metadata::create_global_var_metadata; -pub use self::metadata::create_local_var_metadata; #[allow(non_upper_case_globals)] const DW_TAG_auto_variable: c_uint = 0x100; @@ -140,9 +133,7 @@ impl FunctionDebugContext { } pub struct FunctionDebugContextData { - scope_map: RefCell>, fn_metadata: DISubprogram, - argument_counter: Cell, source_locations_enabled: Cell, source_location_override: Cell, } @@ -229,7 +220,8 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, instance: Instance<'tcx>, sig: &ty::FnSig<'tcx>, abi: Abi, - llfn: ValueRef) -> FunctionDebugContext { + llfn: ValueRef, + mir: &mir::Mir) -> FunctionDebugContext { if cx.sess().opts.debuginfo == NoDebugInfo { return FunctionDebugContext::DebugInfoDisabled; } @@ -238,8 +230,8 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, // Do this here already, in case we do an early exit from this function. 
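The removed argument handling numbers parameters with an interior-mutability counter on the per-function debug context. A minimal reproduction of just that counter (the real code seeds it at 1, as below):

```rust
use std::cell::Cell;

// Hypothetical cut-down context; only the counter is modeled here.
struct FunctionDebugContextData { argument_counter: Cell<usize> }

fn next_argument_index(data: &FunctionDebugContextData) -> usize {
    let index = data.argument_counter.get();
    data.argument_counter.set(index + 1); // Cell allows this through &self
    index
}

fn main() {
    let data = FunctionDebugContextData { argument_counter: Cell::new(1) };
    assert_eq!(next_argument_index(&data), 1);
    assert_eq!(next_argument_index(&data), 2);
}
```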
source_loc::set_debug_location(cx, None, UnknownLocation); - let instance = inline::maybe_inline_instance(cx, instance); - let (containing_scope, span) = get_containing_scope_and_span(cx, instance); + let containing_scope = get_containing_scope(cx, instance); + let span = mir.span; // This can be the case for functions inlined from another crate if span == syntax_pos::DUMMY_SP { @@ -305,9 +297,7 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, // Initialize fn debug context (including scope map and namespace map) let fn_debug_context = box FunctionDebugContextData { - scope_map: RefCell::new(NodeMap()), fn_metadata: fn_metadata, - argument_counter: Cell::new(1), source_locations_enabled: Cell::new(false), source_location_override: Cell::new(false), }; @@ -414,9 +404,9 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, names } - fn get_containing_scope_and_span<'ccx, 'tcx>(cx: &CrateContext<'ccx, 'tcx>, - instance: Instance<'tcx>) - -> (DIScope, Span) { + fn get_containing_scope<'ccx, 'tcx>(cx: &CrateContext<'ccx, 'tcx>, + instance: Instance<'tcx>) + -> DIScope { // First, let's see if this is a method within an inherent impl. Because // if yes, we want to make the result subroutine DIE a child of the // subroutine's self-type. @@ -436,41 +426,15 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } }); - let containing_scope = self_type.unwrap_or_else(|| { + self_type.unwrap_or_else(|| { namespace::item_namespace(cx, DefId { krate: instance.def.krate, index: cx.tcx() .def_key(instance.def) .parent - .expect("get_containing_scope_and_span: missing parent?") + .expect("get_containing_scope: missing parent?") }) - }); - - // Try to get some span information, if we have an inlined item. - let definition_span = cx.tcx() - .map - .def_id_span(instance.def, syntax_pos::DUMMY_SP); - - (containing_scope, definition_span) - } -} - -/// Computes the scope map for a function given its declaration and body. 
-pub fn fill_scope_map_for_function<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, - fn_decl: &hir::FnDecl, - top_level_block: &hir::Block, - fn_ast_id: ast::NodeId) { - match fcx.debug_context { - FunctionDebugContext::RegularContext(box ref data) => { - let scope_map = create_scope_map::create_scope_map(fcx.ccx, - &fn_decl.inputs, - top_level_block, - data.fn_metadata, - fn_ast_id); - *data.scope_map.borrow_mut() = scope_map; - } - FunctionDebugContext::DebugInfoDisabled | - FunctionDebugContext::FunctionWithoutDebugInfo => {} + }) } } @@ -548,7 +512,6 @@ pub fn declare_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum DebugLoc { - At(ast::NodeId, Span), ScopeAt(DIScope, Span), None } @@ -562,28 +525,3 @@ impl DebugLoc { source_loc::set_source_location(bcx.fcx(), Some(bcx), self); } } - -pub trait ToDebugLoc { - fn debug_loc(&self) -> DebugLoc; -} - -impl ToDebugLoc for hir::Expr { - fn debug_loc(&self) -> DebugLoc { - DebugLoc::At(self.id, self.span) - } -} - -impl ToDebugLoc for NodeIdAndSpan { - fn debug_loc(&self) -> DebugLoc { - DebugLoc::At(self.id, self.span) - } -} - -impl ToDebugLoc for Option { - fn debug_loc(&self) -> DebugLoc { - match *self { - Some(NodeIdAndSpan { id, span }) => DebugLoc::At(id, span), - None => DebugLoc::None - } - } -} diff --git a/src/librustc_trans/debuginfo/source_loc.rs b/src/librustc_trans/debuginfo/source_loc.rs index d288b9dcef70b..1aee27c144a36 100644 --- a/src/librustc_trans/debuginfo/source_loc.rs +++ b/src/librustc_trans/debuginfo/source_loc.rs @@ -11,79 +11,17 @@ use self::InternalDebugLocation::*; use super::utils::{debug_context, span_start}; -use super::metadata::{scope_metadata,UNKNOWN_COLUMN_NUMBER}; +use super::metadata::{UNKNOWN_COLUMN_NUMBER}; use super::{FunctionDebugContext, DebugLoc}; use llvm; use llvm::debuginfo::DIScope; use builder::Builder; -use common::{NodeIdAndSpan, CrateContext, FunctionContext}; +use common::{CrateContext, FunctionContext}; use libc::c_uint; use std::ptr; -use syntax_pos::{self, Span, Pos}; -use syntax::ast; - -pub fn get_cleanup_debug_loc_for_ast_node<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - node_id: ast::NodeId, - node_span: Span, - is_block: bool) - -> NodeIdAndSpan { - // A debug location needs two things: - // (1) A span (of which only the beginning will actually be used) - // (2) An AST node-id which will be used to look up the lexical scope - // for the location in the functions scope-map - // - // This function will calculate the debug location for compiler-generated - // cleanup calls that are executed when control-flow leaves the - // scope identified by `node_id`. - // - // For everything but block-like things we can simply take id and span of - // the given expression, meaning that from a debugger's view cleanup code is - // executed at the same source location as the statement/expr itself. - // - // Blocks are a special case. Here we want the cleanup to be linked to the - // closing curly brace of the block. The *scope* the cleanup is executed in - // is up to debate: It could either still be *within* the block being - // cleaned up, meaning that locals from the block are still visible in the - // debugger. - // Or it could be in the scope that the block is contained in, so any locals - // from within the block are already considered out-of-scope and thus not - // accessible in the debugger anymore. - // - // The current implementation opts for the second option: cleanup of a block - // already happens in the parent scope of the block. 
The main reason for - // this decision is that scoping becomes controlflow dependent when variable - // shadowing is involved and it's impossible to decide statically which - // scope is actually left when the cleanup code is executed. - // In practice it shouldn't make much of a difference. - - let mut cleanup_span = node_span; - - if is_block { - // Not all blocks actually have curly braces (e.g. simple closure - // bodies), in which case we also just want to return the span of the - // whole expression. - let code_snippet = cx.sess().codemap().span_to_snippet(node_span); - if let Ok(code_snippet) = code_snippet { - let bytes = code_snippet.as_bytes(); - - if !bytes.is_empty() && &bytes[bytes.len()-1..] == b"}" { - cleanup_span = Span { - lo: node_span.hi - syntax_pos::BytePos(1), - hi: node_span.hi, - expn_id: node_span.expn_id - }; - } - } - } - - NodeIdAndSpan { - id: node_id, - span: cleanup_span - } -} - +use syntax_pos::Pos; /// Sets the current debug location at the beginning of the span. /// @@ -109,9 +47,6 @@ pub fn set_source_location(fcx: &FunctionContext, let dbg_loc = if function_debug_context.source_locations_enabled.get() { let (scope, span) = match debug_loc { - DebugLoc::At(node_id, span) => { - (scope_metadata(fcx, node_id, span), span) - } DebugLoc::ScopeAt(scope, span) => (scope, span), DebugLoc::None => { set_debug_location(fcx.ccx, builder, UnknownLocation); @@ -129,35 +64,6 @@ pub fn set_source_location(fcx: &FunctionContext, set_debug_location(fcx.ccx, builder, dbg_loc); } -/// This function makes sure that all debug locations emitted while executing -/// `wrapped_function` are set to the given `debug_loc`. -pub fn with_source_location_override(fcx: &FunctionContext, - debug_loc: DebugLoc, - wrapped_function: F) -> R - where F: FnOnce() -> R -{ - match fcx.debug_context { - FunctionDebugContext::DebugInfoDisabled => { - wrapped_function() - } - FunctionDebugContext::FunctionWithoutDebugInfo => { - set_debug_location(fcx.ccx, None, UnknownLocation); - wrapped_function() - } - FunctionDebugContext::RegularContext(box ref function_debug_context) => { - if function_debug_context.source_location_override.get() { - wrapped_function() - } else { - debug_loc.apply(fcx); - function_debug_context.source_location_override.set(true); - let result = wrapped_function(); - function_debug_context.source_location_override.set(false); - result - } - } - } -} - /// Enables emitting source locations for the given functions. /// /// Since we don't want source locations to be emitted for the function prelude, diff --git a/src/librustc_trans/debuginfo/utils.rs b/src/librustc_trans/debuginfo/utils.rs index 5734a12394113..3cdac485fecc9 100644 --- a/src/librustc_trans/debuginfo/utils.rs +++ b/src/librustc_trans/debuginfo/utils.rs @@ -10,7 +10,7 @@ // Utility Functions. 
-use super::{FunctionDebugContext, CrateDebugContext}; +use super::{CrateDebugContext}; use super::namespace::item_namespace; use rustc::hir::def_id::DefId; @@ -18,7 +18,7 @@ use rustc::hir::def_id::DefId; use llvm; use llvm::debuginfo::{DIScope, DIBuilderRef, DIDescriptor, DIArray}; use machine; -use common::{CrateContext, FunctionContext}; +use common::{CrateContext}; use type_::Type; use syntax_pos::{self, Span}; @@ -70,13 +70,6 @@ pub fn DIB(cx: &CrateContext) -> DIBuilderRef { cx.dbg_cx().as_ref().unwrap().builder } -pub fn fn_should_be_ignored(fcx: &FunctionContext) -> bool { - match fcx.debug_context { - FunctionDebugContext::RegularContext(_) => false, - _ => true - } -} - pub fn get_namespace_and_span_for_item(cx: &CrateContext, def_id: DefId) -> (DIScope, Span) { let containing_scope = item_namespace(cx, DefId { diff --git a/src/librustc_trans/expr.rs b/src/librustc_trans/expr.rs deleted file mode 100644 index beb589c80bfc5..0000000000000 --- a/src/librustc_trans/expr.rs +++ /dev/null @@ -1,2473 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or -// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license -// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! # Translation of Expressions -//! -//! The expr module handles translation of expressions. The most general -//! translation routine is `trans()`, which will translate an expression -//! into a datum. `trans_into()` is also available, which will translate -//! an expression and write the result directly into memory, sometimes -//! avoiding the need for a temporary stack slot. Finally, -//! `trans_to_lvalue()` is available if you'd like to ensure that the -//! result has cleanup scheduled. -//! -//! Internally, each of these functions dispatches to various other -//! expression functions depending on the kind of expression. We divide -//! up expressions into: -//! -//! - **Datum expressions:** Those that most naturally yield values. -//! Examples would be `22`, `box x`, or `a + b` (when not overloaded). -//! - **DPS expressions:** Those that most naturally write into a location -//! in memory. Examples would be `foo()` or `Point { x: 3, y: 4 }`. -//! - **Statement expressions:** Those that do not generate a meaningful -//! result. Examples would be `while { ... }` or `return 44`. -//! -//! Public entry points: -//! -//! - `trans_into(bcx, expr, dest) -> bcx`: evaluates an expression, -//! storing the result into `dest`. This is the preferred form, if you -//! can manage it (see the sketch after this list). -//! -//! - `trans(bcx, expr) -> DatumBlock`: evaluates an expression, yielding -//! `Datum<Expr>` with the result. You can then store the datum, inspect -//! the value, etc. This may introduce temporaries if the datum is a -//! structural type. -//! -//! - `trans_to_lvalue(bcx, expr, "...") -> DatumBlock`: evaluates an -//! expression and ensures that the result has a cleanup associated with it, -//! creating a temporary stack slot if necessary. -//! -//! - `trans_var -> Datum`: looks up a local variable, upvar or static.
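A minimal sketch of the contrast between the first two entry points (hypothetical, drastically simplified signatures; the real functions thread `bcx` and datums through):

```rust
// Where the result should land, mirroring expr::Dest below.
enum Dest<'a> { SaveIn(&'a mut i32), Ignore }

fn trans(expr: i32) -> i32 { expr + 1 } // stand-in for real translation

// Writes straight into the destination, so no temporary is needed.
fn trans_into(expr: i32, dest: Dest) {
    match dest {
        Dest::SaveIn(slot) => *slot = trans(expr),
        Dest::Ignore => { trans(expr); } // evaluate for effect only
    }
}

fn main() {
    let mut out = 0;
    trans_into(41, Dest::SaveIn(&mut out));
    assert_eq!(out, 42);
}
```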
- -#![allow(non_camel_case_types)] - -pub use self::Dest::*; -use self::lazy_binop_ty::*; - -use llvm::{self, ValueRef, TypeKind}; -use middle::const_qualif::ConstQualif; -use rustc::hir::def::Def; -use rustc::ty::subst::Substs; -use {_match, abi, adt, asm, base, closure, consts, controlflow}; -use base::*; -use build::*; -use callee::{Callee, ArgExprs, ArgOverloadedCall, ArgOverloadedOp}; -use cleanup::{self, CleanupMethods, DropHintMethods}; -use common::*; -use datum::*; -use debuginfo::{self, DebugLoc, ToDebugLoc}; -use glue; -use machine; -use tvec; -use type_of; -use value::Value; -use Disr; -use rustc::ty::adjustment::{AdjustNeverToAny, AdjustDerefRef, AdjustReifyFnPointer}; -use rustc::ty::adjustment::{AdjustUnsafeFnPointer, AdjustMutToConstPointer}; -use rustc::ty::adjustment::CustomCoerceUnsized; -use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::MethodCall; -use rustc::ty::cast::{CastKind, CastTy}; -use util::common::indenter; -use machine::{llsize_of, llsize_of_alloc}; -use type_::Type; - -use rustc::hir; - -use syntax::ast; -use syntax::parse::token::InternedString; -use syntax_pos; -use std::fmt; -use std::mem; - -// Destinations - -// These are passed around by the code generating functions to track the -// destination of a computation's value. - -#[derive(Copy, Clone, PartialEq)] -pub enum Dest { - SaveIn(ValueRef), - Ignore, -} - -impl fmt::Debug for Dest { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - SaveIn(v) => write!(f, "SaveIn({:?})", Value(v)), - Ignore => f.write_str("Ignore") - } - } -} - -/// This function is equivalent to `trans(bcx, expr).store_to_dest(dest)` but it may generate -/// better optimized LLVM code. -pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - dest: Dest) - -> Block<'blk, 'tcx> { - let mut bcx = bcx; - - expr.debug_loc().apply(bcx.fcx); - - if adjustment_required(bcx, expr) { - // use trans, which may be less efficient but - // which will perform the adjustments: - let datum = unpack_datum!(bcx, trans(bcx, expr)); - return datum.store_to_dest(bcx, dest, expr.id); - } - - let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap(); - if !qualif.intersects(ConstQualif::NOT_CONST | ConstQualif::NEEDS_DROP) { - if !qualif.intersects(ConstQualif::PREFER_IN_PLACE) { - if let SaveIn(lldest) = dest { - match consts::get_const_expr_as_global(bcx.ccx(), expr, qualif, - bcx.fcx.param_substs, - consts::TrueConst::No) { - Ok(global) => { - // Cast pointer to destination, because constants - // have different types. - let lldest = PointerCast(bcx, lldest, val_ty(global)); - memcpy_ty(bcx, lldest, global, expr_ty_adjusted(bcx, expr)); - return bcx; - }, - Err(consts::ConstEvalFailure::Runtime(_)) => { - // in case const evaluation errors, translate normally - // debug assertions catch the same errors - // see RFC 1229 - }, - Err(consts::ConstEvalFailure::Compiletime(_)) => { - return bcx; - }, - } - } - - // If we see a const here, that's because it evaluates to a type with zero size. We - // should be able to just discard it, since const expressions are guaranteed not to - // have side effects. This seems to be reached through tuple struct constructors being - // passed zero-size constants. - if let hir::ExprPath(..) 
= expr.node { - match bcx.tcx().expect_def(expr.id) { - Def::Const(_) | Def::AssociatedConst(_) => { - assert!(type_is_zero_size(bcx.ccx(), bcx.tcx().node_id_to_type(expr.id))); - return bcx; - } - _ => {} - } - } - - // Even if we don't have a value to emit, and the expression - // doesn't have any side-effects, we still have to translate the - // body of any closures. - // FIXME: Find a better way of handling this case. - } else { - // The only way we're going to see a `const` at this point is if - // it prefers in-place instantiation, likely because it contains - // `[x; N]` somewhere within. - match expr.node { - hir::ExprPath(..) => { - match bcx.tcx().expect_def(expr.id) { - Def::Const(did) | Def::AssociatedConst(did) => { - let empty_substs = Substs::empty(bcx.tcx()); - let const_expr = consts::get_const_expr(bcx.ccx(), did, expr, - empty_substs); - // Temporarily get cleanup scopes out of the way, - // as they require sub-expressions to be contained - // inside the current AST scope. - // These should record no cleanups anyways, `const` - // can't have destructors. - let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(), - vec![]); - // Lock emitted debug locations to the location of - // the constant reference expression. - debuginfo::with_source_location_override(bcx.fcx, - expr.debug_loc(), - || { - bcx = trans_into(bcx, const_expr, dest) - }); - let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(), - scopes); - assert!(scopes.is_empty()); - return bcx; - } - _ => {} - } - } - _ => {} - } - } - } - - debug!("trans_into() expr={:?}", expr); - - let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), - expr.id, - expr.span, - false); - bcx.fcx.push_ast_cleanup_scope(cleanup_debug_loc); - - let kind = expr_kind(bcx.tcx(), expr); - bcx = match kind { - ExprKind::Lvalue | ExprKind::RvalueDatum => { - trans_unadjusted(bcx, expr).store_to_dest(dest, expr.id) - } - ExprKind::RvalueDps => { - trans_rvalue_dps_unadjusted(bcx, expr, dest) - } - ExprKind::RvalueStmt => { - trans_rvalue_stmt_unadjusted(bcx, expr) - } - }; - - bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id) -} - -/// Translates an expression, returning a datum (and new block) encapsulating the result. When -/// possible, it is preferred to use `trans_into`, as that may avoid creating a temporary on the -/// stack. -pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - debug!("trans(expr={:?})", expr); - - let mut bcx = bcx; - let fcx = bcx.fcx; - let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap(); - let adjusted_global = !qualif.intersects(ConstQualif::NON_STATIC_BORROWS); - let global = if !qualif.intersects(ConstQualif::NOT_CONST | ConstQualif::NEEDS_DROP) { - match consts::get_const_expr_as_global(bcx.ccx(), expr, qualif, - bcx.fcx.param_substs, - consts::TrueConst::No) { - Ok(global) => { - if qualif.intersects(ConstQualif::HAS_STATIC_BORROWS) { - // Is borrowed as 'static, must return lvalue. - - // Cast pointer to global, because constants have different types. - let const_ty = expr_ty_adjusted(bcx, expr); - let llty = type_of::type_of(bcx.ccx(), const_ty); - let global = PointerCast(bcx, global, llty.ptr_to()); - let datum = Datum::new(global, const_ty, Lvalue::new("expr::trans")); - return DatumBlock::new(bcx, datum.to_expr_datum()); - } - - // Otherwise, keep around and perform adjustments, if needed. 
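The scope-parking trick just above (`mem::replace` the cleanup scopes with an empty vector, translate the `const`, then restore) is worth seeing in isolation. A standalone sketch with invented names, under the source's stated assumption that `const` translation schedules no cleanups:

    use std::mem;

    // Park `scopes`, run `f` with a clean slate, then restore the originals;
    // the swapped-back vector must be empty, mirroring the assert above.
    fn with_scopes_parked<T, F: FnOnce(&mut Vec<T>)>(scopes: &mut Vec<T>, f: F) {
        let parked = mem::replace(scopes, Vec::new());
        f(scopes);
        let leftover = mem::replace(scopes, parked);
        assert!(leftover.is_empty()); // consts can't have destructors
    }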
- let const_ty = if adjusted_global { - expr_ty_adjusted(bcx, expr) - } else { - expr_ty(bcx, expr) - }; - - // This could use a better heuristic. - Some(if type_is_immediate(bcx.ccx(), const_ty) { - // Cast pointer to global, because constants have different types. - let llty = type_of::type_of(bcx.ccx(), const_ty); - let global = PointerCast(bcx, global, llty.ptr_to()); - // Maybe just get the value directly, instead of loading it? - immediate_rvalue(load_ty(bcx, global, const_ty), const_ty) - } else { - let scratch = alloc_ty(bcx, const_ty, "const"); - call_lifetime_start(bcx, scratch); - let lldest = if !const_ty.is_structural() { - // Cast pointer to slot, because constants have different types. - PointerCast(bcx, scratch, val_ty(global)) - } else { - // In this case, memcpy_ty calls llvm.memcpy after casting both - // source and destination to i8*, so we don't need any casts. - scratch - }; - memcpy_ty(bcx, lldest, global, const_ty); - Datum::new(scratch, const_ty, Rvalue::new(ByRef)) - }) - }, - Err(consts::ConstEvalFailure::Runtime(_)) => { - // in case const evaluation errors, translate normally - // debug assertions catch the same errors - // see RFC 1229 - None - }, - Err(consts::ConstEvalFailure::Compiletime(_)) => { - // generate a dummy llvm value - let const_ty = expr_ty(bcx, expr); - let llty = type_of::type_of(bcx.ccx(), const_ty); - let dummy = C_undef(llty.ptr_to()); - Some(Datum::new(dummy, const_ty, Rvalue::new(ByRef))) - }, - } - } else { - None - }; - - let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), - expr.id, - expr.span, - false); - fcx.push_ast_cleanup_scope(cleanup_debug_loc); - let datum = match global { - Some(rvalue) => rvalue.to_expr_datum(), - None => unpack_datum!(bcx, trans_unadjusted(bcx, expr)) - }; - let datum = if adjusted_global { - datum // trans::consts already performed adjustments. - } else { - unpack_datum!(bcx, apply_adjustments(bcx, expr, datum)) - }; - bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id); - return DatumBlock::new(bcx, datum); -} - -pub fn get_meta(bcx: Block, fat_ptr: ValueRef) -> ValueRef { - StructGEP(bcx, fat_ptr, abi::FAT_PTR_EXTRA) -} - -pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef { - StructGEP(bcx, fat_ptr, abi::FAT_PTR_ADDR) -} - -pub fn copy_fat_ptr(bcx: Block, src_ptr: ValueRef, dst_ptr: ValueRef) { - Store(bcx, Load(bcx, get_dataptr(bcx, src_ptr)), get_dataptr(bcx, dst_ptr)); - Store(bcx, Load(bcx, get_meta(bcx, src_ptr)), get_meta(bcx, dst_ptr)); -} - -fn adjustment_required<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr) -> bool { - let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() { - None => { return false; } - Some(adj) => adj - }; - - // Don't skip a conversion from Box to &T, etc. - if bcx.tcx().is_overloaded_autoderef(expr.id, 0) { - return true; - } - - match adjustment { - AdjustNeverToAny(..) => true, - AdjustReifyFnPointer => true, - AdjustUnsafeFnPointer | AdjustMutToConstPointer => { - // purely a type-level thing - false - } - AdjustDerefRef(ref adj) => { - // We are a bit paranoid about adjustments and thus might have a re- - // borrow here which merely derefs and then refs again (it might have - // a different region or mutability, but we don't care here). - !(adj.autoderefs == 1 && adj.autoref.is_some() && adj.unsize.is_none()) - } - } -} - -/// Helper for trans that apply adjustments from `expr` to `datum`, which should be the unadjusted -/// translation of `expr`. 
-fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - datum: Datum<'tcx, Expr>) - -> DatumBlock<'blk, 'tcx, Expr> -{ - let mut bcx = bcx; - let mut datum = datum; - let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() { - None => { - return DatumBlock::new(bcx, datum); - } - Some(adj) => { adj } - }; - debug!("unadjusted datum for expr {:?}: {:?} adjustment={:?}", - expr, datum, adjustment); - match adjustment { - AdjustNeverToAny(ref target) => { - let mono_target = bcx.monomorphize(target); - let llty = type_of::type_of(bcx.ccx(), mono_target); - let dummy = C_undef(llty.ptr_to()); - datum = Datum::new(dummy, mono_target, Lvalue::new("never")).to_expr_datum(); - } - AdjustReifyFnPointer => { - match datum.ty.sty { - ty::TyFnDef(def_id, substs, _) => { - datum = Callee::def(bcx.ccx(), def_id, substs) - .reify(bcx.ccx()).to_expr_datum(); - } - _ => { - bug!("{} cannot be reified to a fn ptr", datum.ty) - } - } - } - AdjustUnsafeFnPointer | AdjustMutToConstPointer => { - // purely a type-level thing - } - AdjustDerefRef(ref adj) => { - let skip_reborrows = if adj.autoderefs == 1 && adj.autoref.is_some() { - // We are a bit paranoid about adjustments and thus might have a re- - // borrow here which merely derefs and then refs again (it might have - // a different region or mutability, but we don't care here). - match datum.ty.sty { - // Don't skip a conversion from Box to &T, etc. - ty::TyRef(..) => { - if bcx.tcx().is_overloaded_autoderef(expr.id, 0) { - // Don't skip an overloaded deref. - 0 - } else { - 1 - } - } - _ => 0 - } - } else { - 0 - }; - - if adj.autoderefs > skip_reborrows { - // Schedule cleanup. - let lval = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "auto_deref", expr.id)); - datum = unpack_datum!(bcx, deref_multiple(bcx, expr, - lval.to_expr_datum(), - adj.autoderefs - skip_reborrows)); - } - - // (You might think there is a more elegant way to do this than a - // skip_reborrows bool, but then you remember that the borrow checker exists). - if skip_reborrows == 0 && adj.autoref.is_some() { - datum = unpack_datum!(bcx, auto_ref(bcx, datum, expr)); - } - - if let Some(target) = adj.unsize { - // We do not arrange cleanup ourselves; if we already are an - // L-value, then cleanup will have already been scheduled (and - // the `datum.to_rvalue_datum` call below will emit code to zero - // the drop flag when moving out of the L-value). If we are an - // R-value, then we do not need to schedule cleanup. - let source_datum = unpack_datum!(bcx, - datum.to_rvalue_datum(bcx, "__coerce_source")); - - let target = bcx.monomorphize(&target); - - let scratch = alloc_ty(bcx, target, "__coerce_target"); - call_lifetime_start(bcx, scratch); - let target_datum = Datum::new(scratch, target, - Rvalue::new(ByRef)); - bcx = coerce_unsized(bcx, expr.span, source_datum, target_datum); - datum = Datum::new(scratch, target, - RvalueExpr(Rvalue::new(ByRef))); - } - } - } - debug!("after adjustments, datum={:?}", datum); - DatumBlock::new(bcx, datum) -} - -fn coerce_unsized<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - span: syntax_pos::Span, - source: Datum<'tcx, Rvalue>, - target: Datum<'tcx, Rvalue>) - -> Block<'blk, 'tcx> { - let mut bcx = bcx; - debug!("coerce_unsized({:?} -> {:?})", source, target); - - match (&source.ty.sty, &target.ty.sty) { - (&ty::TyBox(a), &ty::TyBox(b)) | - (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }), - &ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) | - (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. 
}), - &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) | - (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }), - &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => { - let (inner_source, inner_target) = (a, b); - - let (base, old_info) = if !type_is_sized(bcx.tcx(), inner_source) { - // Normally, the source is a thin pointer and we are - // adding extra info to make a fat pointer. The exception - // is when we are upcasting an existing object fat pointer - // to use a different vtable. In that case, we want to - // load out the original data pointer so we can repackage - // it. - (Load(bcx, get_dataptr(bcx, source.val)), - Some(Load(bcx, get_meta(bcx, source.val)))) - } else { - let val = if source.kind.is_by_ref() { - load_ty(bcx, source.val, source.ty) - } else { - source.val - }; - (val, None) - }; - - let info = unsized_info(bcx.ccx(), inner_source, inner_target, old_info); - - // Compute the base pointer. This doesn't change the pointer value, - // but merely its type. - let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), inner_target).ptr_to(); - let base = PointerCast(bcx, base, ptr_ty); - - Store(bcx, base, get_dataptr(bcx, target.val)); - Store(bcx, info, get_meta(bcx, target.val)); - } - - // This can be extended to enums and tuples in the future. - // (&ty::TyEnum(def_id_a, _), &ty::TyEnum(def_id_b, _)) | - (&ty::TyStruct(def_id_a, _), &ty::TyStruct(def_id_b, _)) => { - assert_eq!(def_id_a, def_id_b); - - // The target is already by-ref because it's to be written to. - let source = unpack_datum!(bcx, source.to_ref_datum(bcx)); - assert!(target.kind.is_by_ref()); - - let kind = custom_coerce_unsize_info(bcx.ccx().shared(), - source.ty, - target.ty); - - let repr_source = adt::represent_type(bcx.ccx(), source.ty); - let src_fields = match &*repr_source { - &adt::Repr::Univariant(ref s, _) => &s.fields, - _ => span_bug!(span, - "Non univariant struct? (repr_source: {:?})", - repr_source), - }; - let repr_target = adt::represent_type(bcx.ccx(), target.ty); - let target_fields = match &*repr_target { - &adt::Repr::Univariant(ref s, _) => &s.fields, - _ => span_bug!(span, - "Non univariant struct? (repr_target: {:?})", - repr_target), - }; - - let coerce_index = match kind { - CustomCoerceUnsized::Struct(i) => i - }; - assert!(coerce_index < src_fields.len() && src_fields.len() == target_fields.len()); - - let source_val = adt::MaybeSizedValue::sized(source.val); - let target_val = adt::MaybeSizedValue::sized(target.val); - - let iter = src_fields.iter().zip(target_fields).enumerate(); - for (i, (src_ty, target_ty)) in iter { - let ll_source = adt::trans_field_ptr(bcx, &repr_source, source_val, Disr(0), i); - let ll_target = adt::trans_field_ptr(bcx, &repr_target, target_val, Disr(0), i); - - // If this is the field we need to coerce, recurse on it. - if i == coerce_index { - coerce_unsized(bcx, span, - Datum::new(ll_source, src_ty, - Rvalue::new(ByRef)), - Datum::new(ll_target, target_ty, - Rvalue::new(ByRef))); - } else { - // Otherwise, simply copy the data from the source. - assert!(src_ty.is_phantom_data() || src_ty == target_ty); - memcpy_ty(bcx, ll_target, ll_source, src_ty); - } - } - } - _ => bug!("coerce_unsized: invalid coercion {:?} -> {:?}", - source.ty, - target.ty) - } - bcx -} - -/// Translates an expression in "lvalue" mode -- meaning that it returns a reference to the memory -/// that the expr represents. -/// -/// If this expression is an rvalue, this implies introducing a temporary. 
In other words, -/// something like `x().f` is translated into roughly the equivalent of -/// -/// { tmp = x(); tmp.f } -pub fn trans_to_lvalue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - name: &str) - -> DatumBlock<'blk, 'tcx, Lvalue> { - let mut bcx = bcx; - let datum = unpack_datum!(bcx, trans(bcx, expr)); - return datum.to_lvalue_datum(bcx, name, expr.id); -} - -/// A version of `trans` that ignores adjustments. You almost certainly do not want to call this -/// directly. -fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - let mut bcx = bcx; - - debug!("trans_unadjusted(expr={:?})", expr); - let _indenter = indenter(); - - expr.debug_loc().apply(bcx.fcx); - - return match expr_kind(bcx.tcx(), expr) { - ExprKind::Lvalue | ExprKind::RvalueDatum => { - let datum = unpack_datum!(bcx, { - trans_datum_unadjusted(bcx, expr) - }); - - DatumBlock {bcx: bcx, datum: datum} - } - - ExprKind::RvalueStmt => { - bcx = trans_rvalue_stmt_unadjusted(bcx, expr); - nil(bcx, expr_ty(bcx, expr)) - } - - ExprKind::RvalueDps => { - let ty = expr_ty(bcx, expr); - if type_is_zero_size(bcx.ccx(), ty) { - bcx = trans_rvalue_dps_unadjusted(bcx, expr, Ignore); - nil(bcx, ty) - } else { - let scratch = rvalue_scratch_datum(bcx, ty, ""); - bcx = trans_rvalue_dps_unadjusted( - bcx, expr, SaveIn(scratch.val)); - - // Note: this is not obviously a good idea. It causes - // immediate values to be loaded immediately after a - // return from a call or other similar expression, - // which in turn leads to alloca's having shorter - // lifetimes and hence larger stack frames. However, - // in turn it can lead to more register pressure. - // Still, in practice it seems to increase - // performance, since we have fewer problems with - // morestack churn. - let scratch = unpack_datum!( - bcx, scratch.to_appropriate_datum(bcx)); - - DatumBlock::new(bcx, scratch.to_expr_datum()) - } - } - }; - - fn nil<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>) - -> DatumBlock<'blk, 'tcx, Expr> { - let llval = C_undef(type_of::type_of(bcx.ccx(), ty)); - let datum = immediate_rvalue(llval, ty); - DatumBlock::new(bcx, datum.to_expr_datum()) - } -} - -fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - let mut bcx = bcx; - let fcx = bcx.fcx; - let _icx = push_ctxt("trans_datum_unadjusted"); - - match expr.node { - hir::ExprType(ref e, _) => { - trans(bcx, &e) - } - hir::ExprPath(..) => { - let var = trans_var(bcx, bcx.tcx().expect_def(expr.id)); - DatumBlock::new(bcx, var.to_expr_datum()) - } - hir::ExprField(ref base, name) => { - trans_rec_field(bcx, &base, name.node) - } - hir::ExprTupField(ref base, idx) => { - trans_rec_tup_field(bcx, &base, idx.node) - } - hir::ExprIndex(ref base, ref idx) => { - trans_index(bcx, expr, &base, &idx, MethodCall::expr(expr.id)) - } - hir::ExprBox(ref contents) => { - // Special case for `Box` - let box_ty = expr_ty(bcx, expr); - let contents_ty = expr_ty(bcx, &contents); - match box_ty.sty { - ty::TyBox(..) => { - trans_uniq_expr(bcx, expr, box_ty, &contents, contents_ty) - } - _ => span_bug!(expr.span, - "expected unique box") - } - - } - hir::ExprLit(ref lit) => trans_immediate_lit(bcx, expr, &lit), - hir::ExprBinary(op, ref lhs, ref rhs) => { - trans_binary(bcx, expr, op, &lhs, &rhs) - } - hir::ExprUnary(op, ref x) => { - trans_unary(bcx, expr, op, &x) - } - hir::ExprAddrOf(_, ref x) => { - match x.node { - hir::ExprRepeat(..) | hir::ExprVec(..) 
=> { - // Special case for slices. - let cleanup_debug_loc = - debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), - x.id, - x.span, - false); - fcx.push_ast_cleanup_scope(cleanup_debug_loc); - let datum = unpack_datum!( - bcx, tvec::trans_slice_vec(bcx, expr, &x)); - bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, x.id); - DatumBlock::new(bcx, datum) - } - _ => { - trans_addr_of(bcx, expr, &x) - } - } - } - hir::ExprCast(ref val, _) => { - // Datum output mode means this is a scalar cast: - trans_imm_cast(bcx, &val, expr.id) - } - _ => { - span_bug!( - expr.span, - "trans_rvalue_datum_unadjusted reached \ - fall-through case: {:?}", - expr.node); - } - } -} - -fn trans_field<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, - base: &hir::Expr, - get_idx: F) - -> DatumBlock<'blk, 'tcx, Expr> where - F: FnOnce(TyCtxt<'blk, 'tcx, 'tcx>, &VariantInfo<'tcx>) -> usize, -{ - let mut bcx = bcx; - let _icx = push_ctxt("trans_rec_field"); - - let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, base, "field")); - let bare_ty = base_datum.ty; - let repr = adt::represent_type(bcx.ccx(), bare_ty); - let vinfo = VariantInfo::from_ty(bcx.tcx(), bare_ty, None); - - let ix = get_idx(bcx.tcx(), &vinfo); - let d = base_datum.get_element( - bcx, - vinfo.fields[ix].1, - |srcval| { - adt::trans_field_ptr(bcx, &repr, srcval, vinfo.discr, ix) - }); - - if type_is_sized(bcx.tcx(), d.ty) { - DatumBlock { datum: d.to_expr_datum(), bcx: bcx } - } else { - let scratch = rvalue_scratch_datum(bcx, d.ty, ""); - Store(bcx, d.val, get_dataptr(bcx, scratch.val)); - let info = Load(bcx, get_meta(bcx, base_datum.val)); - Store(bcx, info, get_meta(bcx, scratch.val)); - - // Always generate an lvalue datum, because this pointer doesn't own - // the data and cleanup is scheduled elsewhere. - DatumBlock::new(bcx, Datum::new(scratch.val, scratch.ty, LvalueExpr(d.kind))) - } -} - -/// Translates `base.field`. -fn trans_rec_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - base: &hir::Expr, - field: ast::Name) - -> DatumBlock<'blk, 'tcx, Expr> { - trans_field(bcx, base, |_, vinfo| vinfo.field_index(field)) -} - -/// Translates `base.`. -fn trans_rec_tup_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - base: &hir::Expr, - idx: usize) - -> DatumBlock<'blk, 'tcx, Expr> { - trans_field(bcx, base, |_, _| idx) -} - -fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - index_expr: &hir::Expr, - base: &hir::Expr, - idx: &hir::Expr, - method_call: MethodCall) - -> DatumBlock<'blk, 'tcx, Expr> { - //! Translates `base[idx]`. - - let _icx = push_ctxt("trans_index"); - let ccx = bcx.ccx(); - let mut bcx = bcx; - - let index_expr_debug_loc = index_expr.debug_loc(); - - // Check for overloaded index. - let method = ccx.tcx().tables.borrow().method_map.get(&method_call).cloned(); - let elt_datum = match method { - Some(method) => { - let method_ty = monomorphize_type(bcx, method.ty); - - let base_datum = unpack_datum!(bcx, trans(bcx, base)); - - // Translate index expression. - let ix_datum = unpack_datum!(bcx, trans(bcx, idx)); - - let ref_ty = // invoked methods have LB regions instantiated: - bcx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap(); - let elt_ty = match ref_ty.builtin_deref(true, ty::NoPreference) { - None => { - span_bug!(index_expr.span, - "index method didn't return a \ - dereferenceable type?!") - } - Some(elt_tm) => elt_tm.ty, - }; - - // Overloaded. Invoke the index() method, which basically - // yields a `&T` pointer. We can then proceed down the - // normal path (below) to dereference that `&T`. 
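In surface-Rust terms, the overloaded case handled here treats `a[i]` as sugar for `*Index::index(&a, i)`: the method yields a `&T`, and the ordinary deref path below takes over. A sketch against the real `std::ops::Index` (the helper name is invented):

    use std::ops::Index;

    // The overloaded method hands back a reference; dereferencing it is
    // then the same as the non-overloaded path.
    fn overloaded_index<'a, C: Index<I> + ?Sized, I>(container: &'a C, idx: I) -> &'a C::Output {
        container.index(idx)
    }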
- let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_index_elt"); - - bcx = Callee::method(bcx, method) - .call(bcx, index_expr_debug_loc, - ArgOverloadedOp(base_datum, Some(ix_datum)), - Some(SaveIn(scratch.val))).bcx; - - let datum = scratch.to_expr_datum(); - let lval = Lvalue::new("expr::trans_index overload"); - if type_is_sized(bcx.tcx(), elt_ty) { - Datum::new(datum.to_llscalarish(bcx), elt_ty, LvalueExpr(lval)) - } else { - Datum::new(datum.val, elt_ty, LvalueExpr(lval)) - } - } - None => { - let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, - base, - "index")); - - // Translate index expression and cast to a suitable LLVM integer. - // Rust is less strict than LLVM in this regard. - let ix_datum = unpack_datum!(bcx, trans(bcx, idx)); - let ix_val = ix_datum.to_llscalarish(bcx); - let ix_size = machine::llbitsize_of_real(bcx.ccx(), - val_ty(ix_val)); - let int_size = machine::llbitsize_of_real(bcx.ccx(), - ccx.int_type()); - let ix_val = { - if ix_size < int_size { - if expr_ty(bcx, idx).is_signed() { - SExt(bcx, ix_val, ccx.int_type()) - } else { ZExt(bcx, ix_val, ccx.int_type()) } - } else if ix_size > int_size { - Trunc(bcx, ix_val, ccx.int_type()) - } else { - ix_val - } - }; - - let unit_ty = base_datum.ty.sequence_element_type(bcx.tcx()); - - let (base, len) = base_datum.get_vec_base_and_len(bcx); - - debug!("trans_index: base {:?}", Value(base)); - debug!("trans_index: len {:?}", Value(len)); - - let bounds_check = ICmp(bcx, - llvm::IntUGE, - ix_val, - len, - index_expr_debug_loc); - let expect = ccx.get_intrinsic(&("llvm.expect.i1")); - let expected = Call(bcx, - expect, - &[bounds_check, C_bool(ccx, false)], - index_expr_debug_loc); - bcx = with_cond(bcx, expected, |bcx| { - controlflow::trans_fail_bounds_check(bcx, - expr_info(index_expr), - ix_val, - len) - }); - let elt = InBoundsGEP(bcx, base, &[ix_val]); - let elt = PointerCast(bcx, elt, type_of::type_of(ccx, unit_ty).ptr_to()); - let lval = Lvalue::new("expr::trans_index fallback"); - Datum::new(elt, unit_ty, LvalueExpr(lval)) - } - }; - - DatumBlock::new(bcx, elt_datum) -} - -/// Translates a reference to a variable. -pub fn trans_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, def: Def) - -> Datum<'tcx, Lvalue> { - - match def { - Def::Static(did, _) => consts::get_static(bcx.ccx(), did), - Def::Upvar(_, nid, _, _) => { - // Can't move upvars, so this is never a ZeroMemLastUse. 
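The non-overloaded indexing arm above lowers to a branch-weighted bounds check followed by an in-bounds GEP; roughly, in plain Rust (hypothetical helper):

    // `ix >= len` is the IntUGE comparison; `llvm.expect.i1` merely hints
    // that the failing branch is cold.
    fn checked_index(base: &[u32], ix: usize) -> u32 {
        if ix >= base.len() {
            panic!("index out of bounds: the len is {} but the index is {}",
                   base.len(), ix);
        }
        unsafe { *base.get_unchecked(ix) } // InBoundsGEP + load
    }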
- let local_ty = node_id_type(bcx, nid); - let lval = Lvalue::new_with_hint("expr::trans_var (upvar)", - bcx, nid, HintKind::ZeroAndMaintain); - match bcx.fcx.llupvars.borrow().get(&nid) { - Some(&val) => Datum::new(val, local_ty, lval), - None => { - bug!("trans_var: no llval for upvar {} found", nid); - } - } - } - Def::Local(_, nid) => { - let datum = match bcx.fcx.lllocals.borrow().get(&nid) { - Some(&v) => v, - None => { - bug!("trans_var: no datum for local/arg {} found", nid); - } - }; - debug!("take_local(nid={}, v={:?}, ty={})", - nid, Value(datum.val), datum.ty); - datum - } - _ => bug!("{:?} should not reach expr::trans_var", def) - } -} - -fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr) - -> Block<'blk, 'tcx> { - let mut bcx = bcx; - let _icx = push_ctxt("trans_rvalue_stmt"); - - if bcx.unreachable.get() { - return bcx; - } - - expr.debug_loc().apply(bcx.fcx); - - match expr.node { - hir::ExprBreak(label_opt) => { - controlflow::trans_break(bcx, expr, label_opt.map(|l| l.node)) - } - hir::ExprType(ref e, _) => { - trans_into(bcx, &e, Ignore) - } - hir::ExprAgain(label_opt) => { - controlflow::trans_cont(bcx, expr, label_opt.map(|l| l.node)) - } - hir::ExprRet(ref ex) => { - // Check to see if the return expression itself is reachable. - // This can occur when the inner expression contains a return - let reachable = if let Some(ref cfg) = bcx.fcx.cfg { - cfg.node_is_reachable(expr.id) - } else { - true - }; - - if reachable { - controlflow::trans_ret(bcx, expr, ex.as_ref().map(|e| &**e)) - } else { - // If it's not reachable, just translate the inner expression - // directly. This avoids having to manage a return slot when - // it won't actually be used anyway. - if let &Some(ref x) = ex { - bcx = trans_into(bcx, &x, Ignore); - } - // Mark the end of the block as unreachable. Once we get to - // a return expression, there's no more we should be doing - // after this. - Unreachable(bcx); - bcx - } - } - hir::ExprWhile(ref cond, ref body, _) => { - controlflow::trans_while(bcx, expr, &cond, &body) - } - hir::ExprLoop(ref body, _) => { - controlflow::trans_loop(bcx, expr, &body) - } - hir::ExprAssign(ref dst, ref src) => { - let src_datum = unpack_datum!(bcx, trans(bcx, &src)); - let dst_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &dst, "assign")); - - if bcx.fcx.type_needs_drop(dst_datum.ty) { - // If there are destructors involved, make sure we - // are copying from an rvalue, since that cannot possible - // alias an lvalue. We are concerned about code like: - // - // a = a - // - // but also - // - // a = a.b - // - // where e.g. a : Option and a.b : - // Option. In that case, freeing `a` before the - // assignment may also free `a.b`! - // - // We could avoid this intermediary with some analysis - // to determine whether `dst` may possibly own `src`. - expr.debug_loc().apply(bcx.fcx); - let src_datum = unpack_datum!( - bcx, src_datum.to_rvalue_datum(bcx, "ExprAssign")); - let opt_hint_datum = dst_datum.kind.drop_flag_info.hint_datum(bcx); - let opt_hint_val = opt_hint_datum.map(|d|d.to_value()); - - // 1. Drop the data at the destination, passing the - // drop-hint in case the lvalue has already been - // dropped or moved. - bcx = glue::drop_ty_core(bcx, - dst_datum.val, - dst_datum.ty, - expr.debug_loc(), - false, - opt_hint_val); - - // 2. We are overwriting the destination; ensure that - // its drop-hint (if any) says "initialized." 
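The hazard this `ExprAssign` arm guards against can be restated in safe Rust. A sketch (hypothetical helper; note that trans drops the old value in place before storing, whereas safe code must move the new value in first and drop the old one after):

    use std::mem;

    // `dst = src` when the type needs drop: `src` is forced to a true
    // rvalue first, so destroying the old `*dst` cannot free memory that
    // `src` still aliases (think `a = a.b`).
    fn assign<T>(dst: &mut T, src: T) {
        let old = mem::replace(dst, src);
        drop(old);
    }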
- if let Some(hint_val) = opt_hint_val { - let hint_llval = hint_val.value(); - let drop_needed = C_u8(bcx.fcx.ccx, adt::DTOR_NEEDED_HINT); - Store(bcx, drop_needed, hint_llval); - } - src_datum.store_to(bcx, dst_datum.val) - } else { - src_datum.store_to(bcx, dst_datum.val) - } - } - hir::ExprAssignOp(op, ref dst, ref src) => { - let method = bcx.tcx().tables - .borrow() - .method_map - .get(&MethodCall::expr(expr.id)).cloned(); - - if let Some(method) = method { - let dst = unpack_datum!(bcx, trans(bcx, &dst)); - let src_datum = unpack_datum!(bcx, trans(bcx, &src)); - - Callee::method(bcx, method) - .call(bcx, expr.debug_loc(), - ArgOverloadedOp(dst, Some(src_datum)), None).bcx - } else { - trans_assign_op(bcx, expr, op, &dst, &src) - } - } - hir::ExprInlineAsm(ref a, ref outputs, ref inputs) => { - let outputs = outputs.iter().map(|output| { - let out_datum = unpack_datum!(bcx, trans(bcx, output)); - unpack_datum!(bcx, out_datum.to_lvalue_datum(bcx, "out", expr.id)) - }).collect(); - let inputs = inputs.iter().map(|input| { - let input = unpack_datum!(bcx, trans(bcx, input)); - let input = unpack_datum!(bcx, input.to_rvalue_datum(bcx, "in")); - input.to_llscalarish(bcx) - }).collect(); - asm::trans_inline_asm(bcx, a, outputs, inputs); - bcx - } - _ => { - span_bug!( - expr.span, - "trans_rvalue_stmt_unadjusted reached \ - fall-through case: {:?}", - expr.node); - } - } -} - -fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - dest: Dest) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_rvalue_dps_unadjusted"); - let mut bcx = bcx; - - expr.debug_loc().apply(bcx.fcx); - - // Entry into the method table if this is an overloaded call/op. - let method_call = MethodCall::expr(expr.id); - - match expr.node { - hir::ExprType(ref e, _) => { - trans_into(bcx, &e, dest) - } - hir::ExprPath(..) => { - trans_def_dps_unadjusted(bcx, expr, bcx.tcx().expect_def(expr.id), dest) - } - hir::ExprIf(ref cond, ref thn, ref els) => { - controlflow::trans_if(bcx, expr.id, &cond, &thn, els.as_ref().map(|e| &**e), dest) - } - hir::ExprMatch(ref discr, ref arms, _) => { - _match::trans_match(bcx, expr, &discr, &arms[..], dest) - } - hir::ExprBlock(ref blk) => { - controlflow::trans_block(bcx, &blk, dest) - } - hir::ExprStruct(_, ref fields, ref base) => { - trans_struct(bcx, - &fields[..], - base.as_ref().map(|e| &**e), - expr.span, - expr.id, - node_id_type(bcx, expr.id), - dest) - } - hir::ExprTup(ref args) => { - let numbered_fields: Vec<(usize, &hir::Expr)> = - args.iter().enumerate().map(|(i, arg)| (i, &**arg)).collect(); - trans_adt(bcx, - expr_ty(bcx, expr), - Disr(0), - &numbered_fields[..], - None, - dest, - expr.debug_loc()) - } - hir::ExprLit(ref lit) => { - match lit.node { - ast::LitKind::Str(ref s, _) => { - tvec::trans_lit_str(bcx, expr, (*s).clone(), dest) - } - _ => { - span_bug!(expr.span, - "trans_rvalue_dps_unadjusted shouldn't be \ - translating this type of literal") - } - } - } - hir::ExprVec(..) | hir::ExprRepeat(..) => { - tvec::trans_fixed_vstore(bcx, expr, dest) - } - hir::ExprClosure(_, ref decl, ref body, _) => { - let dest = match dest { - SaveIn(lldest) => closure::Dest::SaveIn(bcx, lldest), - Ignore => closure::Dest::Ignore(bcx.ccx()) - }; - - // NB. To get the id of the closure, we don't use - // `local_def_id(id)`, but rather we extract the closure - // def-id from the expr's type. 
This is because this may - // be an inlined expression from another crate, and we - // want to get the ORIGINAL closure def-id, since that is - // the key we need to find the closure-kind and - // closure-type etc. - let (def_id, substs) = match expr_ty(bcx, expr).sty { - ty::TyClosure(def_id, substs) => (def_id, substs), - ref t => - span_bug!( - expr.span, - "closure expr without closure type: {:?}", t), - }; - - closure::trans_closure_expr(dest, - decl, - body, - expr.id, - def_id, - substs).unwrap_or(bcx) - } - hir::ExprCall(ref f, ref args) => { - let method = bcx.tcx().tables.borrow().method_map.get(&method_call).cloned(); - let (callee, args) = if let Some(method) = method { - let mut all_args = vec![&**f]; - all_args.extend(args.iter().map(|e| &**e)); - - (Callee::method(bcx, method), ArgOverloadedCall(all_args)) - } else { - let f = unpack_datum!(bcx, trans(bcx, f)); - (match f.ty.sty { - ty::TyFnDef(def_id, substs, _) => { - Callee::def(bcx.ccx(), def_id, substs) - } - ty::TyFnPtr(_) => { - let f = unpack_datum!(bcx, - f.to_rvalue_datum(bcx, "callee")); - Callee::ptr(f) - } - _ => { - span_bug!(expr.span, - "type of callee is not a fn: {}", f.ty); - } - }, ArgExprs(&args)) - }; - callee.call(bcx, expr.debug_loc(), args, Some(dest)).bcx - } - hir::ExprMethodCall(_, _, ref args) => { - Callee::method_call(bcx, method_call) - .call(bcx, expr.debug_loc(), ArgExprs(&args), Some(dest)).bcx - } - hir::ExprBinary(op, ref lhs, ref rhs_expr) => { - // if not overloaded, would be RvalueDatumExpr - let lhs = unpack_datum!(bcx, trans(bcx, &lhs)); - let mut rhs = unpack_datum!(bcx, trans(bcx, &rhs_expr)); - if !op.node.is_by_value() { - rhs = unpack_datum!(bcx, auto_ref(bcx, rhs, rhs_expr)); - } - - Callee::method_call(bcx, method_call) - .call(bcx, expr.debug_loc(), - ArgOverloadedOp(lhs, Some(rhs)), Some(dest)).bcx - } - hir::ExprUnary(_, ref subexpr) => { - // if not overloaded, would be RvalueDatumExpr - let arg = unpack_datum!(bcx, trans(bcx, &subexpr)); - - Callee::method_call(bcx, method_call) - .call(bcx, expr.debug_loc(), - ArgOverloadedOp(arg, None), Some(dest)).bcx - } - hir::ExprCast(..) => { - // Trait casts used to come this way, now they should be coercions. - span_bug!(expr.span, "DPS expr_cast (residual trait cast?)") - } - hir::ExprAssignOp(op, _, _) => { - span_bug!( - expr.span, - "augmented assignment `{}=` should always be a rvalue_stmt", - op.node.as_str()) - } - _ => { - span_bug!( - expr.span, - "trans_rvalue_dps_unadjusted reached fall-through \ - case: {:?}", - expr.node); - } - } -} - -fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - ref_expr: &hir::Expr, - def: Def, - dest: Dest) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_def_dps_unadjusted"); - - let lldest = match dest { - SaveIn(lldest) => lldest, - Ignore => { return bcx; } - }; - - let ty = expr_ty(bcx, ref_expr); - if let ty::TyFnDef(..) = ty.sty { - // Zero-sized function or ctor. - return bcx; - } - - match def { - Def::Variant(tid, vid) => { - let variant = bcx.tcx().lookup_adt_def(tid).variant_with_id(vid); - // Nullary variant. - let ty = expr_ty(bcx, ref_expr); - let repr = adt::represent_type(bcx.ccx(), ty); - adt::trans_set_discr(bcx, &repr, lldest, Disr::from(variant.disr_val)); - bcx - } - Def::Struct(..) 
=> { - match ty.sty { - ty::TyStruct(def, _) if def.has_dtor() => { - let repr = adt::represent_type(bcx.ccx(), ty); - adt::trans_set_discr(bcx, &repr, lldest, Disr(0)); - } - _ => {} - } - bcx - } - _ => { - span_bug!(ref_expr.span, - "Non-DPS def {:?} referened by {}", - def, bcx.node_id_to_string(ref_expr.id)); - } - } -} - -fn trans_struct<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - fields: &[hir::Field], - base: Option<&hir::Expr>, - expr_span: syntax_pos::Span, - expr_id: ast::NodeId, - ty: Ty<'tcx>, - dest: Dest) -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_rec"); - - let tcx = bcx.tcx(); - let vinfo = VariantInfo::of_node(tcx, ty, expr_id); - - let mut need_base = vec![true; vinfo.fields.len()]; - - let numbered_fields = fields.iter().map(|field| { - let pos = vinfo.field_index(field.name.node); - need_base[pos] = false; - (pos, &*field.expr) - }).collect::>(); - - let optbase = match base { - Some(base_expr) => { - let mut leftovers = Vec::new(); - for (i, b) in need_base.iter().enumerate() { - if *b { - leftovers.push((i, vinfo.fields[i].1)); - } - } - Some(StructBaseInfo {expr: base_expr, - fields: leftovers }) - } - None => { - if need_base.iter().any(|b| *b) { - span_bug!(expr_span, "missing fields and no base expr") - } - None - } - }; - - trans_adt(bcx, - ty, - vinfo.discr, - &numbered_fields, - optbase, - dest, - DebugLoc::At(expr_id, expr_span)) -} - -/// Information that `trans_adt` needs in order to fill in the fields -/// of a struct copied from a base struct (e.g., from an expression -/// like `Foo { a: b, ..base }`. -/// -/// Note that `fields` may be empty; the base expression must always be -/// evaluated for side-effects. -pub struct StructBaseInfo<'a, 'tcx> { - /// The base expression; will be evaluated after all explicit fields. - expr: &'a hir::Expr, - /// The indices of fields to copy paired with their types. - fields: Vec<(usize, Ty<'tcx>)> -} - -/// Constructs an ADT instance: -/// -/// - `fields` should be a list of field indices paired with the -/// expression to store into that field. The initializers will be -/// evaluated in the order specified by `fields`. -/// -/// - `optbase` contains information on the base struct (if any) from -/// which remaining fields are copied; see comments on `StructBaseInfo`. -pub fn trans_adt<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - ty: Ty<'tcx>, - discr: Disr, - fields: &[(usize, &hir::Expr)], - optbase: Option>, - dest: Dest, - debug_location: DebugLoc) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_adt"); - let fcx = bcx.fcx; - let repr = adt::represent_type(bcx.ccx(), ty); - - debug_location.apply(bcx.fcx); - - // If we don't care about the result, just make a - // temporary stack slot - let addr = match dest { - SaveIn(pos) => pos, - Ignore => { - let llresult = alloc_ty(bcx, ty, "temp"); - call_lifetime_start(bcx, llresult); - llresult - } - }; - - debug!("trans_adt"); - - // This scope holds intermediates that must be cleaned should - // panic occur before the ADT as a whole is ready. - let custom_cleanup_scope = fcx.push_custom_cleanup_scope(); - - if ty.is_simd() { - // Issue 23112: The original logic appeared vulnerable to same - // order-of-eval bug. But, SIMD values are tuple-structs; - // i.e. functional record update (FRU) syntax is unavailable. - // - // To be safe, double-check that we did not get here via FRU. 
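The `need_base` bookkeeping in `trans_struct` above, in standalone form (hypothetical helper): fields without explicit initializers become the leftovers that the base expression must supply.

    // Returns the indices of fields the FRU base has to fill in.
    fn leftover_fields(field_count: usize, explicit: &[usize]) -> Vec<usize> {
        let mut need_base = vec![true; field_count];
        for &i in explicit {
            need_base[i] = false;
        }
        need_base.iter()
                 .enumerate()
                 .filter(|&(_, &b)| b)
                 .map(|(i, _)| i)
                 .collect()
    }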
- assert!(optbase.is_none()); - - // This is the constructor of a SIMD type, such types are - // always primitive machine types and so do not have a - // destructor or require any clean-up. - let llty = type_of::type_of(bcx.ccx(), ty); - - // keep a vector as a register, and running through the field - // `insertelement`ing them directly into that register - // (i.e. avoid GEPi and `store`s to an alloca) . - let mut vec_val = C_undef(llty); - - for &(i, ref e) in fields { - let block_datum = trans(bcx, &e); - bcx = block_datum.bcx; - let position = C_uint(bcx.ccx(), i); - let value = block_datum.datum.to_llscalarish(bcx); - vec_val = InsertElement(bcx, vec_val, value, position); - } - Store(bcx, vec_val, addr); - } else if let Some(base) = optbase { - // Issue 23112: If there is a base, then order-of-eval - // requires field expressions eval'ed before base expression. - - // First, trans field expressions to temporary scratch values. - let scratch_vals: Vec<_> = fields.iter().map(|&(i, ref e)| { - let datum = unpack_datum!(bcx, trans(bcx, &e)); - (i, datum) - }).collect(); - - debug_location.apply(bcx.fcx); - - // Second, trans the base to the dest. - assert_eq!(discr, Disr(0)); - - let addr = adt::MaybeSizedValue::sized(addr); - match expr_kind(bcx.tcx(), &base.expr) { - ExprKind::RvalueDps | ExprKind::RvalueDatum if !bcx.fcx.type_needs_drop(ty) => { - bcx = trans_into(bcx, &base.expr, SaveIn(addr.value)); - }, - ExprKind::RvalueStmt => { - bug!("unexpected expr kind for struct base expr") - } - _ => { - let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &base.expr, "base")); - for &(i, t) in &base.fields { - let datum = base_datum.get_element( - bcx, t, |srcval| adt::trans_field_ptr(bcx, &repr, srcval, discr, i)); - assert!(type_is_sized(bcx.tcx(), datum.ty)); - let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i); - bcx = datum.store_to(bcx, dest); - } - } - } - - // Finally, move scratch field values into actual field locations - for (i, datum) in scratch_vals { - let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i); - bcx = datum.store_to(bcx, dest); - } - } else { - // No base means we can write all fields directly in place. 
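The issue-23112 evaluation order implemented above, sketched with ordinary containers (hypothetical helper): explicit fields are evaluated first, then the base is copied, then the scratch values overwrite their slots.

    // 1. explicit field initializers go to scratch values, evaluated first;
    // 2. the base is copied into the destination;
    // 3. the scratch values are stored over their field slots.
    fn translate_fru<T: Clone>(dest: &mut Vec<T>, explicit: Vec<(usize, T)>, base: &[T]) {
        let scratch = explicit;
        dest.clear();
        dest.extend_from_slice(base);
        for (i, v) in scratch {
            dest[i] = v;
        }
    }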
- let addr = adt::MaybeSizedValue::sized(addr); - for &(i, ref e) in fields { - let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i); - let e_ty = expr_ty_adjusted(bcx, &e); - bcx = trans_into(bcx, &e, SaveIn(dest)); - let scope = cleanup::CustomScope(custom_cleanup_scope); - fcx.schedule_lifetime_end(scope, dest); - // FIXME: nonzeroing move should generalize to fields - fcx.schedule_drop_mem(scope, dest, e_ty, None); - } - } - - adt::trans_set_discr(bcx, &repr, addr, discr); - - fcx.pop_custom_cleanup_scope(custom_cleanup_scope); - - // If we don't care about the result drop the temporary we made - match dest { - SaveIn(_) => bcx, - Ignore => { - bcx = glue::drop_ty(bcx, addr, ty, debug_location); - base::call_lifetime_end(bcx, addr); - bcx - } - } -} - - -fn trans_immediate_lit<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - lit: &ast::Lit) - -> DatumBlock<'blk, 'tcx, Expr> { - // must not be a string constant, that is a RvalueDpsExpr - let _icx = push_ctxt("trans_immediate_lit"); - let ty = expr_ty(bcx, expr); - let v = consts::const_lit(bcx.ccx(), expr, lit); - immediate_rvalue_bcx(bcx, v, ty).to_expr_datumblock() -} - -fn trans_unary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - op: hir::UnOp, - sub_expr: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - let ccx = bcx.ccx(); - let mut bcx = bcx; - let _icx = push_ctxt("trans_unary_datum"); - - let method_call = MethodCall::expr(expr.id); - - // The only overloaded operator that is translated to a datum - // is an overloaded deref, since it is always yields a `&T`. - // Otherwise, we should be in the RvalueDpsExpr path. - assert!(op == hir::UnDeref || !ccx.tcx().is_method_call(expr.id)); - - let un_ty = expr_ty(bcx, expr); - - let debug_loc = expr.debug_loc(); - - match op { - hir::UnNot => { - let datum = unpack_datum!(bcx, trans(bcx, sub_expr)); - let llresult = Not(bcx, datum.to_llscalarish(bcx), debug_loc); - immediate_rvalue_bcx(bcx, llresult, un_ty).to_expr_datumblock() - } - hir::UnNeg => { - let datum = unpack_datum!(bcx, trans(bcx, sub_expr)); - let val = datum.to_llscalarish(bcx); - let (bcx, llneg) = { - if un_ty.is_fp() { - let result = FNeg(bcx, val, debug_loc); - (bcx, result) - } else { - let is_signed = un_ty.is_signed(); - let result = Neg(bcx, val, debug_loc); - let bcx = if bcx.ccx().check_overflow() && is_signed { - let (llty, min) = base::llty_and_min_for_signed_ty(bcx, un_ty); - let is_min = ICmp(bcx, llvm::IntEQ, val, - C_integral(llty, min, true), debug_loc); - with_cond(bcx, is_min, |bcx| { - let msg = InternedString::new( - "attempt to negate with overflow"); - controlflow::trans_fail(bcx, expr_info(expr), msg) - }) - } else { - bcx - }; - (bcx, result) - } - }; - immediate_rvalue_bcx(bcx, llneg, un_ty).to_expr_datumblock() - } - hir::UnDeref => { - let datum = unpack_datum!(bcx, trans(bcx, sub_expr)); - deref_once(bcx, expr, datum, method_call) - } - } -} - -fn trans_uniq_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - box_expr: &hir::Expr, - box_ty: Ty<'tcx>, - contents: &hir::Expr, - contents_ty: Ty<'tcx>) - -> DatumBlock<'blk, 'tcx, Expr> { - let _icx = push_ctxt("trans_uniq_expr"); - let fcx = bcx.fcx; - assert!(type_is_sized(bcx.tcx(), contents_ty)); - let llty = type_of::type_of(bcx.ccx(), contents_ty); - let size = llsize_of(bcx.ccx(), llty); - let align = C_uint(bcx.ccx(), type_of::align_of(bcx.ccx(), contents_ty)); - let llty_ptr = llty.ptr_to(); - let Result { bcx, val } = malloc_raw_dyn(bcx, - llty_ptr, - box_ty, - size, - align, - box_expr.debug_loc()); - // Unique boxes 
do not allocate for zero-size types. The standard library - // may assume that `free` is never called on the pointer returned for - // `Box`. - let bcx = if llsize_of_alloc(bcx.ccx(), llty) == 0 { - trans_into(bcx, contents, SaveIn(val)) - } else { - let custom_cleanup_scope = fcx.push_custom_cleanup_scope(); - fcx.schedule_free_value(cleanup::CustomScope(custom_cleanup_scope), - val, cleanup::HeapExchange, contents_ty); - let bcx = trans_into(bcx, contents, SaveIn(val)); - fcx.pop_custom_cleanup_scope(custom_cleanup_scope); - bcx - }; - immediate_rvalue_bcx(bcx, val, box_ty).to_expr_datumblock() -} - -fn trans_addr_of<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - subexpr: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - let _icx = push_ctxt("trans_addr_of"); - let mut bcx = bcx; - let sub_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, subexpr, "addr_of")); - let ty = expr_ty(bcx, expr); - if !type_is_sized(bcx.tcx(), sub_datum.ty) { - // Always generate an lvalue datum, because this pointer doesn't own - // the data and cleanup is scheduled elsewhere. - DatumBlock::new(bcx, Datum::new(sub_datum.val, ty, LvalueExpr(sub_datum.kind))) - } else { - // Sized value, ref to a thin pointer - immediate_rvalue_bcx(bcx, sub_datum.val, ty).to_expr_datumblock() - } -} - -fn trans_scalar_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - binop_expr: &hir::Expr, - binop_ty: Ty<'tcx>, - op: hir::BinOp, - lhs: Datum<'tcx, Rvalue>, - rhs: Datum<'tcx, Rvalue>) - -> DatumBlock<'blk, 'tcx, Expr> -{ - let _icx = push_ctxt("trans_scalar_binop"); - - let lhs_t = lhs.ty; - assert!(!lhs_t.is_simd()); - let is_float = lhs_t.is_fp(); - let is_signed = lhs_t.is_signed(); - let info = expr_info(binop_expr); - - let binop_debug_loc = binop_expr.debug_loc(); - - let mut bcx = bcx; - let lhs = lhs.to_llscalarish(bcx); - let rhs = rhs.to_llscalarish(bcx); - let val = match op.node { - hir::BiAdd => { - if is_float { - FAdd(bcx, lhs, rhs, binop_debug_loc) - } else { - let (newbcx, res) = with_overflow_check( - bcx, OverflowOp::Add, info, lhs_t, lhs, rhs, binop_debug_loc); - bcx = newbcx; - res - } - } - hir::BiSub => { - if is_float { - FSub(bcx, lhs, rhs, binop_debug_loc) - } else { - let (newbcx, res) = with_overflow_check( - bcx, OverflowOp::Sub, info, lhs_t, lhs, rhs, binop_debug_loc); - bcx = newbcx; - res - } - } - hir::BiMul => { - if is_float { - FMul(bcx, lhs, rhs, binop_debug_loc) - } else { - let (newbcx, res) = with_overflow_check( - bcx, OverflowOp::Mul, info, lhs_t, lhs, rhs, binop_debug_loc); - bcx = newbcx; - res - } - } - hir::BiDiv => { - if is_float { - FDiv(bcx, lhs, rhs, binop_debug_loc) - } else { - // Only zero-check integers; fp /0 is NaN - bcx = base::fail_if_zero_or_overflows(bcx, - expr_info(binop_expr), - op, - lhs, - rhs, - lhs_t); - if is_signed { - SDiv(bcx, lhs, rhs, binop_debug_loc) - } else { - UDiv(bcx, lhs, rhs, binop_debug_loc) - } - } - } - hir::BiRem => { - if is_float { - FRem(bcx, lhs, rhs, binop_debug_loc) - } else { - // Only zero-check integers; fp %0 is NaN - bcx = base::fail_if_zero_or_overflows(bcx, - expr_info(binop_expr), - op, lhs, rhs, lhs_t); - if is_signed { - SRem(bcx, lhs, rhs, binop_debug_loc) - } else { - URem(bcx, lhs, rhs, binop_debug_loc) - } - } - } - hir::BiBitOr => Or(bcx, lhs, rhs, binop_debug_loc), - hir::BiBitAnd => And(bcx, lhs, rhs, binop_debug_loc), - hir::BiBitXor => Xor(bcx, lhs, rhs, binop_debug_loc), - hir::BiShl => { - let (newbcx, res) = with_overflow_check( - bcx, OverflowOp::Shl, info, lhs_t, lhs, rhs, binop_debug_loc); - bcx = newbcx; - 
res - } - hir::BiShr => { - let (newbcx, res) = with_overflow_check( - bcx, OverflowOp::Shr, info, lhs_t, lhs, rhs, binop_debug_loc); - bcx = newbcx; - res - } - hir::BiEq | hir::BiNe | hir::BiLt | hir::BiGe | hir::BiLe | hir::BiGt => { - base::compare_scalar_types(bcx, lhs, rhs, lhs_t, op.node, binop_debug_loc) - } - _ => { - span_bug!(binop_expr.span, "unexpected binop"); - } - }; - - immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock() -} - -// refinement types would obviate the need for this -#[derive(Clone, Copy)] -enum lazy_binop_ty { - lazy_and, - lazy_or, -} - - -fn trans_lazy_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - binop_expr: &hir::Expr, - op: lazy_binop_ty, - a: &hir::Expr, - b: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - let _icx = push_ctxt("trans_lazy_binop"); - let binop_ty = expr_ty(bcx, binop_expr); - let fcx = bcx.fcx; - - let DatumBlock {bcx: past_lhs, datum: lhs} = trans(bcx, a); - let lhs = lhs.to_llscalarish(past_lhs); - - if past_lhs.unreachable.get() { - return immediate_rvalue_bcx(past_lhs, lhs, binop_ty).to_expr_datumblock(); - } - - // If the rhs can never be reached, don't generate code for it. - if let Some(cond_val) = const_to_opt_uint(lhs) { - match (cond_val, op) { - (0, lazy_and) | - (1, lazy_or) => { - return immediate_rvalue_bcx(past_lhs, lhs, binop_ty).to_expr_datumblock(); - } - _ => { /* continue */ } - } - } - - let join = fcx.new_id_block("join", binop_expr.id); - let before_rhs = fcx.new_id_block("before_rhs", b.id); - - match op { - lazy_and => CondBr(past_lhs, lhs, before_rhs.llbb, join.llbb, DebugLoc::None), - lazy_or => CondBr(past_lhs, lhs, join.llbb, before_rhs.llbb, DebugLoc::None) - } - - let DatumBlock {bcx: past_rhs, datum: rhs} = trans(before_rhs, b); - let rhs = rhs.to_llscalarish(past_rhs); - - if past_rhs.unreachable.get() { - return immediate_rvalue_bcx(join, lhs, binop_ty).to_expr_datumblock(); - } - - Br(past_rhs, join.llbb, DebugLoc::None); - let phi = Phi(join, Type::i1(bcx.ccx()), &[lhs, rhs], - &[past_lhs.llbb, past_rhs.llbb]); - - return immediate_rvalue_bcx(join, phi, binop_ty).to_expr_datumblock(); -} - -fn trans_binary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - op: hir::BinOp, - lhs: &hir::Expr, - rhs: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - let _icx = push_ctxt("trans_binary"); - let ccx = bcx.ccx(); - - // if overloaded, would be RvalueDpsExpr - assert!(!ccx.tcx().is_method_call(expr.id)); - - match op.node { - hir::BiAnd => { - trans_lazy_binop(bcx, expr, lazy_and, lhs, rhs) - } - hir::BiOr => { - trans_lazy_binop(bcx, expr, lazy_or, lhs, rhs) - } - _ => { - let mut bcx = bcx; - let binop_ty = expr_ty(bcx, expr); - - let lhs = unpack_datum!(bcx, trans(bcx, lhs)); - let lhs = unpack_datum!(bcx, lhs.to_rvalue_datum(bcx, "binop_lhs")); - debug!("trans_binary (expr {}): lhs={:?}", expr.id, lhs); - let rhs = unpack_datum!(bcx, trans(bcx, rhs)); - let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "binop_rhs")); - debug!("trans_binary (expr {}): rhs={:?}", expr.id, rhs); - - if type_is_fat_ptr(ccx.tcx(), lhs.ty) { - assert!(type_is_fat_ptr(ccx.tcx(), rhs.ty), - "built-in binary operators on fat pointers are homogeneous"); - assert_eq!(binop_ty, bcx.tcx().types.bool); - let val = base::compare_scalar_types( - bcx, - lhs.val, - rhs.val, - lhs.ty, - op.node, - expr.debug_loc()); - immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock() - } else { - assert!(!type_is_fat_ptr(ccx.tcx(), rhs.ty), - "built-in binary operators on fat pointers are homogeneous"); - 
trans_scalar_binop(bcx, expr, binop_ty, op, lhs, rhs) - } - } - } -} - -pub fn cast_is_noop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - expr: &hir::Expr, - t_in: Ty<'tcx>, - t_out: Ty<'tcx>) - -> bool { - if let Some(&CastKind::CoercionCast) = tcx.cast_kinds.borrow().get(&expr.id) { - return true; - } - - match (t_in.builtin_deref(true, ty::NoPreference), - t_out.builtin_deref(true, ty::NoPreference)) { - (Some(ty::TypeAndMut{ ty: t_in, .. }), Some(ty::TypeAndMut{ ty: t_out, .. })) => { - t_in == t_out - } - _ => { - // This condition isn't redundant with the check for CoercionCast: - // different types can be substituted into the same type, and - // == equality can be overconservative if there are regions. - t_in == t_out - } - } -} - -fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - id: ast::NodeId) - -> DatumBlock<'blk, 'tcx, Expr> -{ - use rustc::ty::cast::CastTy::*; - use rustc::ty::cast::IntTy::*; - - fn int_cast(bcx: Block, - lldsttype: Type, - llsrctype: Type, - llsrc: ValueRef, - signed: bool) - -> ValueRef - { - let _icx = push_ctxt("int_cast"); - let srcsz = llsrctype.int_width(); - let dstsz = lldsttype.int_width(); - return if dstsz == srcsz { - BitCast(bcx, llsrc, lldsttype) - } else if srcsz > dstsz { - TruncOrBitCast(bcx, llsrc, lldsttype) - } else if signed { - SExtOrBitCast(bcx, llsrc, lldsttype) - } else { - ZExtOrBitCast(bcx, llsrc, lldsttype) - } - } - - fn float_cast(bcx: Block, - lldsttype: Type, - llsrctype: Type, - llsrc: ValueRef) - -> ValueRef - { - let _icx = push_ctxt("float_cast"); - let srcsz = llsrctype.float_width(); - let dstsz = lldsttype.float_width(); - return if dstsz > srcsz { - FPExt(bcx, llsrc, lldsttype) - } else if srcsz > dstsz { - FPTrunc(bcx, llsrc, lldsttype) - } else { llsrc }; - } - - let _icx = push_ctxt("trans_cast"); - let mut bcx = bcx; - let ccx = bcx.ccx(); - - let t_in = expr_ty_adjusted(bcx, expr); - let t_out = node_id_type(bcx, id); - - debug!("trans_cast({:?} as {:?})", t_in, t_out); - let mut ll_t_in = type_of::immediate_type_of(ccx, t_in); - let ll_t_out = type_of::immediate_type_of(ccx, t_out); - // Convert the value to be cast into a ValueRef, either by-ref or - // by-value as appropriate given its type: - let mut datum = unpack_datum!(bcx, trans(bcx, expr)); - - let datum_ty = monomorphize_type(bcx, datum.ty); - - if cast_is_noop(bcx.tcx(), expr, datum_ty, t_out) { - datum.ty = t_out; - return DatumBlock::new(bcx, datum); - } - - if type_is_fat_ptr(bcx.tcx(), t_in) { - assert!(datum.kind.is_by_ref()); - if type_is_fat_ptr(bcx.tcx(), t_out) { - return DatumBlock::new(bcx, Datum::new( - PointerCast(bcx, datum.val, ll_t_out.ptr_to()), - t_out, - Rvalue::new(ByRef) - )).to_expr_datumblock(); - } else { - // Return the address - return immediate_rvalue_bcx(bcx, - PointerCast(bcx, - Load(bcx, get_dataptr(bcx, datum.val)), - ll_t_out), - t_out).to_expr_datumblock(); - } - } - - let r_t_in = CastTy::from_ty(t_in).expect("bad input type for cast"); - let r_t_out = CastTy::from_ty(t_out).expect("bad output type for cast"); - - let (llexpr, signed) = if let Int(CEnum) = r_t_in { - let repr = adt::represent_type(ccx, t_in); - let datum = unpack_datum!( - bcx, datum.to_lvalue_datum(bcx, "trans_imm_cast", expr.id)); - let llexpr_ptr = datum.to_llref(); - let discr = adt::trans_get_discr(bcx, &repr, llexpr_ptr, - Some(Type::i64(ccx)), true); - ll_t_in = val_ty(discr); - (discr, adt::is_discr_signed(&repr)) - } else { - (datum.to_llscalarish(bcx), t_in.is_signed()) - }; - - let newval = match (r_t_in, r_t_out) { - 
(Ptr(_), Ptr(_)) | (FnPtr, Ptr(_)) | (RPtr(_), Ptr(_)) => { - PointerCast(bcx, llexpr, ll_t_out) - } - (Ptr(_), Int(_)) | (FnPtr, Int(_)) => PtrToInt(bcx, llexpr, ll_t_out), - (Int(_), Ptr(_)) => IntToPtr(bcx, llexpr, ll_t_out), - - (Int(_), Int(_)) => int_cast(bcx, ll_t_out, ll_t_in, llexpr, signed), - (Float, Float) => float_cast(bcx, ll_t_out, ll_t_in, llexpr), - (Int(_), Float) if signed => SIToFP(bcx, llexpr, ll_t_out), - (Int(_), Float) => UIToFP(bcx, llexpr, ll_t_out), - (Float, Int(I)) => FPToSI(bcx, llexpr, ll_t_out), - (Float, Int(_)) => FPToUI(bcx, llexpr, ll_t_out), - - _ => span_bug!(expr.span, - "translating unsupported cast: \ - {:?} -> {:?}", - t_in, - t_out) - }; - return immediate_rvalue_bcx(bcx, newval, t_out).to_expr_datumblock(); -} - -fn trans_assign_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - op: hir::BinOp, - dst: &hir::Expr, - src: &hir::Expr) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_assign_op"); - let mut bcx = bcx; - - debug!("trans_assign_op(expr={:?})", expr); - - // User-defined operator methods cannot be used with `+=` etc right now - assert!(!bcx.tcx().is_method_call(expr.id)); - - // Evaluate LHS (destination), which should be an lvalue - let dst = unpack_datum!(bcx, trans_to_lvalue(bcx, dst, "assign_op")); - assert!(!bcx.fcx.type_needs_drop(dst.ty)); - let lhs = load_ty(bcx, dst.val, dst.ty); - let lhs = immediate_rvalue(lhs, dst.ty); - - // Evaluate RHS - FIXME(#28160) this sucks - let rhs = unpack_datum!(bcx, trans(bcx, &src)); - let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "assign_op_rhs")); - - // Perform computation and store the result - let result_datum = unpack_datum!( - bcx, trans_scalar_binop(bcx, expr, dst.ty, op, lhs, rhs)); - return result_datum.store_to(bcx, dst.val); -} - -fn auto_ref<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - datum: Datum<'tcx, Expr>, - expr: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - let mut bcx = bcx; - - // Ensure cleanup of `datum` if not already scheduled and obtain - // a "by ref" pointer. - let lv_datum = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "autoref", expr.id)); - - // Compute final type. Note that we are loose with the region and - // mutability, since those things don't matter in trans. - let referent_ty = lv_datum.ty; - let ptr_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReErased), referent_ty); - - // Construct the resulting datum. The right datum to return here would be an Lvalue datum, - // because there is cleanup scheduled and the datum doesn't own the data, but for thin pointers - // we microoptimize it to be an Rvalue datum to avoid the extra alloca and level of - // indirection and for thin pointers, this has no ill effects. - let kind = if type_is_sized(bcx.tcx(), referent_ty) { - RvalueExpr(Rvalue::new(ByValue)) - } else { - LvalueExpr(lv_datum.kind) - }; - - // Get the pointer. 
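`auto_ref` in surface terms: give the value a stable home whose cleanup is scheduled, then hand out a thin reference to it. A sketch (hypothetical helper; the `Option` slot stands in for the lvalue that owns the data):

    // Materialize `value` into a caller-owned slot, then return `&T` to it,
    // mirroring lvalue-then-address-of.
    fn auto_ref<T>(slot: &mut Option<T>, value: T) -> &T {
        *slot = Some(value);
        slot.as_ref().unwrap()
    }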
- let llref = lv_datum.to_llref(); - DatumBlock::new(bcx, Datum::new(llref, ptr_ty, kind)) -} - -fn deref_multiple<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - datum: Datum<'tcx, Expr>, - times: usize) - -> DatumBlock<'blk, 'tcx, Expr> { - let mut bcx = bcx; - let mut datum = datum; - for i in 0..times { - let method_call = MethodCall::autoderef(expr.id, i as u32); - datum = unpack_datum!(bcx, deref_once(bcx, expr, datum, method_call)); - } - DatumBlock { bcx: bcx, datum: datum } -} - -fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - datum: Datum<'tcx, Expr>, - method_call: MethodCall) - -> DatumBlock<'blk, 'tcx, Expr> { - let ccx = bcx.ccx(); - - debug!("deref_once(expr={:?}, datum={:?}, method_call={:?})", - expr, datum, method_call); - - let mut bcx = bcx; - - // Check for overloaded deref. - let method = ccx.tcx().tables.borrow().method_map.get(&method_call).cloned(); - let datum = match method { - Some(method) => { - let method_ty = monomorphize_type(bcx, method.ty); - - // Overloaded. Invoke the deref() method, which basically - // converts from the `Smaht` pointer that we have into - // a `&T` pointer. We can then proceed down the normal - // path (below) to dereference that `&T`. - let datum = if method_call.autoderef == 0 { - datum - } else { - // Always perform an AutoPtr when applying an overloaded auto-deref - unpack_datum!(bcx, auto_ref(bcx, datum, expr)) - }; - - let ref_ty = // invoked methods have their LB regions instantiated - ccx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap(); - let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_deref"); - - bcx = Callee::method(bcx, method) - .call(bcx, expr.debug_loc(), - ArgOverloadedOp(datum, None), - Some(SaveIn(scratch.val))).bcx; - scratch.to_expr_datum() - } - None => { - // Not overloaded. We already have a pointer we know how to deref. - datum - } - }; - - let r = match datum.ty.sty { - ty::TyBox(content_ty) => { - // Make sure we have an lvalue datum here to get the - // proper cleanups scheduled - let datum = unpack_datum!( - bcx, datum.to_lvalue_datum(bcx, "deref", expr.id)); - - if type_is_sized(bcx.tcx(), content_ty) { - let ptr = load_ty(bcx, datum.val, datum.ty); - DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(datum.kind))) - } else { - // A fat pointer and a DST lvalue have the same representation - // just different types. Since there is no temporary for `*e` - // here (because it is unsized), we cannot emulate the sized - // object code path for running drop glue and free. Instead, - // we schedule cleanup for `e`, turning it into an lvalue. - - let lval = Lvalue::new("expr::deref_once ty_uniq"); - let datum = Datum::new(datum.val, content_ty, LvalueExpr(lval)); - DatumBlock::new(bcx, datum) - } - } - - ty::TyRawPtr(ty::TypeAndMut { ty: content_ty, .. }) | - ty::TyRef(_, ty::TypeAndMut { ty: content_ty, .. }) => { - let lval = Lvalue::new("expr::deref_once ptr"); - if type_is_sized(bcx.tcx(), content_ty) { - let ptr = datum.to_llscalarish(bcx); - - // Always generate an lvalue datum, even if datum.mode is - // an rvalue. This is because datum.mode is only an - // rvalue for non-owning pointers like &T or *T, in which - // case cleanup *is* scheduled elsewhere, by the true - // owner (or, in the case of *T, by the user). - DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(lval))) - } else { - // A fat pointer and a DST lvalue have the same representation - // just different types. 
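The overloaded branch of `deref_once` above is ordinary `Deref` sugar: `*p` on a smart pointer first invokes `deref` to obtain a `&T`, and the plain-pointer path then finishes the job. A sketch against the real `std::ops::Deref` (helper name invented):

    use std::ops::Deref;

    // One overloaded deref step: smart pointer in, plain reference out.
    fn deref_once<P: Deref>(ptr: &P) -> &P::Target {
        ptr.deref()
    }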
- DatumBlock::new(bcx, Datum::new(datum.val, content_ty, LvalueExpr(lval))) - } - } - - _ => { - span_bug!( - expr.span, - "deref invoked on expr of invalid type {:?}", - datum.ty); - } - }; - - debug!("deref_once(expr={}, method_call={:?}, result={:?})", - expr.id, method_call, r.datum); - - return r; -} - -#[derive(Debug)] -enum OverflowOp { - Add, - Sub, - Mul, - Shl, - Shr, -} - -impl OverflowOp { - fn codegen_strategy(&self) -> OverflowCodegen { - use self::OverflowCodegen::{ViaIntrinsic, ViaInputCheck}; - match *self { - OverflowOp::Add => ViaIntrinsic(OverflowOpViaIntrinsic::Add), - OverflowOp::Sub => ViaIntrinsic(OverflowOpViaIntrinsic::Sub), - OverflowOp::Mul => ViaIntrinsic(OverflowOpViaIntrinsic::Mul), - - OverflowOp::Shl => ViaInputCheck(OverflowOpViaInputCheck::Shl), - OverflowOp::Shr => ViaInputCheck(OverflowOpViaInputCheck::Shr), - } - } -} - -enum OverflowCodegen { - ViaIntrinsic(OverflowOpViaIntrinsic), - ViaInputCheck(OverflowOpViaInputCheck), -} - -enum OverflowOpViaInputCheck { Shl, Shr, } - -#[derive(Debug)] -enum OverflowOpViaIntrinsic { Add, Sub, Mul, } - -impl OverflowOpViaIntrinsic { - fn to_intrinsic<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>, lhs_ty: Ty) -> ValueRef { - let name = self.to_intrinsic_name(bcx.tcx(), lhs_ty); - bcx.ccx().get_intrinsic(&name) - } - fn to_intrinsic_name(&self, tcx: TyCtxt, ty: Ty) -> &'static str { - use syntax::ast::IntTy::*; - use syntax::ast::UintTy::*; - use rustc::ty::{TyInt, TyUint}; - - let new_sty = match ty.sty { - TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] { - "16" => TyInt(I16), - "32" => TyInt(I32), - "64" => TyInt(I64), - _ => bug!("unsupported target word size") - }, - TyUint(Us) => match &tcx.sess.target.target.target_pointer_width[..] { - "16" => TyUint(U16), - "32" => TyUint(U32), - "64" => TyUint(U64), - _ => bug!("unsupported target word size") - }, - ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(), - _ => bug!("tried to get overflow intrinsic for {:?} applied to non-int type", - *self) - }; - - match *self { - OverflowOpViaIntrinsic::Add => match new_sty { - TyInt(I8) => "llvm.sadd.with.overflow.i8", - TyInt(I16) => "llvm.sadd.with.overflow.i16", - TyInt(I32) => "llvm.sadd.with.overflow.i32", - TyInt(I64) => "llvm.sadd.with.overflow.i64", - - TyUint(U8) => "llvm.uadd.with.overflow.i8", - TyUint(U16) => "llvm.uadd.with.overflow.i16", - TyUint(U32) => "llvm.uadd.with.overflow.i32", - TyUint(U64) => "llvm.uadd.with.overflow.i64", - - _ => bug!(), - }, - OverflowOpViaIntrinsic::Sub => match new_sty { - TyInt(I8) => "llvm.ssub.with.overflow.i8", - TyInt(I16) => "llvm.ssub.with.overflow.i16", - TyInt(I32) => "llvm.ssub.with.overflow.i32", - TyInt(I64) => "llvm.ssub.with.overflow.i64", - - TyUint(U8) => "llvm.usub.with.overflow.i8", - TyUint(U16) => "llvm.usub.with.overflow.i16", - TyUint(U32) => "llvm.usub.with.overflow.i32", - TyUint(U64) => "llvm.usub.with.overflow.i64", - - _ => bug!(), - }, - OverflowOpViaIntrinsic::Mul => match new_sty { - TyInt(I8) => "llvm.smul.with.overflow.i8", - TyInt(I16) => "llvm.smul.with.overflow.i16", - TyInt(I32) => "llvm.smul.with.overflow.i32", - TyInt(I64) => "llvm.smul.with.overflow.i64", - - TyUint(U8) => "llvm.umul.with.overflow.i8", - TyUint(U16) => "llvm.umul.with.overflow.i16", - TyUint(U32) => "llvm.umul.with.overflow.i32", - TyUint(U64) => "llvm.umul.with.overflow.i64", - - _ => bug!(), - }, - } - } - - fn build_intrinsic_call<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>, - info: NodeIdAndSpan, - lhs_t: Ty<'tcx>, lhs: ValueRef, - rhs: ValueRef, - 
binop_debug_loc: DebugLoc) - -> (Block<'blk, 'tcx>, ValueRef) { - use rustc_const_math::{ConstMathErr, Op}; - - let llfn = self.to_intrinsic(bcx, lhs_t); - - let val = Call(bcx, llfn, &[lhs, rhs], binop_debug_loc); - let result = ExtractValue(bcx, val, 0); // iN operation result - let overflow = ExtractValue(bcx, val, 1); // i1 "did it overflow?" - - let cond = ICmp(bcx, llvm::IntEQ, overflow, C_integral(Type::i1(bcx.ccx()), 1, false), - binop_debug_loc); - - let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1"); - let expected = Call(bcx, expect, &[cond, C_bool(bcx.ccx(), false)], - binop_debug_loc); - - let op = match *self { - OverflowOpViaIntrinsic::Add => Op::Add, - OverflowOpViaIntrinsic::Sub => Op::Sub, - OverflowOpViaIntrinsic::Mul => Op::Mul - }; - - let bcx = - base::with_cond(bcx, expected, |bcx| - controlflow::trans_fail(bcx, info, - InternedString::new(ConstMathErr::Overflow(op).description()))); - - (bcx, result) - } -} - -impl OverflowOpViaInputCheck { - fn build_with_input_check<'blk, 'tcx>(&self, - bcx: Block<'blk, 'tcx>, - info: NodeIdAndSpan, - lhs_t: Ty<'tcx>, - lhs: ValueRef, - rhs: ValueRef, - binop_debug_loc: DebugLoc) - -> (Block<'blk, 'tcx>, ValueRef) - { - use rustc_const_math::{ConstMathErr, Op}; - - let lhs_llty = val_ty(lhs); - let rhs_llty = val_ty(rhs); - - // Panic if any bits are set outside of bits that we always - // mask in. - // - // Note that the mask's value is derived from the LHS type - // (since that is where the 32/64 distinction is relevant) but - // the mask's type must match the RHS type (since they will - // both be fed into an and-binop) - let invert_mask = shift_mask_val(bcx, lhs_llty, rhs_llty, true); - - let outer_bits = And(bcx, rhs, invert_mask, binop_debug_loc); - let cond = build_nonzero_check(bcx, outer_bits, binop_debug_loc); - let (result, op) = match *self { - OverflowOpViaInputCheck::Shl => - (build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc), Op::Shl), - OverflowOpViaInputCheck::Shr => - (build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc), Op::Shr) - }; - let bcx = - base::with_cond(bcx, cond, |bcx| - controlflow::trans_fail(bcx, info, - InternedString::new(ConstMathErr::Overflow(op).description()))); - - (bcx, result) - } -} - -// Check if an integer or vector contains a nonzero element. -fn build_nonzero_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - value: ValueRef, - binop_debug_loc: DebugLoc) -> ValueRef { - let llty = val_ty(value); - let kind = llty.kind(); - match kind { - TypeKind::Integer => ICmp(bcx, llvm::IntNE, value, C_null(llty), binop_debug_loc), - TypeKind::Vector => { - // Check if any elements of the vector are nonzero by treating - // it as a wide integer and checking if the integer is nonzero. 
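The comment above describes collapsing a vector into one wide integer and testing that against zero. The same trick sketched in plain Rust on a byte array rather than an LLVM SIMD value (the emitted code uses a BitCast where this uses from_ne_bytes):

fn any_nonzero(lanes: [u8; 8]) -> bool {
    // Any set bit in any lane makes the reinterpreted integer nonzero,
    // mirroring the BitCast + ICmp-against-null sequence above.
    u64::from_ne_bytes(lanes) != 0
}

fn main() {
    assert!(!any_nonzero([0; 8]));
    assert!(any_nonzero([0, 0, 0, 1, 0, 0, 0, 0]));
}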
- let width = llty.vector_length() as u64 * llty.element_type().int_width(); - let int_value = BitCast(bcx, value, Type::ix(bcx.ccx(), width)); - build_nonzero_check(bcx, int_value, binop_debug_loc) - }, - _ => bug!("build_nonzero_check: expected Integer or Vector, found {:?}", kind), - } -} - -fn with_overflow_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, oop: OverflowOp, info: NodeIdAndSpan, - lhs_t: Ty<'tcx>, lhs: ValueRef, - rhs: ValueRef, - binop_debug_loc: DebugLoc) - -> (Block<'blk, 'tcx>, ValueRef) { - if bcx.unreachable.get() { return (bcx, _Undef(lhs)); } - if bcx.ccx().check_overflow() { - - match oop.codegen_strategy() { - OverflowCodegen::ViaIntrinsic(oop) => - oop.build_intrinsic_call(bcx, info, lhs_t, lhs, rhs, binop_debug_loc), - OverflowCodegen::ViaInputCheck(oop) => - oop.build_with_input_check(bcx, info, lhs_t, lhs, rhs, binop_debug_loc), - } - } else { - let res = match oop { - OverflowOp::Add => Add(bcx, lhs, rhs, binop_debug_loc), - OverflowOp::Sub => Sub(bcx, lhs, rhs, binop_debug_loc), - OverflowOp::Mul => Mul(bcx, lhs, rhs, binop_debug_loc), - - OverflowOp::Shl => - build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc), - OverflowOp::Shr => - build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc), - }; - (bcx, res) - } -} - -/// We categorize expressions into three kinds. The distinction between -/// lvalue/rvalue is fundamental to the language. The distinction between the -/// two kinds of rvalues is an artifact of trans which reflects how we will -/// generate code for that kind of expression. See trans/expr.rs for more -/// information. -#[derive(Copy, Clone)] -enum ExprKind { - Lvalue, - RvalueDps, - RvalueDatum, - RvalueStmt -} - -fn expr_kind<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, expr: &hir::Expr) -> ExprKind { - if tcx.is_method_call(expr.id) { - // Overloaded operations are generally calls, and hence they are - // generated via DPS, but there are a few exceptions: - return match expr.node { - // `a += b` has a unit result. - hir::ExprAssignOp(..) => ExprKind::RvalueStmt, - - // the deref method invoked for `*a` always yields an `&T` - hir::ExprUnary(hir::UnDeref, _) => ExprKind::Lvalue, - - // the index method invoked for `a[i]` always yields an `&T` - hir::ExprIndex(..) => ExprKind::Lvalue, - - // in the general case, result could be any type, use DPS - _ => ExprKind::RvalueDps - }; - } - - match expr.node { - hir::ExprPath(..) => { - match tcx.expect_def(expr.id) { - // Put functions and ctors with the ADTs, as they - // are zero-sized, so DPS is the cheapest option. - Def::Struct(..) | Def::Variant(..) | - Def::Fn(..) | Def::Method(..) => { - ExprKind::RvalueDps - } - - // Note: there is actually a good case to be made that - // DefArg's, particularly those of immediate type, ought to - // considered rvalues. - Def::Static(..) | - Def::Upvar(..) | - Def::Local(..) => ExprKind::Lvalue, - - Def::Const(..) | - Def::AssociatedConst(..) => ExprKind::RvalueDatum, - - def => { - span_bug!( - expr.span, - "uncategorized def for expr {}: {:?}", - expr.id, - def); - } - } - } - - hir::ExprType(ref expr, _) => { - expr_kind(tcx, expr) - } - - hir::ExprUnary(hir::UnDeref, _) | - hir::ExprField(..) | - hir::ExprTupField(..) | - hir::ExprIndex(..) => { - ExprKind::Lvalue - } - - hir::ExprCall(..) | - hir::ExprMethodCall(..) | - hir::ExprStruct(..) | - hir::ExprTup(..) | - hir::ExprIf(..) | - hir::ExprMatch(..) | - hir::ExprClosure(..) | - hir::ExprBlock(..) | - hir::ExprRepeat(..) | - hir::ExprVec(..) 
=> { - ExprKind::RvalueDps - } - - hir::ExprLit(ref lit) if lit.node.is_str() => { - ExprKind::RvalueDps - } - - hir::ExprBreak(..) | - hir::ExprAgain(..) | - hir::ExprRet(..) | - hir::ExprWhile(..) | - hir::ExprLoop(..) | - hir::ExprAssign(..) | - hir::ExprInlineAsm(..) | - hir::ExprAssignOp(..) => { - ExprKind::RvalueStmt - } - - hir::ExprLit(_) | // Note: LitStr is carved out above - hir::ExprUnary(..) | - hir::ExprBox(_) | - hir::ExprAddrOf(..) | - hir::ExprBinary(..) | - hir::ExprCast(..) => { - ExprKind::RvalueDatum - } - } -} diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 080844782f205..6a072c84dd9b3 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -21,21 +21,19 @@ use rustc::ty::subst::{Substs}; use rustc::traits; use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; use adt; -use adt::GetDtorType; // for tcx.dtor_type() use base::*; use build::*; -use callee::{Callee, ArgVals}; -use cleanup; -use cleanup::CleanupMethods; +use callee::{Callee}; use common::*; use debuginfo::DebugLoc; -use expr; use machine::*; use monomorphize; use trans_item::TransItem; +use tvec; use type_of::{type_of, sizing_type_of, align_of}; use type_::Type; use value::Value; +use Disr; use arena::TypedArena; use syntax_pos::DUMMY_SP; @@ -51,7 +49,7 @@ pub fn trans_exchange_free_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let def_id = langcall(bcx.tcx(), None, "", ExchangeFreeFnLangItem); let args = [PointerCast(bcx, v, Type::i8p(bcx.ccx())), size, align]; Callee::def(bcx.ccx(), def_id, Substs::empty(bcx.tcx())) - .call(bcx, debug_loc, ArgVals(&args), None).bcx + .call(bcx, debug_loc, &args, None).bcx } pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>, @@ -133,20 +131,18 @@ pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v: ValueRef, t: Ty<'tcx>, debug_loc: DebugLoc) -> Block<'blk, 'tcx> { - drop_ty_core(bcx, v, t, debug_loc, false, None) + drop_ty_core(bcx, v, t, debug_loc, false) } pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v: ValueRef, t: Ty<'tcx>, debug_loc: DebugLoc, - skip_dtor: bool, - drop_hint: Option) + skip_dtor: bool) -> Block<'blk, 'tcx> { // NB: v is an *alias* of type t here, not a direct value. 
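drop_ty_core below emits nothing unless fcx.type_needs_drop(t) holds, and the value is handed to the glue by alias, as the NB above says. Today's standard library exposes the same query as mem::needs_drop; a hedged sketch of that dispatch, not the compiler's actual code:

use std::mem::{needs_drop, ManuallyDrop};
use std::ptr::drop_in_place;

// Roughly the shape of drop_ty: skip types without drop glue, otherwise
// run the destructor through a pointer alias to the value.
unsafe fn drop_ty<T>(v: *mut T) {
    if needs_drop::<T>() {
        drop_in_place(v);
    }
}

fn main() {
    let mut s = ManuallyDrop::new(String::from("x"));
    unsafe { drop_ty(&mut *s as *mut String) }; // runs String's destructor
    unsafe { drop_ty(&mut 0u32 as *mut u32) };  // no-op: u32 needs no drop
}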
- debug!("drop_ty_core(t={:?}, skip_dtor={} drop_hint={:?})", t, skip_dtor, drop_hint); + debug!("drop_ty_core(t={:?}, skip_dtor={})", t, skip_dtor); let _icx = push_ctxt("drop_ty"); - let mut bcx = bcx; if bcx.fcx.type_needs_drop(t) { let ccx = bcx.ccx(); let g = if skip_dtor { @@ -162,23 +158,8 @@ pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v }; - match drop_hint { - Some(drop_hint) => { - let hint_val = load_ty(bcx, drop_hint.value(), bcx.tcx().types.u8); - let moved_val = - C_integral(Type::i8(bcx.ccx()), adt::DTOR_MOVED_HINT as u64, false); - let may_need_drop = - ICmp(bcx, llvm::IntNE, hint_val, moved_val, DebugLoc::None); - bcx = with_cond(bcx, may_need_drop, |cx| { - Call(cx, glue, &[ptr], debug_loc); - cx - }) - } - None => { - // No drop-hint ==> call standard drop glue - Call(bcx, glue, &[ptr], debug_loc); - } - } + // No drop-hint ==> call standard drop glue + Call(bcx, glue, &[ptr], debug_loc); } bcx } @@ -193,7 +174,7 @@ pub fn drop_ty_immediate<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let vp = alloc_ty(bcx, t, ""); call_lifetime_start(bcx, vp); store_ty(bcx, v, vp, t); - let bcx = drop_ty_core(bcx, vp, t, debug_loc, skip_dtor, None); + let bcx = drop_ty_core(bcx, vp, t, debug_loc, skip_dtor); call_lifetime_end(bcx, vp); bcx } @@ -240,10 +221,6 @@ fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g, TransItem::DropGlue(g).to_raw_string(), ccx.codegen_unit().name()); - - ccx.stats().n_fallback_instantiations.set(ccx.stats() - .n_fallback_instantiations - .get() + 1); } } @@ -273,7 +250,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arena = TypedArena::new(); fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &arena); - let bcx = fcx.init(false, None); + let bcx = fcx.init(false); ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1); // All glue functions take values passed *by alias*; this is a @@ -288,40 +265,6 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fcx.finish(bcx, DebugLoc::None); } - -fn trans_struct_drop_flag<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - t: Ty<'tcx>, - struct_data: ValueRef) - -> Block<'blk, 'tcx> { - assert!(type_is_sized(bcx.tcx(), t), "Precondition: caller must ensure t is sized"); - - let repr = adt::represent_type(bcx.ccx(), t); - let drop_flag = unpack_datum!(bcx, adt::trans_drop_flag_ptr(bcx, &repr, struct_data)); - let loaded = load_ty(bcx, drop_flag.val, bcx.tcx().dtor_type()); - let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type()); - let init_val = C_integral(drop_flag_llty, adt::DTOR_NEEDED as u64, false); - - let bcx = if !bcx.ccx().check_drop_flag_for_sanity() { - bcx - } else { - let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type()); - let done_val = C_integral(drop_flag_llty, adt::DTOR_DONE as u64, false); - let not_init = ICmp(bcx, llvm::IntNE, loaded, init_val, DebugLoc::None); - let not_done = ICmp(bcx, llvm::IntNE, loaded, done_val, DebugLoc::None); - let drop_flag_neither_initialized_nor_cleared = - And(bcx, not_init, not_done, DebugLoc::None); - with_cond(bcx, drop_flag_neither_initialized_nor_cleared, |cx| { - let llfn = cx.ccx().get_intrinsic(&("llvm.debugtrap")); - Call(cx, llfn, &[], DebugLoc::None); - cx - }) - }; - - let drop_flag_dtor_needed = ICmp(bcx, llvm::IntEQ, loaded, init_val, DebugLoc::None); - with_cond(bcx, drop_flag_dtor_needed, |cx| { - trans_struct_drop(cx, t, struct_data) - }) -} fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, v0: ValueRef) @@ -343,14 +286,17 @@ fn 
trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // Issue #23611: schedule cleanup of contents, re-inspecting the // discriminant (if any) in case of variant swap in drop code. - bcx.fcx.schedule_drop_adt_contents(cleanup::CustomScope(contents_scope), v0, t); + bcx.fcx.schedule_drop_adt_contents(contents_scope, v0, t); let (sized_args, unsized_args); let args: &[ValueRef] = if type_is_sized(tcx, t) { sized_args = [v0]; &sized_args } else { - unsized_args = [Load(bcx, expr::get_dataptr(bcx, v0)), Load(bcx, expr::get_meta(bcx, v0))]; + unsized_args = [ + Load(bcx, get_dataptr(bcx, v0)), + Load(bcx, get_meta(bcx, v0)) + ]; &unsized_args }; @@ -364,7 +310,7 @@ fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, }; let dtor_did = def.destructor().unwrap(); bcx = Callee::def(bcx.ccx(), dtor_did, vtbl.substs) - .call(bcx, DebugLoc::None, ArgVals(args), None).bcx; + .call(bcx, DebugLoc::None, args, None).bcx; bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope) } @@ -395,10 +341,10 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, // Don't use type_of::sizing_type_of because that expects t to be sized. assert!(!t.is_simd()); let repr = adt::represent_type(ccx, t); - let sizing_type = adt::sizing_type_context_of(ccx, &repr, true); + let sizing_type = adt::sizing_type_of(ccx, &repr, true); debug!("DST {} sizing_type: {:?}", t, sizing_type); - let sized_size = llsize_of_alloc(ccx, sizing_type.prefix()); - let sized_align = llalign_of_min(ccx, sizing_type.prefix()); + let sized_size = llsize_of_alloc(ccx, sizing_type); + let sized_align = llalign_of_min(ccx, sizing_type); debug!("DST {} statically sized prefix size: {} align: {}", t, sized_size, sized_align); let sized_size = C_uint(ccx, sized_size); @@ -418,15 +364,7 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, // here. But this is where the add would go.) // Return the sum of sizes and max of aligns. - let mut size = bcx.add(sized_size, unsized_size); - - // Issue #27023: If there is a drop flag, *now* we add 1 - // to the size. (We can do this without adding any - // padding because drop flags do not have any alignment - // constraints.) - if sizing_type.needs_drop_flag() { - size = bcx.add(size, C_uint(bcx.ccx(), 1_u64)); - } + let size = bcx.add(sized_size, unsized_size); // Choose max of two known alignments (combined value must // be aligned according to more restrictive of the two). @@ -492,9 +430,6 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueK // must definitely check for special bit-patterns corresponding to // the special dtor markings. - let inttype = Type::int(bcx.ccx()); - let dropped_pattern = C_integral(inttype, adt::DTOR_DONE_U64, false); - match t.sty { ty::TyBox(content_ty) => { // Support for TyBox is built-in and its drop glue is @@ -502,65 +437,28 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueK // a safe-guard, assert TyBox not used with TyContents. 
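For a sized Box<T>, the rewritten TyBox arm below reduces to: run the content's drop glue, then free the allocation, skipping the free when the size is zero because a Box of a ZST does not allocate. A self-contained sketch with today's std::alloc API standing in for trans_exchange_free; drop_box is an illustrative name:

use std::alloc::{alloc, dealloc, Layout};
use std::ptr;

// Mirrors the sized branch: drop_ty on the contents, then an
// exchange_free sized by the layout, guarded like `needs_free`.
unsafe fn drop_box<T>(llbox: *mut T) {
    ptr::drop_in_place(llbox); // the content's drop glue
    let layout = Layout::new::<T>();
    if layout.size() != 0 {
        dealloc(llbox as *mut u8, layout);
    }
}

fn main() {
    unsafe {
        let p = alloc(Layout::new::<String>()) as *mut String;
        p.write(String::from("boxed"));
        drop_box(p);
    }
}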
assert!(!skip_dtor); if !type_is_sized(bcx.tcx(), content_ty) { - let llval = expr::get_dataptr(bcx, v0); + let llval = get_dataptr(bcx, v0); let llbox = Load(bcx, llval); - let llbox_as_usize = PtrToInt(bcx, llbox, Type::int(bcx.ccx())); - let drop_flag_not_dropped_already = - ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None); - with_cond(bcx, drop_flag_not_dropped_already, |bcx| { - let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None); - let info = expr::get_meta(bcx, v0); - let info = Load(bcx, info); - let (llsize, llalign) = - size_and_align_of_dst(&bcx.build(), content_ty, info); - - // `Box` does not allocate. - let needs_free = ICmp(bcx, - llvm::IntNE, - llsize, - C_uint(bcx.ccx(), 0u64), - DebugLoc::None); - with_cond(bcx, needs_free, |bcx| { - trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None) - }) + let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None); + let info = get_meta(bcx, v0); + let info = Load(bcx, info); + let (llsize, llalign) = + size_and_align_of_dst(&bcx.build(), content_ty, info); + + // `Box` does not allocate. + let needs_free = ICmp(bcx, + llvm::IntNE, + llsize, + C_uint(bcx.ccx(), 0u64), + DebugLoc::None); + with_cond(bcx, needs_free, |bcx| { + trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None) }) } else { let llval = v0; let llbox = Load(bcx, llval); - let llbox_as_usize = PtrToInt(bcx, llbox, inttype); - let drop_flag_not_dropped_already = - ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None); - with_cond(bcx, drop_flag_not_dropped_already, |bcx| { - let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None); - trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None) - }) - } - } - ty::TyStruct(def, _) | ty::TyEnum(def, _) => { - match (def.dtor_kind(), skip_dtor) { - (ty::TraitDtor(true), false) => { - // FIXME(16758) Since the struct is unsized, it is hard to - // find the drop flag (which is at the end of the struct). - // Lets just ignore the flag and pretend everything will be - // OK. - if type_is_sized(bcx.tcx(), t) { - trans_struct_drop_flag(bcx, t, v0) - } else { - // Give the user a heads up that we are doing something - // stupid and dangerous. - bcx.sess().warn(&format!("Ignoring drop flag in destructor for {} \ - because the struct is unsized. See issue \ - #16758", t)); - trans_struct_drop(bcx, t, v0) - } - } - (ty::TraitDtor(false), false) => { - trans_struct_drop(bcx, t, v0) - } - (ty::NoDtor, _) | (_, true) => { - // No dtor? Just the default case - iter_structural_ty(bcx, v0, t, |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None)) - } + let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None); + trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None) } } ty::TyTrait(..) => { @@ -568,8 +466,8 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueK // versus without calling Drop::drop. Assert caller is // okay with always calling the Drop impl, if any. 
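This TyTrait arm loads the destructor out of the first vtable slot and calls it on the data pointer. A toy version with a hand-rolled one-entry vtable; TinyVtable and drop_erased are illustrative names, not compiler or std API:

use std::mem::ManuallyDrop;
use std::ptr::drop_in_place;

// One vtable slot: the type-erased destructor, like `dtor` above.
struct TinyVtable {
    drop_fn: unsafe fn(*mut ()),
}

unsafe fn drop_erased<T>(data: *mut ()) {
    drop_in_place(data as *mut T);
}

fn main() {
    let mut value = ManuallyDrop::new(vec![1, 2, 3]);
    let vtable = TinyVtable { drop_fn: drop_erased::<Vec<i32>> };
    // Dynamic drop: the caller only holds a (data, vtable) pair.
    unsafe { (vtable.drop_fn)(&mut *value as *mut Vec<i32> as *mut ()) };
}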
assert!(!skip_dtor); - let data_ptr = expr::get_dataptr(bcx, v0); - let vtable_ptr = Load(bcx, expr::get_meta(bcx, v0)); + let data_ptr = get_dataptr(bcx, v0); + let vtable_ptr = Load(bcx, get_meta(bcx, v0)); let dtor = Load(bcx, vtable_ptr); Call(bcx, dtor, @@ -577,15 +475,159 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueK DebugLoc::None); bcx } + ty::TyStruct(def, _) | ty::TyEnum(def, _) + if def.dtor_kind().is_present() && !skip_dtor => { + trans_struct_drop(bcx, t, v0) + } _ => { if bcx.fcx.type_needs_drop(t) { - iter_structural_ty(bcx, - v0, - t, - |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None)) + drop_structural_ty(bcx, v0, t) } else { bcx } } } } + +// Iterates through the elements of a structural type, dropping them. +fn drop_structural_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, + av: ValueRef, + t: Ty<'tcx>) + -> Block<'blk, 'tcx> { + let _icx = push_ctxt("drop_structural_ty"); + + fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>, + repr: &adt::Repr<'tcx>, + av: adt::MaybeSizedValue, + variant: ty::VariantDef<'tcx>, + substs: &Substs<'tcx>) + -> Block<'blk, 'tcx> { + let _icx = push_ctxt("iter_variant"); + let tcx = cx.tcx(); + let mut cx = cx; + + for (i, field) in variant.fields.iter().enumerate() { + let arg = monomorphize::field_ty(tcx, substs, field); + cx = drop_ty(cx, + adt::trans_field_ptr(cx, repr, av, Disr::from(variant.disr_val), i), + arg, DebugLoc::None); + } + return cx; + } + + let value = if type_is_sized(cx.tcx(), t) { + adt::MaybeSizedValue::sized(av) + } else { + let data = Load(cx, get_dataptr(cx, av)); + let info = Load(cx, get_meta(cx, av)); + adt::MaybeSizedValue::unsized_(data, info) + }; + + let mut cx = cx; + match t.sty { + ty::TyStruct(..) => { + let repr = adt::represent_type(cx.ccx(), t); + let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None); + for (i, &Field(_, field_ty)) in fields.iter().enumerate() { + let llfld_a = adt::trans_field_ptr(cx, &repr, value, Disr::from(discr), i); + + let val = if type_is_sized(cx.tcx(), field_ty) { + llfld_a + } else { + let scratch = alloc_ty(cx, field_ty, "__fat_ptr_iter"); + Store(cx, llfld_a, get_dataptr(cx, scratch)); + Store(cx, value.meta, get_meta(cx, scratch)); + scratch + }; + cx = drop_ty(cx, val, field_ty, DebugLoc::None); + } + } + ty::TyClosure(_, ref substs) => { + let repr = adt::represent_type(cx.ccx(), t); + for (i, upvar_ty) in substs.upvar_tys.iter().enumerate() { + let llupvar = adt::trans_field_ptr(cx, &repr, value, Disr(0), i); + cx = drop_ty(cx, llupvar, upvar_ty, DebugLoc::None); + } + } + ty::TyArray(_, n) => { + let base = get_dataptr(cx, value.value); + let len = C_uint(cx.ccx(), n); + let unit_ty = t.sequence_element_type(cx.tcx()); + cx = tvec::slice_for_each(cx, base, unit_ty, len, + |bb, vv| drop_ty(bb, vv, unit_ty, DebugLoc::None)); + } + ty::TySlice(_) | ty::TyStr => { + let unit_ty = t.sequence_element_type(cx.tcx()); + cx = tvec::slice_for_each(cx, value.value, unit_ty, value.meta, + |bb, vv| drop_ty(bb, vv, unit_ty, DebugLoc::None)); + } + ty::TyTuple(ref args) => { + let repr = adt::represent_type(cx.ccx(), t); + for (i, arg) in args.iter().enumerate() { + let llfld_a = adt::trans_field_ptr(cx, &repr, value, Disr(0), i); + cx = drop_ty(cx, llfld_a, *arg, DebugLoc::None); + } + } + ty::TyEnum(en, substs) => { + let fcx = cx.fcx; + let ccx = fcx.ccx; + + let repr = adt::represent_type(ccx, t); + let n_variants = en.variants.len(); + + // NB: we must hit the discriminant first so that structural + // comparison know not to 
proceed when the discriminants differ. + + match adt::trans_switch(cx, &repr, av, false) { + (adt::BranchKind::Single, None) => { + if n_variants != 0 { + assert!(n_variants == 1); + cx = iter_variant(cx, &repr, adt::MaybeSizedValue::sized(av), + &en.variants[0], substs); + } + } + (adt::BranchKind::Switch, Some(lldiscrim_a)) => { + cx = drop_ty(cx, lldiscrim_a, cx.tcx().types.isize, DebugLoc::None); + + // Create a fall-through basic block for the "else" case of + // the switch instruction we're about to generate. Note that + // we do **not** use an Unreachable instruction here, even + // though most of the time this basic block will never be hit. + // + // When an enum is dropped it's contents are currently + // overwritten to DTOR_DONE, which means the discriminant + // could have changed value to something not within the actual + // range of the discriminant. Currently this function is only + // used for drop glue so in this case we just return quickly + // from the outer function, and any other use case will only + // call this for an already-valid enum in which case the `ret + // void` will never be hit. + let ret_void_cx = fcx.new_block("enum-iter-ret-void"); + RetVoid(ret_void_cx, DebugLoc::None); + let llswitch = Switch(cx, lldiscrim_a, ret_void_cx.llbb, n_variants); + let next_cx = fcx.new_block("enum-iter-next"); + + for variant in &en.variants { + let variant_cx = fcx.new_block(&format!("enum-iter-variant-{}", + &variant.disr_val + .to_string())); + let case_val = adt::trans_case(cx, &repr, Disr::from(variant.disr_val)); + AddCase(llswitch, case_val, variant_cx.llbb); + let variant_cx = iter_variant(variant_cx, + &repr, + value, + variant, + substs); + Br(variant_cx, next_cx.llbb, DebugLoc::None); + } + cx = next_cx; + } + _ => ccx.sess().unimpl("value from adt::trans_switch in drop_structural_ty"), + } + } + _ => { + cx.sess().unimpl(&format!("type in drop_structural_ty: {}", t)) + } + } + return cx; +} diff --git a/src/librustc_trans/inline.rs b/src/librustc_trans/inline.rs deleted file mode 100644 index 8581fccf10ab5..0000000000000 --- a/src/librustc_trans/inline.rs +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
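At the source level, the Switch over the discriminant that drop_structural_ty emits for enums (above) behaves like a match that drops the fields of whichever variant is live:

enum Payload {
    Text(String),
    Bytes(Vec<u8>),
    Empty,
}

// Each arm drops exactly one variant's fields -- the surface analogue of
// the per-variant basic blocks hung off the generated Switch.
fn drop_payload(p: Payload) {
    match p {
        Payload::Text(s) => drop(s),
        Payload::Bytes(b) => drop(b),
        Payload::Empty => {}
    }
}

fn main() {
    drop_payload(Payload::Text(String::from("hi")));
    drop_payload(Payload::Empty);
}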
- -use rustc::hir::def_id::DefId; -use base::push_ctxt; -use common::*; -use monomorphize::Instance; - -use rustc::dep_graph::DepNode; - -fn instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> Option { - debug!("instantiate_inline({:?})", fn_id); - let _icx = push_ctxt("instantiate_inline"); - let tcx = ccx.tcx(); - let _task = tcx.dep_graph.in_task(DepNode::TransInlinedItem(fn_id)); - - tcx.sess - .cstore - .maybe_get_item_ast(tcx, fn_id) - .map(|(_, inline_id)| { - tcx.map.local_def_id(inline_id) - }) -} - -pub fn get_local_instance(ccx: &CrateContext, fn_id: DefId) - -> Option { - if let Some(_) = ccx.tcx().map.as_local_node_id(fn_id) { - Some(fn_id) - } else { - instantiate_inline(ccx, fn_id) - } -} - -pub fn maybe_instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> DefId { - get_local_instance(ccx, fn_id).unwrap_or(fn_id) -} - -pub fn maybe_inline_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - instance: Instance<'tcx>) -> Instance<'tcx> { - let def_id = maybe_instantiate_inline(ccx, instance.def); - Instance { - def: def_id, - substs: instance.substs - } -} diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index ecee470551059..7faff98aea442 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -14,21 +14,14 @@ use arena::TypedArena; use intrinsics::{self, Intrinsic}; use libc; use llvm; -use llvm::{ValueRef, TypeKind}; -use rustc::ty::subst::Substs; +use llvm::{ValueRef}; use abi::{Abi, FnType}; use adt; use base::*; use build::*; -use callee::{self, Callee}; -use cleanup; -use cleanup::CleanupMethods; use common::*; -use consts; -use datum::*; use debuginfo::DebugLoc; use declare; -use expr; use glue; use type_of; use machine; @@ -37,11 +30,9 @@ use rustc::ty::{self, Ty}; use Disr; use rustc::hir; use syntax::ast; -use syntax::ptr::P; use syntax::parse::token; use rustc::session::Session; -use rustc_const_eval::fatal_const_eval_err; use syntax_pos::{Span, DUMMY_SP}; use std::cmp::Ordering; @@ -98,8 +89,8 @@ fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option { pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, callee_ty: Ty<'tcx>, fn_ty: &FnType, - args: callee::CallArgs<'a, 'tcx>, - dest: expr::Dest, + llargs: &[ValueRef], + llresult: ValueRef, call_debug_location: DebugLoc) -> Result<'blk, 'tcx> { let fcx = bcx.fcx; @@ -120,217 +111,26 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, let name = tcx.item_name(def_id).as_str(); let span = match call_debug_location { - DebugLoc::At(_, span) | DebugLoc::ScopeAt(_, span) => span, + DebugLoc::ScopeAt(_, span) => span, DebugLoc::None => { span_bug!(fcx.span.unwrap_or(DUMMY_SP), "intrinsic `{}` called with missing span", name); } }; - let cleanup_scope = fcx.push_custom_cleanup_scope(); - - // For `transmute` we can just trans the input expr directly into dest - if name == "transmute" { - let llret_ty = type_of::type_of(ccx, ret_ty); - match args { - callee::ArgExprs(arg_exprs) => { - assert_eq!(arg_exprs.len(), 1); - - let (in_type, out_type) = (substs.types[0], - substs.types[1]); - let llintype = type_of::type_of(ccx, in_type); - let llouttype = type_of::type_of(ccx, out_type); - - let in_type_size = machine::llbitsize_of_real(ccx, llintype); - let out_type_size = machine::llbitsize_of_real(ccx, llouttype); - - if let ty::TyFnDef(def_id, substs, _) = in_type.sty { - if out_type_size != 0 { - // FIXME #19925 Remove this hack after a release cycle. 
- let _ = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0])); - let llfn = Callee::def(ccx, def_id, substs).reify(ccx).val; - let llfnty = val_ty(llfn); - let llresult = match dest { - expr::SaveIn(d) => d, - expr::Ignore => alloc_ty(bcx, out_type, "ret") - }; - Store(bcx, llfn, PointerCast(bcx, llresult, llfnty.ptr_to())); - if dest == expr::Ignore { - bcx = glue::drop_ty(bcx, llresult, out_type, - call_debug_location); - } - fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean(); - fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope); - return Result::new(bcx, llresult); - } - } - - // This should be caught by the intrinsicck pass - assert_eq!(in_type_size, out_type_size); - - let nonpointer_nonaggregate = |llkind: TypeKind| -> bool { - use llvm::TypeKind::*; - match llkind { - Half | Float | Double | X86_FP80 | FP128 | - PPC_FP128 | Integer | Vector | X86_MMX => true, - _ => false - } - }; - - // An approximation to which types can be directly cast via - // LLVM's bitcast. This doesn't cover pointer -> pointer casts, - // but does, importantly, cover SIMD types. - let in_kind = llintype.kind(); - let ret_kind = llret_ty.kind(); - let bitcast_compatible = - (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || { - in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer - }; - - let dest = if bitcast_compatible { - // if we're here, the type is scalar-like (a primitive, a - // SIMD type or a pointer), and so can be handled as a - // by-value ValueRef and can also be directly bitcast to the - // target type. Doing this special case makes conversions - // like `u32x4` -> `u64x2` much nicer for LLVM and so more - // efficient (these are done efficiently implicitly in C - // with the `__m128i` type and so this means Rust doesn't - // lose out there). - let expr = &arg_exprs[0]; - let datum = unpack_datum!(bcx, expr::trans(bcx, expr)); - let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp")); - let val = if datum.kind.is_by_ref() { - load_ty(bcx, datum.val, datum.ty) - } else { - from_immediate(bcx, datum.val) - }; - - let cast_val = BitCast(bcx, val, llret_ty); - - match dest { - expr::SaveIn(d) => { - // this often occurs in a sequence like `Store(val, - // d); val2 = Load(d)`, so disappears easily. - Store(bcx, cast_val, d); - } - expr::Ignore => {} - } - dest - } else { - // The types are too complicated to do with a by-value - // bitcast, so pointer cast instead. We need to cast the - // dest so the types work out. - let dest = match dest { - expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())), - expr::Ignore => expr::Ignore - }; - bcx = expr::trans_into(bcx, &arg_exprs[0], dest); - dest - }; - - fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean(); - fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope); - - return match dest { - expr::SaveIn(d) => Result::new(bcx, d), - expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to())) - }; - - } - - _ => { - bug!("expected expr as argument for transmute"); - } - } - } - - // For `move_val_init` we can evaluate the destination address - // (the first argument) and then trans the source value (the - // second argument) directly into the resulting destination - // address. 
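The bitcast fast path above fires only when both sides are scalar-like and the sizes agree; that size equality is exactly what the intrinsicck pass asserts for every transmute. The classic same-size case on current Rust:

fn main() {
    let x = 1.0f32;
    // Same-size, by-value reinterpretation -- the BitCast case above.
    let bits: u32 = unsafe { std::mem::transmute(x) };
    assert_eq!(bits, 0x3f80_0000);
    // The safe wrapper that covers this particular pattern today:
    assert_eq!(x.to_bits(), bits);
}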
- if name == "move_val_init" { - if let callee::ArgExprs(ref exprs) = args { - let (dest_expr, source_expr) = if exprs.len() != 2 { - bug!("expected two exprs as arguments for `move_val_init` intrinsic"); - } else { - (&exprs[0], &exprs[1]) - }; - - // evaluate destination address - let dest_datum = unpack_datum!(bcx, expr::trans(bcx, dest_expr)); - let dest_datum = unpack_datum!( - bcx, dest_datum.to_rvalue_datum(bcx, "arg")); - let dest_datum = unpack_datum!( - bcx, dest_datum.to_appropriate_datum(bcx)); - - // `expr::trans_into(bcx, expr, dest)` is equiv to - // - // `trans(bcx, expr).store_to_dest(dest)`, - // - // which for `dest == expr::SaveIn(addr)`, is equivalent to: - // - // `trans(bcx, expr).store_to(bcx, addr)`. - let lldest = expr::Dest::SaveIn(dest_datum.val); - bcx = expr::trans_into(bcx, source_expr, lldest); - - let llresult = C_nil(ccx); - fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope); - - return Result::new(bcx, llresult); - } else { - bug!("expected two exprs as arguments for `move_val_init` intrinsic"); - } - } - - // save the actual AST arguments for later (some places need to do - // const-evaluation on them) - let expr_arguments = match args { - callee::ArgExprs(args) => Some(args), - _ => None, - }; - - // Push the arguments. - let mut llargs = Vec::new(); - bcx = callee::trans_args(bcx, - Abi::RustIntrinsic, - fn_ty, - &mut callee::Intrinsic, - args, - &mut llargs, - cleanup::CustomScope(cleanup_scope)); - - fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean(); - // These are the only intrinsic functions that diverge. if name == "abort" { let llfn = ccx.get_intrinsic(&("llvm.trap")); Call(bcx, llfn, &[], call_debug_location); - fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope); Unreachable(bcx); return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to())); } else if &name[..] == "unreachable" { - fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope); Unreachable(bcx); return Result::new(bcx, C_nil(ccx)); } let llret_ty = type_of::type_of(ccx, ret_ty); - // Get location to store the result. 
If the user does - // not care about the result, just make a stack slot - let llresult = match dest { - expr::SaveIn(d) => d, - expr::Ignore => { - if !type_is_zero_size(ccx, ret_ty) { - let llresult = alloc_ty(bcx, ret_ty, "intrinsic_result"); - call_lifetime_start(bcx, llresult); - llresult - } else { - C_undef(llret_ty.ptr_to()) - } - } - }; - let simple = get_simple_intrinsic(ccx, &name); let llval = match (simple, &name[..]) { (Some(llfn), _) => { @@ -382,16 +182,20 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, } (_, "drop_in_place") => { let tp_ty = substs.types[0]; - let ptr = if type_is_sized(tcx, tp_ty) { + let is_sized = type_is_sized(tcx, tp_ty); + let ptr = if is_sized { llargs[0] } else { - let scratch = rvalue_scratch_datum(bcx, tp_ty, "tmp"); - Store(bcx, llargs[0], expr::get_dataptr(bcx, scratch.val)); - Store(bcx, llargs[1], expr::get_meta(bcx, scratch.val)); - fcx.schedule_lifetime_end(cleanup::CustomScope(cleanup_scope), scratch.val); - scratch.val + let scratch = alloc_ty(bcx, tp_ty, "drop"); + call_lifetime_start(bcx, scratch); + Store(bcx, llargs[0], get_dataptr(bcx, scratch)); + Store(bcx, llargs[1], get_meta(bcx, scratch)); + scratch }; glue::drop_ty(bcx, ptr, tp_ty, call_debug_location); + if !is_sized { + call_lifetime_end(bcx, ptr); + } C_nil(ccx) } (_, "type_name") => { @@ -402,13 +206,6 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, (_, "type_id") => { C_u64(ccx, ccx.tcx().type_id_hash(substs.types[0])) } - (_, "init_dropped") => { - let tp_ty = substs.types[0]; - if !type_is_zero_size(ccx, tp_ty) { - drop_done_fill_mem(bcx, llresult, tp_ty); - } - C_nil(ccx) - } (_, "init") => { let tp_ty = substs.types[0]; if !type_is_zero_size(ccx, tp_ty) { @@ -511,8 +308,8 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, (_, "volatile_store") => { let tp_ty = substs.types[0]; if type_is_fat_ptr(bcx.tcx(), tp_ty) { - VolatileStore(bcx, llargs[1], expr::get_dataptr(bcx, llargs[0])); - VolatileStore(bcx, llargs[2], expr::get_meta(bcx, llargs[0])); + VolatileStore(bcx, llargs[1], get_dataptr(bcx, llargs[0])); + VolatileStore(bcx, llargs[2], get_meta(bcx, llargs[0])); } else { let val = if fn_ty.args[1].is_indirect() { Load(bcx, llargs[1]) @@ -621,9 +418,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, } (_, name) if name.starts_with("simd_") => { generic_simd_intrinsic(bcx, name, - substs, callee_ty, - expr_arguments, &llargs, ret_ty, llret_ty, call_debug_location, @@ -868,13 +663,13 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, let llargs = if !any_changes_needed { // no aggregates to flatten, so no change needed - llargs + llargs.to_vec() } else { // there are some aggregates that need to be flattened // in the LLVM call, so we need to run over the types // again to find them and extract the arguments intr.inputs.iter() - .zip(&llargs) + .zip(llargs) .zip(&arg_tys) .flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg)) .collect() @@ -919,17 +714,6 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, } } - // If we made a temporary stack slot, let's clean it up - match dest { - expr::Ignore => { - bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location); - call_lifetime_end(bcx, llresult); - } - expr::SaveIn(_) => {} - } - - fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope); - Result::new(bcx, llresult) } @@ -1064,10 +848,10 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, 
SetPersonalityFn(bcx, bcx.fcx.eh_personality()); - let normal = bcx.fcx.new_temp_block("normal"); - let catchswitch = bcx.fcx.new_temp_block("catchswitch"); - let catchpad = bcx.fcx.new_temp_block("catchpad"); - let caught = bcx.fcx.new_temp_block("caught"); + let normal = bcx.fcx.new_block("normal"); + let catchswitch = bcx.fcx.new_block("catchswitch"); + let catchpad = bcx.fcx.new_block("catchpad"); + let caught = bcx.fcx.new_block("caught"); let func = llvm::get_param(bcx.fcx.llfn, 0); let data = llvm::get_param(bcx.fcx.llfn, 1); @@ -1123,7 +907,7 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let tcx = ccx.tcx(); let tydesc = match tcx.lang_items.msvc_try_filter() { - Some(did) => ::consts::get_static(ccx, did).to_llref(), + Some(did) => ::consts::get_static(ccx, did), None => bug!("msvc_try_filter not defined"), }; let tok = CatchPad(catchpad, cs, &[tydesc, C_i32(ccx, 0), slot]); @@ -1184,8 +968,8 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // expected to be `*mut *mut u8` for this to actually work, but that's // managed by the standard library. - let then = bcx.fcx.new_temp_block("then"); - let catch = bcx.fcx.new_temp_block("catch"); + let then = bcx.fcx.new_block("then"); + let catch = bcx.fcx.new_block("catch"); let func = llvm::get_param(bcx.fcx.llfn, 0); let data = llvm::get_param(bcx.fcx.llfn, 1); @@ -1240,8 +1024,7 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, let (fcx, block_arena); block_arena = TypedArena::new(); fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena); - let bcx = fcx.init(true, None); - trans(bcx); + trans(fcx.init(true)); fcx.cleanup(); llfn } @@ -1283,9 +1066,7 @@ fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) { fn generic_simd_intrinsic<'blk, 'tcx, 'a> (bcx: Block<'blk, 'tcx>, name: &str, - substs: &'tcx Substs<'tcx>, callee_ty: Ty<'tcx>, - args: Option<&[P]>, llargs: &[ValueRef], ret_ty: Ty<'tcx>, llret_ty: Type, @@ -1386,20 +1167,7 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a> let total_len = in_len as u64 * 2; - let vector = match args { - Some(args) => { - match consts::const_expr(bcx.ccx(), &args[2], substs, None, - // this should probably help simd error reporting - consts::TrueConst::Yes) { - Ok((vector, _)) => vector, - Err(err) => { - fatal_const_eval_err(bcx.tcx(), err.as_inner(), span, - "shuffle indices"); - } - } - } - None => llargs[2] - }; + let vector = llargs[2]; let indices: Option> = (0..n) .map(|i| { diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs index 81a1dbeb7fe74..1286df7b97e67 100644 --- a/src/librustc_trans/lib.rs +++ b/src/librustc_trans/lib.rs @@ -110,17 +110,12 @@ mod collector; mod common; mod consts; mod context; -mod controlflow; -mod datum; mod debuginfo; mod declare; mod disr; -mod expr; mod glue; -mod inline; mod intrinsic; mod machine; -mod _match; mod meth; mod mir; mod monomorphize; diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs index b051028ebda6b..97c77ee3d8c72 100644 --- a/src/librustc_trans/meth.rs +++ b/src/librustc_trans/meth.rs @@ -20,13 +20,12 @@ use rustc::traits::{self, Reveal}; use abi::FnType; use base::*; use build::*; -use callee::{Callee, Virtual, ArgVals, trans_fn_pointer_shim}; +use callee::{Callee, Virtual, trans_fn_pointer_shim}; use closure; use common::*; use consts; use debuginfo::DebugLoc; use declare; -use expr; use glue; use machine; use type_::Type; @@ -96,25 +95,21 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, let (block_arena, fcx): (TypedArena<_>, 
FunctionContext); block_arena = TypedArena::new(); fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena); - let mut bcx = fcx.init(false, None); - assert!(!fcx.needs_ret_allocas); + let mut bcx = fcx.init(false); - - let dest = - fcx.llretslotptr.get().map( - |_| expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot"))); + let dest = fcx.llretslotptr.get(); debug!("trans_object_shim: method_offset_in_vtable={}", vtable_index); let llargs = get_params(fcx.llfn); - let args = ArgVals(&llargs[fcx.fn_ty.ret.is_indirect() as usize..]); let callee = Callee { data: Virtual(vtable_index), ty: method_ty }; - bcx = callee.call(bcx, DebugLoc::None, args, dest).bcx; + bcx = callee.call(bcx, DebugLoc::None, + &llargs[fcx.fn_ty.ret.is_indirect() as usize..], dest).bcx; fcx.finish(bcx, DebugLoc::None); @@ -160,7 +155,7 @@ pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, get_vtable_methods(tcx, id, substs) .into_iter() .map(|opt_mth| opt_mth.map_or(nullptr, |mth| { - Callee::def(ccx, mth.method.def_id, &mth.substs).reify(ccx).val + Callee::def(ccx, mth.method.def_id, &mth.substs).reify(ccx) })) .collect::>() .into_iter() diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 56d02fa1fac4f..3ab4290e7b9b9 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -9,7 +9,7 @@ // except according to those terms. use llvm::{self, ValueRef}; -use rustc_const_eval::ErrKind; +use rustc_const_eval::{ErrKind, ConstEvalErr, note_const_eval_err}; use rustc::middle::lang_items; use rustc::ty; use rustc::mir::repr as mir; @@ -78,7 +78,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { debug!("llblock: creating cleanup trampoline for {:?}", target); let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target); - let trampoline = this.fcx.new_block(name, None).build(); + let trampoline = this.fcx.new_block(name).build(); trampoline.set_personality_fn(this.fcx.eh_personality()); trampoline.cleanup_ret(cp, Some(lltarget)); trampoline.llbb() @@ -291,7 +291,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { // Create the failure block and the conditional branch to it. let lltarget = llblock(self, target); - let panic_block = self.fcx.new_block("panic", None); + let panic_block = self.fcx.new_block("panic"); if expected { bcx.cond_br(cond, lltarget, panic_block.llbb); } else { @@ -354,9 +354,11 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { // is also constant, then we can produce a warning. if const_cond == Some(!expected) { if let Some(err) = const_err { - let _ = consts::const_err(bcx.ccx(), span, - Err::<(), _>(err), - consts::TrueConst::No); + let err = ConstEvalErr{ span: span, kind: err }; + let mut diag = bcx.tcx().sess.struct_span_warn( + span, "this expression will panic at run-time"); + note_const_eval_err(bcx.tcx(), &err, span, "expression", &mut diag); + diag.emit(); } } @@ -364,7 +366,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { let def_id = common::langcall(bcx.tcx(), Some(span), "", lang_item); let callee = Callee::def(bcx.ccx(), def_id, bcx.ccx().empty_substs_for_def_id(def_id)); - let llfn = callee.reify(bcx.ccx()).val; + let llfn = callee.reify(bcx.ccx()); // Translate the actual panic invoke/call. if let Some(unwind) = cleanup { @@ -497,28 +499,27 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { let fn_ptr = match callee.data { NamedTupleConstructor(_) => { // FIXME translate this like mir::Rvalue::Aggregate. 
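Throughout this patch, Callee::def(..).reify(ccx) collapses a zero-sized fn item into a callable function pointer (the .val suffix disappears because reify now returns the ValueRef directly). In surface Rust, reification is the ordinary fn-item-to-fn-pointer coercion:

fn square(x: i32) -> i32 {
    x * x
}

fn main() {
    // `square` by itself has a zero-sized fn-item type; coercing it to a
    // fn pointer is the reification step the shims above perform in LLVM.
    let f: fn(i32) -> i32 = square;
    assert_eq!(f(7), 49);
}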
- callee.reify(bcx.ccx()).val + callee.reify(bcx.ccx()) } Intrinsic => { - use callee::ArgVals; - use expr::{Ignore, SaveIn}; use intrinsic::trans_intrinsic_call; let (dest, llargs) = match ret_dest { _ if fn_ty.ret.is_indirect() => { - (SaveIn(llargs[0]), &llargs[1..]) + (llargs[0], &llargs[1..]) + } + ReturnDest::Nothing => { + (C_undef(fn_ty.ret.original_ty.ptr_to()), &llargs[..]) } - ReturnDest::Nothing => (Ignore, &llargs[..]), ReturnDest::IndirectOperand(dst, _) | - ReturnDest::Store(dst) => (SaveIn(dst), &llargs[..]), + ReturnDest::Store(dst) => (dst, &llargs[..]), ReturnDest::DirectOperand(_) => bug!("Cannot use direct operand with an intrinsic call") }; bcx.with_block(|bcx| { trans_intrinsic_call(bcx, callee.ty, &fn_ty, - ArgVals(llargs), dest, - debug_loc); + &llargs, dest, debug_loc); }); if let ReturnDest::IndirectOperand(dst, _) = ret_dest { @@ -766,7 +767,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { let target = self.bcx(target_bb); - let block = self.fcx.new_block("cleanup", None); + let block = self.fcx.new_block("cleanup"); self.landing_pads[target_bb] = Some(block); let bcx = block.build(); @@ -809,7 +810,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { fn unreachable_block(&mut self) -> Block<'bcx, 'tcx> { self.unreachable_block.unwrap_or_else(|| { - let bl = self.fcx.new_block("unreachable", None); + let bl = self.fcx.new_block("unreachable"); bl.build().unreachable(); self.unreachable_block = Some(bl); bl @@ -878,10 +879,13 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { if out_type_size != 0 { // FIXME #19925 Remove this hack after a release cycle. let f = Callee::def(bcx.ccx(), def_id, substs); - let datum = f.reify(bcx.ccx()); + let ty = match f.ty.sty { + ty::TyFnDef(_, _, f) => bcx.tcx().mk_fn_ptr(f), + _ => f.ty + }; val = OperandRef { - val: Immediate(datum.val), - ty: datum.ty + val: Immediate(f.reify(bcx.ccx())), + ty: ty }; } } diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 4da973bb7f946..1badfdba6603f 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -10,10 +10,10 @@ use llvm::{self, ValueRef}; use rustc::middle::const_val::ConstVal; -use rustc_const_eval::ErrKind; +use rustc_const_eval::{ErrKind, ConstEvalErr, report_const_eval_err}; use rustc_const_math::ConstInt::*; use rustc_const_math::ConstFloat::*; -use rustc_const_math::ConstMathErr; +use rustc_const_math::{ConstInt, ConstIsize, ConstUsize, ConstMathErr}; use rustc::hir::def_id::DefId; use rustc::infer::TransNormalize; use rustc::mir::repr as mir; @@ -28,12 +28,14 @@ use callee::Callee; use common::{self, BlockAndBuilder, CrateContext, const_get_elt, val_ty}; use common::{C_array, C_bool, C_bytes, C_floating_f64, C_integral}; use common::{C_null, C_struct, C_str_slice, C_undef, C_uint}; -use consts::{self, ConstEvalFailure, TrueConst, to_const_int}; +use common::{const_to_opt_int, const_to_opt_uint}; +use consts; use monomorphize::{self, Instance}; use type_of; use type_::Type; use value::Value; +use syntax::ast; use syntax_pos::{Span, DUMMY_SP}; use std::ptr; @@ -230,7 +232,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { fn trans_def(ccx: &'a CrateContext<'a, 'tcx>, mut instance: Instance<'tcx>, args: IndexVec>) - -> Result, ConstEvalFailure> { + -> Result, ConstEvalErr> { // Try to resolve associated constants. 
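Resolving an associated constant through the trait ref, as the code below does first, means finding the impl that actually supplies the value. The source-level picture:

trait HasId {
    const ID: u32;
}

struct A;

impl HasId for A {
    const ID: u32 = 7;
}

// `T::ID` names a trait item; the compiler must route it through the
// trait ref to the concrete impl's constant, as trans_def's lookup does.
fn id_of<T: HasId>() -> u32 {
    T::ID
}

fn main() {
    assert_eq!(id_of::<A>(), 7);
}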
if let Some(trait_id) = ccx.tcx().trait_of_item(instance.def) { let trait_ref = ty::TraitRef::new(trait_id, instance.substs); @@ -261,7 +263,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { value) } - fn trans(&mut self) -> Result, ConstEvalFailure> { + fn trans(&mut self) -> Result, ConstEvalErr> { let tcx = self.ccx.tcx(); let mut bb = mir::START_BLOCK; @@ -320,10 +322,10 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { ErrKind::Math(err.clone()) } }; - match consts::const_err(self.ccx, span, Err(err), TrueConst::Yes) { - Ok(()) => {} - Err(err) => if failure.is_ok() { failure = Err(err); } - } + + let err = ConstEvalErr{ span: span, kind: err }; + report_const_eval_err(tcx, &err, span, "expression").emit(); + failure = Err(err); } target } @@ -370,7 +372,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { } fn const_lvalue(&self, lvalue: &mir::Lvalue<'tcx>, span: Span) - -> Result, ConstEvalFailure> { + -> Result, ConstEvalErr> { let tcx = self.ccx.tcx(); if let Some(index) = self.mir.local_index(lvalue) { @@ -386,7 +388,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { mir::Lvalue::ReturnPointer => bug!(), // handled above mir::Lvalue::Static(def_id) => { ConstLvalue { - base: Base::Static(consts::get_static(self.ccx, def_id).val), + base: Base::Static(consts::get_static(self.ccx, def_id)), llextra: ptr::null_mut(), ty: lvalue.ty(self.mir, tcx).to_ty(tcx) } @@ -411,11 +413,18 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { } else if let ty::TyStr = projected_ty.sty { (Base::Str(base), extra) } else { - let val = consts::load_const(self.ccx, base, projected_ty); + let v = base; + let v = self.ccx.const_unsized().borrow().get(&v).map_or(v, |&v| v); + let mut val = unsafe { llvm::LLVMGetInitializer(v) }; if val.is_null() { span_bug!(span, "dereference of non-constant pointer `{:?}`", Value(base)); } + if projected_ty.is_bool() { + unsafe { + val = llvm::LLVMConstTrunc(val, Type::i1(self.ccx).to_ref()); + } + } (Base::Value(val), extra) } } @@ -462,7 +471,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { } fn const_operand(&self, operand: &mir::Operand<'tcx>, span: Span) - -> Result, ConstEvalFailure> { + -> Result, ConstEvalErr> { match *operand { mir::Operand::Consume(ref lvalue) => { Ok(self.const_lvalue(lvalue, span)?.to_const(span)) @@ -497,7 +506,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>, dest_ty: Ty<'tcx>, span: Span) - -> Result, ConstEvalFailure> { + -> Result, ConstEvalErr> { let tcx = self.ccx.tcx(); let val = match *rvalue { mir::Rvalue::Use(ref operand) => self.const_operand(operand, span)?, @@ -565,7 +574,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { match operand.ty.sty { ty::TyFnDef(def_id, substs, _) => { Callee::def(self.ccx, def_id, substs) - .reify(self.ccx).val + .reify(self.ccx) } _ => { span_bug!(span, "{} cannot be reified to a fn ptr", @@ -782,6 +791,54 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { } +fn to_const_int(value: ValueRef, t: Ty, tcx: TyCtxt) -> Option { + match t.sty { + ty::TyInt(int_type) => const_to_opt_int(value).and_then(|input| match int_type { + ast::IntTy::I8 => { + assert_eq!(input as i8 as i64, input); + Some(ConstInt::I8(input as i8)) + }, + ast::IntTy::I16 => { + assert_eq!(input as i16 as i64, input); + Some(ConstInt::I16(input as i16)) + }, + ast::IntTy::I32 => { + assert_eq!(input as i32 as i64, input); + Some(ConstInt::I32(input as i32)) + }, + ast::IntTy::I64 => { + Some(ConstInt::I64(input)) + }, + ast::IntTy::Is => { + ConstIsize::new(input, tcx.sess.target.int_type) + 
.ok().map(ConstInt::Isize) + }, + }), + ty::TyUint(uint_type) => const_to_opt_uint(value).and_then(|input| match uint_type { + ast::UintTy::U8 => { + assert_eq!(input as u8 as u64, input); + Some(ConstInt::U8(input as u8)) + }, + ast::UintTy::U16 => { + assert_eq!(input as u16 as u64, input); + Some(ConstInt::U16(input as u16)) + }, + ast::UintTy::U32 => { + assert_eq!(input as u32 as u64, input); + Some(ConstInt::U32(input as u32)) + }, + ast::UintTy::U64 => { + Some(ConstInt::U64(input)) + }, + ast::UintTy::Us => { + ConstUsize::new(input, tcx.sess.target.uint_type) + .ok().map(ConstInt::Usize) + }, + }), + _ => None, + } +} + pub fn const_scalar_binop(op: mir::BinOp, lhs: ValueRef, rhs: ValueRef, @@ -902,25 +959,17 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } }; - match result { - Ok(v) => v, - Err(ConstEvalFailure::Compiletime(_)) => { - // We've errored, so we don't have to produce working code. - let llty = type_of::type_of(bcx.ccx(), ty); - Const::new(C_undef(llty), ty) - } - Err(ConstEvalFailure::Runtime(err)) => { - span_bug!(constant.span, - "MIR constant {:?} results in runtime panic: {:?}", - constant, err.description()) - } - } + result.unwrap_or_else(|_| { + // We've errored, so we don't have to produce working code. + let llty = type_of::type_of(bcx.ccx(), ty); + Const::new(C_undef(llty), ty) + }) } } pub fn trans_static_initializer(ccx: &CrateContext, def_id: DefId) - -> Result { + -> Result { let instance = Instance::mono(ccx.shared(), def_id); MirConstContext::trans_def(ccx, instance, IndexVec::new()).map(|c| c.llval) } diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 94db2e3c23cef..5e180887a3604 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -109,7 +109,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { mir::Lvalue::ReturnPointer => bug!(), // handled above mir::Lvalue::Static(def_id) => { let const_ty = self.monomorphized_lvalue_ty(lvalue); - LvalueRef::new_sized(consts::get_static(ccx, def_id).val, + LvalueRef::new_sized(consts::get_static(ccx, def_id), LvalueTy::from_ty(const_ty)) }, mir::Lvalue::Projection(box mir::Projection { diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 727b680541dd7..474b2552e7079 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -145,7 +145,7 @@ impl<'tcx> LocalRef<'tcx> { /////////////////////////////////////////////////////////////////////////// pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { - let bcx = fcx.init(false, None).build(); + let bcx = fcx.init(true).build(); let mir = bcx.mir(); // Analyze the temps to determine which must be lvalues @@ -207,9 +207,9 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { let block_bcxs: IndexVec> = mir.basic_blocks().indices().map(|bb| { if bb == mir::START_BLOCK { - fcx.new_block("start", None) + fcx.new_block("start") } else { - fcx.new_block(&format!("{:?}", bb), None) + fcx.new_block(&format!("{:?}", bb)) } }).collect(); diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 9f7c2ee219eb5..13484cb7a4ece 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -17,7 +17,6 @@ use asm; use base; use callee::Callee; use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder, Result}; -use datum::{Datum, Lvalue}; use debuginfo::DebugLoc; use adt; use machine; @@ -101,7 +100,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { let size = 
C_uint(bcx.ccx(), size); let base = get_dataptr(&bcx, dest.llval); let bcx = bcx.map_block(|block| { - tvec::iter_vec_raw(block, base, tr_elem.ty, size, |block, llslot, _| { + tvec::slice_for_each(block, base, tr_elem.ty, size, |block, llslot| { self.store_operand_direct(block, llslot, tr_elem); block }) @@ -157,8 +156,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { mir::Rvalue::InlineAsm { ref asm, ref outputs, ref inputs } => { let outputs = outputs.iter().map(|output| { let lvalue = self.trans_lvalue(&bcx, output); - Datum::new(lvalue.llval, lvalue.ty.to_ty(bcx.tcx()), - Lvalue::new("out")) + (lvalue.llval, lvalue.ty.to_ty(bcx.tcx())) }).collect(); let input_vals = inputs.iter().map(|input| { @@ -202,7 +200,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { ty::TyFnDef(def_id, substs, _) => { OperandValue::Immediate( Callee::def(bcx.ccx(), def_id, substs) - .reify(bcx.ccx()).val) + .reify(bcx.ccx())) } _ => { bug!("{} cannot be reified to a fn ptr", operand.ty) diff --git a/src/librustc_trans/monomorphize.rs b/src/librustc_trans/monomorphize.rs index d1837883aaeb0..020ac8d643b86 100644 --- a/src/librustc_trans/monomorphize.rs +++ b/src/librustc_trans/monomorphize.rs @@ -8,162 +8,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use llvm::ValueRef; -use llvm; use rustc::hir::def_id::DefId; use rustc::infer::TransNormalize; use rustc::ty::subst::{Subst, Substs}; -use rustc::ty::{self, Ty, TypeFoldable, TyCtxt}; -use attributes; -use base::{push_ctxt}; -use base; +use rustc::ty::{self, Ty, TyCtxt}; use common::*; -use declare; -use Disr; -use rustc::hir::map as hir_map; use rustc::util::ppaux; -use rustc::hir; - -use errors; - use std::fmt; -use trans_item::TransItem; - -pub fn monomorphic_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - fn_id: DefId, - psubsts: &'tcx Substs<'tcx>) - -> (ValueRef, Ty<'tcx>) { - debug!("monomorphic_fn(fn_id={:?}, real_substs={:?})", fn_id, psubsts); - assert!(!psubsts.types.needs_infer() && !psubsts.types.has_param_types()); - - let _icx = push_ctxt("monomorphic_fn"); - - let instance = Instance::new(fn_id, psubsts); - - let item_ty = ccx.tcx().lookup_item_type(fn_id).ty; - - debug!("monomorphic_fn about to subst into {:?}", item_ty); - let mono_ty = apply_param_substs(ccx.tcx(), psubsts, &item_ty); - debug!("mono_ty = {:?} (post-substitution)", mono_ty); - - if let Some(&val) = ccx.instances().borrow().get(&instance) { - debug!("leaving monomorphic fn {:?}", instance); - return (val, mono_ty); - } else { - assert!(!ccx.codegen_unit().contains_item(&TransItem::Fn(instance))); - } - - debug!("monomorphic_fn({:?})", instance); - - ccx.stats().n_monos.set(ccx.stats().n_monos.get() + 1); - - let depth; - { - let mut monomorphizing = ccx.monomorphizing().borrow_mut(); - depth = match monomorphizing.get(&fn_id) { - Some(&d) => d, None => 0 - }; - - debug!("monomorphic_fn: depth for fn_id={:?} is {:?}", fn_id, depth+1); - - // Random cut-off -- code that needs to instantiate the same function - // recursively more than thirty times can probably safely be assumed - // to be causing an infinite expansion. 
- if depth > ccx.sess().recursion_limit.get() { - let error = format!("reached the recursion limit while instantiating `{}`", - instance); - if let Some(id) = ccx.tcx().map.as_local_node_id(fn_id) { - ccx.sess().span_fatal(ccx.tcx().map.span(id), &error); - } else { - ccx.sess().fatal(&error); - } - } - - monomorphizing.insert(fn_id, depth + 1); - } - - let symbol = ccx.symbol_map().get_or_compute(ccx.shared(), - TransItem::Fn(instance)); - - debug!("monomorphize_fn mangled to {}", &symbol); - assert!(declare::get_defined_value(ccx, &symbol).is_none()); - - // FIXME(nagisa): perhaps needs a more fine grained selection? - let lldecl = declare::define_internal_fn(ccx, &symbol, mono_ty); - // FIXME(eddyb) Doubt all extern fn should allow unwinding. - attributes::unwind(lldecl, true); - - ccx.instances().borrow_mut().insert(instance, lldecl); - - // we can only monomorphize things in this crate (or inlined into it) - let fn_node_id = ccx.tcx().map.as_local_node_id(fn_id).unwrap(); - let map_node = errors::expect( - ccx.sess().diagnostic(), - ccx.tcx().map.find(fn_node_id), - || { - format!("while instantiating `{}`, couldn't find it in \ - the item map (may have attempted to monomorphize \ - an item defined in a different crate?)", - instance) - }); - match map_node { - hir_map::NodeItem(&hir::Item { - ref attrs, - node: hir::ItemFn(..), .. - }) | - hir_map::NodeImplItem(&hir::ImplItem { - ref attrs, node: hir::ImplItemKind::Method( - hir::MethodSig { .. }, _), .. - }) | - hir_map::NodeTraitItem(&hir::TraitItem { - ref attrs, node: hir::MethodTraitItem( - hir::MethodSig { .. }, Some(_)), .. - }) => { - let trans_item = TransItem::Fn(instance); - - if ccx.shared().translation_items().borrow().contains(&trans_item) { - attributes::from_fn_attrs(ccx, attrs, lldecl); - unsafe { - llvm::LLVMSetLinkage(lldecl, llvm::ExternalLinkage); - } - } else { - // FIXME: #34151 - // Normally, getting here would indicate a bug in trans::collector, - // since it seems to have missed a translation item. When we are - // translating with non-MIR based trans, however, the results of - // the collector are not entirely reliable since it bases its - // analysis on MIR. Thus, we'll instantiate the missing function - // privately in this codegen unit, so that things keep working. 
- ccx.stats().n_fallback_instantiations.set(ccx.stats() - .n_fallback_instantiations - .get() + 1); - trans_item.predefine(ccx, llvm::InternalLinkage); - trans_item.define(ccx); - } - } - - hir_map::NodeVariant(_) | hir_map::NodeStructCtor(_) => { - let disr = match map_node { - hir_map::NodeVariant(_) => { - Disr::from(inlined_variant_def(ccx, fn_node_id).disr_val) - } - hir_map::NodeStructCtor(_) => Disr(0), - _ => bug!() - }; - attributes::inline(lldecl, attributes::InlineAttr::Hint); - attributes::set_frame_pointer_elimination(ccx, lldecl); - base::trans_ctor_shim(ccx, fn_node_id, disr, psubsts, lldecl); - } - - _ => bug!("can't monomorphize a {:?}", map_node) - }; - - ccx.monomorphizing().borrow_mut().insert(fn_id, depth); - - debug!("leaving monomorphic fn {}", ccx.tcx().item_path_str(fn_id)); - (lldecl, mono_ty) -} #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub struct Instance<'tcx> { diff --git a/src/librustc_trans/trans_item.rs b/src/librustc_trans/trans_item.rs index 580882e31dd60..90dcc3a61fd7e 100644 --- a/src/librustc_trans/trans_item.rs +++ b/src/librustc_trans/trans_item.rs @@ -22,17 +22,15 @@ use declare; use glue::DropGlueKind; use llvm; use monomorphize::{self, Instance}; -use inline; use rustc::dep_graph::DepNode; use rustc::hir; -use rustc::hir::map as hir_map; use rustc::hir::def_id::DefId; use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; use rustc::ty::subst::Substs; use rustc_const_eval::fatal_const_eval_err; use std::hash::{Hash, Hasher}; use syntax::ast::{self, NodeId}; -use syntax::{attr,errors}; +use syntax::attr; use type_of; use glue; use abi::{Abi, FnType}; @@ -88,13 +86,13 @@ impl<'a, 'tcx> TransItem<'tcx> { let def_id = ccx.tcx().map.local_def_id(node_id); let _task = ccx.tcx().dep_graph.in_task(DepNode::TransCrateItem(def_id)); // (*) let item = ccx.tcx().map.expect_item(node_id); - if let hir::ItemStatic(_, m, ref expr) = item.node { - match consts::trans_static(&ccx, m, expr, item.id, &item.attrs) { + if let hir::ItemStatic(_, m, _) = item.node { + match consts::trans_static(&ccx, m, item.id, &item.attrs) { Ok(_) => { /* Cool, everything's alright. */ }, Err(err) => { // FIXME: shouldn't this be a `span_err`? fatal_const_eval_err( - ccx.tcx(), &err, expr.span, "static"); + ccx.tcx(), &err, item.span, "static"); } }; } else { @@ -157,20 +155,16 @@ impl<'a, 'tcx> TransItem<'tcx> { let ty = ccx.tcx().lookup_item_type(def_id).ty; let llty = type_of::type_of(ccx, ty); - match ccx.tcx().map.get(node_id) { - hir::map::NodeItem(&hir::Item { - span, node: hir::ItemStatic(..), .. 
- }) => { - let g = declare::define_global(ccx, symbol_name, llty).unwrap_or_else(|| { - ccx.sess().span_fatal(span, - &format!("symbol `{}` is already defined", symbol_name)) - }); + let g = declare::define_global(ccx, symbol_name, llty).unwrap_or_else(|| { + ccx.sess().span_fatal(ccx.tcx().map.span(node_id), + &format!("symbol `{}` is already defined", symbol_name)) + }); - unsafe { llvm::LLVMSetLinkage(g, linkage) }; - } + unsafe { llvm::LLVMSetLinkage(g, linkage) }; - item => bug!("predefine_static: expected static, found {:?}", item) - } + let instance = Instance::mono(ccx.shared(), def_id); + ccx.instances().borrow_mut().insert(instance, g); + ccx.statics().borrow_mut().insert(g, def_id); } fn predefine_fn(ccx: &CrateContext<'a, 'tcx>, @@ -180,47 +174,22 @@ impl<'a, 'tcx> TransItem<'tcx> { assert!(!instance.substs.types.needs_infer() && !instance.substs.types.has_param_types()); - let instance = inline::maybe_inline_instance(ccx, instance); - let item_ty = ccx.tcx().lookup_item_type(instance.def).ty; let item_ty = ccx.tcx().erase_regions(&item_ty); let mono_ty = monomorphize::apply_param_substs(ccx.tcx(), instance.substs, &item_ty); - let fn_node_id = ccx.tcx().map.as_local_node_id(instance.def).unwrap(); - let map_node = errors::expect( - ccx.sess().diagnostic(), - ccx.tcx().map.find(fn_node_id), - || { - format!("while instantiating `{}`, couldn't find it in \ - the item map (may have attempted to monomorphize \ - an item defined in a different crate?)", - instance) - }); - - match map_node { - hir_map::NodeItem(&hir::Item { - ref attrs, node: hir::ItemFn(..), .. - }) | - hir_map::NodeTraitItem(&hir::TraitItem { - ref attrs, node: hir::MethodTraitItem(..), .. - }) | - hir_map::NodeImplItem(&hir::ImplItem { - ref attrs, node: hir::ImplItemKind::Method(..), .. 
- }) => { - let lldecl = declare::declare_fn(ccx, symbol_name, mono_ty); - unsafe { llvm::LLVMSetLinkage(lldecl, linkage) }; - base::set_link_section(ccx, lldecl, attrs); - if linkage == llvm::LinkOnceODRLinkage || - linkage == llvm::WeakODRLinkage { - llvm::SetUniqueComdat(ccx.llmod(), lldecl); - } + let attrs = ccx.tcx().get_attrs(instance.def); + let lldecl = declare::declare_fn(ccx, symbol_name, mono_ty); + unsafe { llvm::LLVMSetLinkage(lldecl, linkage) }; + base::set_link_section(ccx, lldecl, &attrs); + if linkage == llvm::LinkOnceODRLinkage || + linkage == llvm::WeakODRLinkage { + llvm::SetUniqueComdat(ccx.llmod(), lldecl); + } - attributes::from_fn_attrs(ccx, attrs, lldecl); - ccx.instances().borrow_mut().insert(instance, lldecl); - } - _ => bug!("Invalid item for TransItem::Fn: `{:?}`", map_node) - }; + attributes::from_fn_attrs(ccx, &attrs, lldecl); + ccx.instances().borrow_mut().insert(instance, lldecl); } fn predefine_drop_glue(ccx: &CrateContext<'a, 'tcx>, diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs index 92a2d3787bfd6..7e4719870cd83 100644 --- a/src/librustc_trans/tvec.rs +++ b/src/librustc_trans/tvec.rs @@ -13,383 +13,51 @@ use llvm; use llvm::ValueRef; use base::*; -use base; use build::*; -use cleanup; -use cleanup::CleanupMethods; use common::*; -use consts; -use datum::*; use debuginfo::DebugLoc; -use expr::{Dest, Ignore, SaveIn}; -use expr; -use machine::llsize_of_alloc; -use type_::Type; -use type_of; -use value::Value; -use rustc::ty::{self, Ty}; - -use rustc::hir; -use rustc_const_eval::eval_length; - -use syntax::ast; -use syntax::parse::token::InternedString; - -#[derive(Copy, Clone, Debug)] -struct VecTypes<'tcx> { - unit_ty: Ty<'tcx>, - llunit_ty: Type -} - -pub fn trans_fixed_vstore<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - dest: expr::Dest) - -> Block<'blk, 'tcx> { - //! - // - // [...] allocates a fixed-size array and moves it around "by value". - // In this case, it means that the caller has already given us a location - // to store the array of the suitable size, so all we have to do is - // generate the content. - - debug!("trans_fixed_vstore(expr={:?}, dest={:?})", expr, dest); - - let vt = vec_types_from_expr(bcx, expr); - - return match dest { - Ignore => write_content(bcx, &vt, expr, expr, dest), - SaveIn(lldest) => { - // lldest will have type *[T x N], but we want the type *T, - // so use GEP to convert: - let lldest = StructGEP(bcx, lldest, 0); - write_content(bcx, &vt, expr, expr, SaveIn(lldest)) - } - }; -} - -/// &[...] allocates memory on the stack and writes the values into it, returning the vector (the -/// caller must make the reference). "..." is similar except that the memory can be statically -/// allocated and we return a reference (strings are always by-ref). -pub fn trans_slice_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - slice_expr: &hir::Expr, - content_expr: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { +use rustc::ty::Ty; + +pub fn slice_for_each<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, + data_ptr: ValueRef, + unit_ty: Ty<'tcx>, + len: ValueRef, + f: F) + -> Block<'blk, 'tcx> where + F: FnOnce(Block<'blk, 'tcx>, ValueRef) -> Block<'blk, 'tcx>, +{ + let _icx = push_ctxt("tvec::slice_for_each"); let fcx = bcx.fcx; - let mut bcx = bcx; - - debug!("trans_slice_vec(slice_expr={:?})", - slice_expr); - - let vec_ty = node_id_type(bcx, slice_expr.id); - - // Handle the "..." 
case (returns a slice since strings are always unsized): - if let hir::ExprLit(ref lit) = content_expr.node { - if let ast::LitKind::Str(ref s, _) = lit.node { - let scratch = rvalue_scratch_datum(bcx, vec_ty, ""); - bcx = trans_lit_str(bcx, - content_expr, - s.clone(), - SaveIn(scratch.val)); - return DatumBlock::new(bcx, scratch.to_expr_datum()); - } - } - - // Handle the &[...] case: - let vt = vec_types_from_expr(bcx, content_expr); - let count = elements_required(bcx, content_expr); - debug!(" vt={:?}, count={}", vt, count); - let fixed_ty = bcx.tcx().mk_array(vt.unit_ty, count); - - // Always create an alloca even if zero-sized, to preserve - // the non-null invariant of the inner slice ptr - let llfixed; - // Issue 30018: ensure state is initialized as dropped if necessary. - if fcx.type_needs_drop(vt.unit_ty) { - llfixed = base::alloc_ty_init(bcx, fixed_ty, InitAlloca::Dropped, ""); + // Special-case vectors with elements of size 0 so they don't go out of bounds (#9890) + let zst = type_is_zero_size(bcx.ccx(), unit_ty); + let add = |bcx, a, b| if zst { + Add(bcx, a, b, DebugLoc::None) } else { - let uninit = InitAlloca::Uninit("fcx says vt.unit_ty is non-drop"); - llfixed = base::alloc_ty_init(bcx, fixed_ty, uninit, ""); - call_lifetime_start(bcx, llfixed); - }; - - if count > 0 { - // Arrange for the backing array to be cleaned up. - let cleanup_scope = cleanup::temporary_scope(bcx.tcx(), content_expr.id); - fcx.schedule_lifetime_end(cleanup_scope, llfixed); - fcx.schedule_drop_mem(cleanup_scope, llfixed, fixed_ty, None); - - // Generate the content into the backing array. - // llfixed has type *[T x N], but we want the type *T, - // so use GEP to convert - bcx = write_content(bcx, &vt, slice_expr, content_expr, - SaveIn(StructGEP(bcx, llfixed, 0))); + InBoundsGEP(bcx, a, &[b]) }; - immediate_rvalue_bcx(bcx, llfixed, vec_ty).to_expr_datumblock() -} - -/// Literal strings translate to slices into static memory. This is different from -/// trans_slice_vstore() above because it doesn't need to copy the content anywhere. 
-pub fn trans_lit_str<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - lit_expr: &hir::Expr, - str_lit: InternedString, - dest: Dest) - -> Block<'blk, 'tcx> { - debug!("trans_lit_str(lit_expr={:?}, dest={:?})", lit_expr, dest); - - match dest { - Ignore => bcx, - SaveIn(lldest) => { - let bytes = str_lit.len(); - let llbytes = C_uint(bcx.ccx(), bytes); - let llcstr = C_cstr(bcx.ccx(), str_lit, false); - let llcstr = consts::ptrcast(llcstr, Type::i8p(bcx.ccx())); - Store(bcx, llcstr, expr::get_dataptr(bcx, lldest)); - Store(bcx, llbytes, expr::get_meta(bcx, lldest)); - bcx - } - } -} - -fn write_content<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - vt: &VecTypes<'tcx>, - vstore_expr: &hir::Expr, - content_expr: &hir::Expr, - dest: Dest) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("tvec::write_content"); - let fcx = bcx.fcx; - let mut bcx = bcx; - - debug!("write_content(vt={:?}, dest={:?}, vstore_expr={:?})", - vt, dest, vstore_expr); - - match content_expr.node { - hir::ExprLit(ref lit) => { - match lit.node { - ast::LitKind::Str(ref s, _) => { - match dest { - Ignore => return bcx, - SaveIn(lldest) => { - let bytes = s.len(); - let llbytes = C_uint(bcx.ccx(), bytes); - let llcstr = C_cstr(bcx.ccx(), (*s).clone(), false); - if !bcx.unreachable.get() { - base::call_memcpy(&B(bcx), lldest, llcstr, llbytes, 1); - } - return bcx; - } - } - } - _ => { - span_bug!(content_expr.span, "unexpected evec content"); - } - } - } - hir::ExprVec(ref elements) => { - match dest { - Ignore => { - for element in elements { - bcx = expr::trans_into(bcx, &element, Ignore); - } - } - - SaveIn(lldest) => { - let temp_scope = fcx.push_custom_cleanup_scope(); - for (i, element) in elements.iter().enumerate() { - let lleltptr = GEPi(bcx, lldest, &[i]); - debug!("writing index {} with lleltptr={:?}", - i, Value(lleltptr)); - bcx = expr::trans_into(bcx, &element, - SaveIn(lleltptr)); - let scope = cleanup::CustomScope(temp_scope); - // Issue #30822: mark memory as dropped after running destructor - fcx.schedule_drop_and_fill_mem(scope, lleltptr, vt.unit_ty, None); - } - fcx.pop_custom_cleanup_scope(temp_scope); - } - } - return bcx; - } - hir::ExprRepeat(ref element, ref count_expr) => { - match dest { - Ignore => { - return expr::trans_into(bcx, &element, Ignore); - } - SaveIn(lldest) => { - match eval_length(bcx.tcx(), &count_expr, "repeat count").unwrap() { - 0 => expr::trans_into(bcx, &element, Ignore), - 1 => expr::trans_into(bcx, &element, SaveIn(lldest)), - count => { - let elem = unpack_datum!(bcx, expr::trans(bcx, &element)); - let bcx = iter_vec_loop(bcx, lldest, vt, - C_uint(bcx.ccx(), count), - |set_bcx, lleltptr, _| { - elem.shallow_copy(set_bcx, lleltptr) - }); - bcx - } - } - } - } - } - _ => { - span_bug!(content_expr.span, "unexpected vec content"); - } - } -} - -fn vec_types_from_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, vec_expr: &hir::Expr) - -> VecTypes<'tcx> { - let vec_ty = node_id_type(bcx, vec_expr.id); - vec_types(bcx, vec_ty.sequence_element_type(bcx.tcx())) -} - -fn vec_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, unit_ty: Ty<'tcx>) - -> VecTypes<'tcx> { - VecTypes { - unit_ty: unit_ty, - llunit_ty: type_of::type_of(bcx.ccx(), unit_ty) - } -} - -fn elements_required(bcx: Block, content_expr: &hir::Expr) -> usize { - //! 
Figure out the number of elements we need to store this content - - match content_expr.node { - hir::ExprLit(ref lit) => { - match lit.node { - ast::LitKind::Str(ref s, _) => s.len(), - _ => { - span_bug!(content_expr.span, "unexpected evec content") - } - } - }, - hir::ExprVec(ref es) => es.len(), - hir::ExprRepeat(_, ref count_expr) => { - eval_length(bcx.tcx(), &count_expr, "repeat count").unwrap() - } - _ => span_bug!(content_expr.span, "unexpected vec content") - } -} - -/// Converts a fixed-length vector into the slice pair. The vector should be stored in `llval` -/// which should be by ref. -pub fn get_fixed_base_and_len(bcx: Block, - llval: ValueRef, - vec_length: usize) - -> (ValueRef, ValueRef) { - let ccx = bcx.ccx(); - - let base = expr::get_dataptr(bcx, llval); - let len = C_uint(ccx, vec_length); - (base, len) -} - -/// Converts a vector into the slice pair. The vector should be stored in `llval` which should be -/// by-reference. If you have a datum, you would probably prefer to call -/// `Datum::get_base_and_len()` which will handle any conversions for you. -pub fn get_base_and_len<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - llval: ValueRef, - vec_ty: Ty<'tcx>) - -> (ValueRef, ValueRef) { - match vec_ty.sty { - ty::TyArray(_, n) => get_fixed_base_and_len(bcx, llval, n), - ty::TySlice(_) | ty::TyStr => { - let base = Load(bcx, expr::get_dataptr(bcx, llval)); - let len = Load(bcx, expr::get_meta(bcx, llval)); - (base, len) - } - - // Only used for pattern matching. - ty::TyBox(ty) | ty::TyRef(_, ty::TypeAndMut{ty, ..}) => { - let inner = if type_is_sized(bcx.tcx(), ty) { - Load(bcx, llval) - } else { - llval - }; - get_base_and_len(bcx, inner, ty) - }, - _ => bug!("unexpected type in get_base_and_len"), - } -} + let header_bcx = fcx.new_block("slice_loop_header"); + let body_bcx = fcx.new_block("slice_loop_body"); + let next_bcx = fcx.new_block("slice_loop_next"); -fn iter_vec_loop<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, - data_ptr: ValueRef, - vt: &VecTypes<'tcx>, - count: ValueRef, - f: F) - -> Block<'blk, 'tcx> where - F: FnOnce(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>, -{ - let _icx = push_ctxt("tvec::iter_vec_loop"); - - if bcx.unreachable.get() { - return bcx; - } - - let fcx = bcx.fcx; - let loop_bcx = fcx.new_temp_block("expr_repeat"); - let next_bcx = fcx.new_temp_block("expr_repeat: next"); - - Br(bcx, loop_bcx.llbb, DebugLoc::None); - - let loop_counter = Phi(loop_bcx, bcx.ccx().int_type(), - &[C_uint(bcx.ccx(), 0 as usize)], &[bcx.llbb]); - - let bcx = loop_bcx; - - let lleltptr = if llsize_of_alloc(bcx.ccx(), vt.llunit_ty) == 0 { - data_ptr + let start = if zst { + C_uint(bcx.ccx(), 0 as usize) } else { - InBoundsGEP(bcx, data_ptr, &[loop_counter]) + data_ptr }; - let bcx = f(bcx, lleltptr, vt.unit_ty); - let plusone = Add(bcx, loop_counter, C_uint(bcx.ccx(), 1usize), DebugLoc::None); - AddIncomingToPhi(loop_counter, plusone, bcx.llbb); + let end = add(bcx, start, len); - let cond_val = ICmp(bcx, llvm::IntULT, plusone, count, DebugLoc::None); - CondBr(bcx, cond_val, loop_bcx.llbb, next_bcx.llbb, DebugLoc::None); + Br(bcx, header_bcx.llbb, DebugLoc::None); + let current = Phi(header_bcx, val_ty(start), &[start], &[bcx.llbb]); - next_bcx -} + let keep_going = + ICmp(header_bcx, llvm::IntULT, current, end, DebugLoc::None); + CondBr(header_bcx, keep_going, body_bcx.llbb, next_bcx.llbb, DebugLoc::None); -pub fn iter_vec_raw<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, - data_ptr: ValueRef, - unit_ty: Ty<'tcx>, - len: ValueRef, - f: F) - -> Block<'blk, 'tcx> 
where - F: FnOnce(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>, -{ - let _icx = push_ctxt("tvec::iter_vec_raw"); - let fcx = bcx.fcx; - - let vt = vec_types(bcx, unit_ty); - - if llsize_of_alloc(bcx.ccx(), vt.llunit_ty) == 0 { - // Special-case vectors with elements of size 0 so they don't go out of bounds (#9890) - iter_vec_loop(bcx, data_ptr, &vt, len, f) - } else { - // Calculate the last pointer address we want to handle. - let data_end_ptr = InBoundsGEP(bcx, data_ptr, &[len]); - - // Now perform the iteration. - let header_bcx = fcx.new_temp_block("iter_vec_loop_header"); - Br(bcx, header_bcx.llbb, DebugLoc::None); - let data_ptr = - Phi(header_bcx, val_ty(data_ptr), &[data_ptr], &[bcx.llbb]); - let not_yet_at_end = - ICmp(header_bcx, llvm::IntULT, data_ptr, data_end_ptr, DebugLoc::None); - let body_bcx = fcx.new_temp_block("iter_vec_loop_body"); - let next_bcx = fcx.new_temp_block("iter_vec_next"); - CondBr(header_bcx, not_yet_at_end, body_bcx.llbb, next_bcx.llbb, DebugLoc::None); - let body_bcx = f(body_bcx, data_ptr, unit_ty); - AddIncomingToPhi(data_ptr, InBoundsGEP(body_bcx, data_ptr, - &[C_int(bcx.ccx(), 1)]), - body_bcx.llbb); - Br(body_bcx, header_bcx.llbb, DebugLoc::None); - next_bcx - } + let body_bcx = f(body_bcx, if zst { data_ptr } else { current }); + let next = add(body_bcx, current, C_uint(bcx.ccx(), 1usize)); + AddIncomingToPhi(current, next, body_bcx.llbb); + Br(body_bcx, header_bcx.llbb, DebugLoc::None); + next_bcx } diff --git a/src/librustc_typeck/check/intrinsic.rs b/src/librustc_typeck/check/intrinsic.rs index e6da03a903f21..7f9e715b7fafc 100644 --- a/src/librustc_typeck/check/intrinsic.rs +++ b/src/librustc_typeck/check/intrinsic.rs @@ -122,7 +122,7 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &hir::ForeignItem) { ], ccx.tcx.types.usize) } "rustc_peek" => (1, vec![param(ccx, 0)], param(ccx, 0)), - "init" | "init_dropped" => (1, Vec::new(), param(ccx, 0)), + "init" => (1, Vec::new(), param(ccx, 0)), "uninit" => (1, Vec::new(), param(ccx, 0)), "forget" => (1, vec!( param(ccx, 0) ), tcx.mk_nil()), "transmute" => (2, vec!( param(ccx, 0) ), param(ccx, 1)), diff --git a/src/libstd/collections/hash/table.rs b/src/libstd/collections/hash/table.rs index 02931e5e3890d..8f02c9c7d3de0 100644 --- a/src/libstd/collections/hash/table.rs +++ b/src/libstd/collections/hash/table.rs @@ -59,7 +59,7 @@ const EMPTY_BUCKET: u64 = 0; /// around just the "table" part of the hashtable. It enforces some /// invariants at the type level and employs some performance trickery, /// but in general is just a tricked out `Vec<Option<(u64, K, V)>>`.
-#[unsafe_no_drop_flag] +#[cfg_attr(stage0, unsafe_no_drop_flag)] pub struct RawTable<K, V> { capacity: usize, size: usize, @@ -1042,7 +1042,7 @@ impl<K, V> Clone for RawTable<K, V> { impl<K, V> Drop for RawTable<K, V> { #[unsafe_destructor_blind_to_params] fn drop(&mut self) { - if self.capacity == 0 || self.capacity == mem::POST_DROP_USIZE { + if self.capacity == 0 { return; } diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs index d4644a190f4d9..2b15f3dd9455c 100644 --- a/src/libstd/lib.rs +++ b/src/libstd/lib.rs @@ -275,7 +275,7 @@ #![feature(unboxed_closures)] #![feature(unicode)] #![feature(unique)] -#![feature(unsafe_no_drop_flag, filling_drop)] +#![cfg_attr(stage0, feature(unsafe_no_drop_flag))] #![feature(unwind_attributes)] #![feature(vec_push_all)] #![feature(zero_one)] diff --git a/src/libsyntax/feature_gate.rs b/src/libsyntax/feature_gate.rs index d94bfe7dcbdac..d746f8e21141f 100644 --- a/src/libsyntax/feature_gate.rs +++ b/src/libsyntax/feature_gate.rs @@ -164,10 +164,6 @@ declare_features! ( // Allows using `box` in patterns; RFC 469 (active, box_patterns, "1.0.0", Some(29641)), - // Allows using the unsafe_no_drop_flag attribute (unlikely to - // switch to Accepted; see RFC 320) - (active, unsafe_no_drop_flag, "1.0.0", None), - // Allows using the unsafe_destructor_blind_to_params attribute; // RFC 1238 (active, dropck_parametricity, "1.3.0", Some(28498)), @@ -300,7 +296,8 @@ declare_features! ( (removed, quad_precision_float, "1.0.0", None), (removed, struct_inherit, "1.0.0", None), (removed, test_removed_feature, "1.0.0", None), - (removed, visible_private_types, "1.0.0", None) + (removed, visible_private_types, "1.0.0", None), + (removed, unsafe_no_drop_flag, "1.0.0", None) ); declare_features! ( @@ -517,11 +514,6 @@ pub const KNOWN_ATTRIBUTES: &'static [(&'static str, AttributeType, AttributeGat is just used for rustc unit tests \ and will never be stable", cfg_fn!(rustc_attrs))), - ("rustc_no_mir", Whitelisted, Gated("rustc_attrs", - "the `#[rustc_no_mir]` attribute \ - is just used to make tests pass \ - and will never be stable", - cfg_fn!(rustc_attrs))), ("rustc_inherit_overflow_checks", Whitelisted, Gated("rustc_attrs", "the `#[rustc_inherit_overflow_checks]` \ attribute is just used to control \ @@ -570,10 +562,6 @@ pub const KNOWN_ATTRIBUTES: &'static [(&'static str, AttributeType, AttributeGat attribute is just used for the Rust test \ suite", cfg_fn!(omit_gdb_pretty_printer_section))), - ("unsafe_no_drop_flag", Whitelisted, Gated("unsafe_no_drop_flag", - "unsafe_no_drop_flag has unstable semantics \ - and may be removed in the future", - cfg_fn!(unsafe_no_drop_flag))), ("unsafe_destructor_blind_to_params", Normal, Gated("dropck_parametricity", diff --git a/src/libsyntax/lib.rs b/src/libsyntax/lib.rs index b4311fc007d3d..65bc9f34c9061 100644 --- a/src/libsyntax/lib.rs +++ b/src/libsyntax/lib.rs @@ -26,7 +26,6 @@ #![feature(associated_consts)] #![feature(const_fn)] -#![feature(filling_drop)] #![feature(libc)] #![feature(rustc_private)] #![feature(staged_api)] diff --git a/src/libsyntax/ptr.rs b/src/libsyntax/ptr.rs index 9d04cb75daa0e..c3f8a977a659b 100644 --- a/src/libsyntax/ptr.rs +++ b/src/libsyntax/ptr.rs @@ -39,7 +39,7 @@ use std::fmt::{self, Display, Debug}; use std::iter::FromIterator; use std::ops::Deref; -use std::{ptr, slice, vec}; +use std::{mem, ptr, slice, vec}; use serialize::{Encodable, Decodable, Encoder, Decoder}; @@ -74,12 +74,22 @@ impl<T: 'static> P<T> { pub fn map<F>(mut self, f: F) -> P<T> where F: FnOnce(T) -> T, { + let p: *mut T = &mut *self.ptr; + + // Leak self in case of panic.
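+ // (Without the `forget` below, an unwind out of the call to `f` + // would run `Box`'s destructor on `self.ptr` and drop a pointee + // that `ptr::read` had already moved out of the allocation, a + // double drop; leaking the allocation instead is safe.)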
+ // FIXME(eddyb) Use some sort of "free guard" that + // only deallocates, without dropping the pointee, + // in case the call to `f` below ends in a panic. + mem::forget(self); + unsafe { - let p = &mut *self.ptr; - // FIXME(#5016) this shouldn't need to drop-fill to be safe. - ptr::write(p, f(ptr::read_and_drop(p))); + ptr::write(p, f(ptr::read(p))); + + // Recreate self from the raw pointer. + P { + ptr: Box::from_raw(p) + } } - self } } diff --git a/src/test/codegen/adjustments.rs b/src/test/codegen/adjustments.rs index 20d0493943452..40603845da2b0 100644 --- a/src/test/codegen/adjustments.rs +++ b/src/test/codegen/adjustments.rs @@ -11,7 +11,6 @@ // compile-flags: -C no-prepopulate-passes #![crate_type = "lib"] -#![feature(rustc_attrs)] // Hack to get the correct size for the length part in slices // CHECK: @helper([[USIZE:i[0-9]+]]) @@ -21,13 +20,12 @@ fn helper(_: usize) { // CHECK-LABEL: @no_op_slice_adjustment #[no_mangle] -#[rustc_no_mir] // FIXME #27840 MIR has different codegen. pub fn no_op_slice_adjustment(x: &[u8]) -> &[u8] { // We used to generate an extra alloca and memcpy for the block's trailing expression value, so // check that we copy directly to the return value slot -// CHECK: [[SRC:%[0-9]+]] = bitcast { i8*, [[USIZE]] }* %x to -// CHECK: [[DST:%[0-9]+]] = bitcast { i8*, [[USIZE]] }* %sret_slot to i8* -// CHECK: call void @llvm.memcpy.{{.*}}(i8* [[DST]], i8* [[SRC]], +// CHECK: %2 = insertvalue { i8*, [[USIZE]] } undef, i8* %0, 0 +// CHECK: %3 = insertvalue { i8*, [[USIZE]] } %2, [[USIZE]] %1, 1 +// CHECK: ret { i8*, [[USIZE]] } %3 { x } } diff --git a/src/test/codegen/coercions.rs b/src/test/codegen/coercions.rs index 74c7192259ac4..c8c9f5b407c42 100644 --- a/src/test/codegen/coercions.rs +++ b/src/test/codegen/coercions.rs @@ -11,14 +11,12 @@ // compile-flags: -C no-prepopulate-passes #![crate_type = "lib"] -#![feature(rustc_attrs)] static X: i32 = 5; // CHECK-LABEL: @raw_ptr_to_raw_ptr_noop // CHECK-NOT: alloca #[no_mangle] -#[rustc_no_mir] // FIXME #27840 MIR has different codegen. pub fn raw_ptr_to_raw_ptr_noop() -> *const i32{ &X as *const i32 } @@ -26,7 +24,6 @@ pub fn raw_ptr_to_raw_ptr_noop() -> *const i32{ // CHECK-LABEL: @reference_to_raw_ptr_noop // CHECK-NOT: alloca #[no_mangle] -#[rustc_no_mir] // FIXME #27840 MIR has different codegen. pub fn reference_to_raw_ptr_noop() -> *const i32 { &X } diff --git a/src/test/codegen/consts.rs b/src/test/codegen/consts.rs index ea4c932d43549..36a582ca73709 100644 --- a/src/test/codegen/consts.rs +++ b/src/test/codegen/consts.rs @@ -11,7 +11,6 @@ // compile-flags: -C no-prepopulate-passes #![crate_type = "lib"] -#![feature(rustc_attrs)] // Below, these constants are defined as enum variants that by itself would // have a lower alignment than the enum type. Ensure that we mark them @@ -20,11 +19,12 @@ // CHECK: @STATIC = {{.*}}, align 4 // This checks the constants from inline_enum_const -// CHECK: @const{{[0-9]+}} = {{.*}}, align 2 +// CHECK: @ref{{[0-9]+}} = {{.*}}, align 2 // This checks the constants from {low,high}_align_const, they share the same // constant, but the alignment differs, so the higher one should be used -// CHECK: @const{{[0-9]+}} = {{.*}}, align 4 +// CHECK: [[LOW_HIGH:@ref[0-9]+]] = {{.*}}, align 4 +// CHECK: [[LOW_HIGH_REF:@const[0-9]+]] = {{.*}} [[LOW_HIGH]] #[derive(Copy, Clone)] @@ -40,32 +40,28 @@ pub static STATIC: E = E::A(0); // CHECK-LABEL: @static_enum_const #[no_mangle] -#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
pub fn static_enum_const() -> E { STATIC } // CHECK-LABEL: @inline_enum_const #[no_mangle] -#[rustc_no_mir] // FIXME #27840 MIR has different codegen. pub fn inline_enum_const() -> E { - E::A(0) + *&E::A(0) } // CHECK-LABEL: @low_align_const #[no_mangle] -#[rustc_no_mir] // FIXME #27840 MIR has different codegen. pub fn low_align_const() -> E { // Check that low_align_const and high_align_const use the same constant -// CHECK: call void @llvm.memcpy.{{.*}}(i8* %{{[0-9]+}}, i8* {{.*}} [[LOW_HIGH:@const[0-9]+]] - E::A(0) +// CHECK: load {{.*}} bitcast ({ i16, i16, [4 x i8] }** [[LOW_HIGH_REF]] + *&E::A(0) } // CHECK-LABEL: @high_align_const #[no_mangle] -#[rustc_no_mir] // FIXME #27840 MIR has different codegen. pub fn high_align_const() -> E { // Check that low_align_const and high_align_const use the same constant -// CHECK: call void @llvm.memcpy.{{.*}}(i8* %{{[0-9]}}, i8* {{.*}} [[LOW_HIGH]] - E::A(0) +// CHECK: load {{.*}} bitcast ({ i16, i16, [4 x i8] }** [[LOW_HIGH_REF]] + *&E::A(0) } diff --git a/src/test/codegen/drop.rs b/src/test/codegen/drop.rs index 25f8c13046997..a4bd5cf2c158e 100644 --- a/src/test/codegen/drop.rs +++ b/src/test/codegen/drop.rs @@ -11,7 +11,6 @@ // compile-flags: -C no-prepopulate-passes #![crate_type = "lib"] -#![feature(rustc_attrs)] struct SomeUniqueName; @@ -25,19 +24,20 @@ pub fn possibly_unwinding() { // CHECK-LABEL: @droppy #[no_mangle] -#[rustc_no_mir] // FIXME #27840 MIR has different codegen. pub fn droppy() { // Check that there are exactly 6 drop calls. The cleanups for the unwinding should be reused, so // that's one new drop call per call to possibly_unwinding(), and finally 3 drop calls for the // regular function exit. We used to have problems with quadratic growths of drop calls in such // functions. -// CHECK: call{{.*}}drop{{.*}}SomeUniqueName -// CHECK: call{{.*}}drop{{.*}}SomeUniqueName -// CHECK: call{{.*}}drop{{.*}}SomeUniqueName +// CHECK-NOT: invoke{{.*}}drop{{.*}}SomeUniqueName // CHECK: call{{.*}}drop{{.*}}SomeUniqueName // CHECK: call{{.*}}drop{{.*}}SomeUniqueName // CHECK: call{{.*}}drop{{.*}}SomeUniqueName // CHECK-NOT: call{{.*}}drop{{.*}}SomeUniqueName +// CHECK: invoke{{.*}}drop{{.*}}SomeUniqueName +// CHECK: invoke{{.*}}drop{{.*}}SomeUniqueName +// CHECK: invoke{{.*}}drop{{.*}}SomeUniqueName +// CHECK-NOT: {{(call|invoke).*}}drop{{.*}}SomeUniqueName // The next line checks for the } that ends the function definition // CHECK-LABEL: {{^[}]}} let _s = SomeUniqueName; diff --git a/src/test/codegen/loads.rs b/src/test/codegen/loads.rs index a65a3e1bb66fe..def5269e07a02 100644 --- a/src/test/codegen/loads.rs +++ b/src/test/codegen/loads.rs @@ -11,7 +11,6 @@ // compile-flags: -C no-prepopulate-passes #![crate_type = "lib"] -#![feature(rustc_attrs)] pub struct Bytes { a: u8, @@ -22,15 +21,14 @@ pub struct Bytes { // CHECK-LABEL: @borrow #[no_mangle] -#[rustc_no_mir] // FIXME #27840 MIR has different codegen. pub fn borrow(x: &i32) -> &i32 { // CHECK: load {{(i32\*, )?}}i32** %x{{.*}}, !nonnull + &x; // keep variable in an alloca x } // CHECK-LABEL: @_box #[no_mangle] -#[rustc_no_mir] // FIXME #27840 MIR has different codegen. 
pub fn _box(x: Box<i32>) -> i32 { // CHECK: load {{(i32\*, )?}}i32** %x{{.*}}, !nonnull *x diff --git a/src/test/codegen/mir_zst_stores.rs b/src/test/codegen/mir_zst_stores.rs index c1acdaf703191..a2cedc853a1e6 100644 --- a/src/test/codegen/mir_zst_stores.rs +++ b/src/test/codegen/mir_zst_stores.rs @@ -10,7 +10,6 @@ // compile-flags: -C no-prepopulate-passes -#![feature(rustc_attrs)] #![crate_type = "lib"] use std::marker::PhantomData; @@ -19,7 +18,6 @@ struct Zst { phantom: PhantomData } // CHECK-LABEL: @mir #[no_mangle] -#[rustc_mir] fn mir(){ // CHECK-NOT: getelementptr // CHECK-NOT: store{{.*}}undef diff --git a/src/test/codegen/naked-functions.rs b/src/test/codegen/naked-functions.rs index 199f7f0201877..9de74f72005e3 100644 --- a/src/test/codegen/naked-functions.rs +++ b/src/test/codegen/naked-functions.rs @@ -13,7 +13,7 @@ // compile-flags: -C no-prepopulate-passes #![crate_type = "lib"] -#![feature(naked_functions, rustc_attrs)] +#![feature(naked_functions)] // CHECK: Function Attrs: naked uwtable // CHECK-NEXT: define internal void @naked_empty() @@ -26,11 +26,11 @@ fn naked_empty() { // CHECK: Function Attrs: naked uwtable #[no_mangle] #[naked] -#[rustc_no_mir] // FIXME #27840 MIR has different codegen. // CHECK-NEXT: define internal void @naked_with_args(i{{[0-9]+}}) fn naked_with_args(a: isize) { // CHECK: %a = alloca i{{[0-9]+}} // CHECK: ret void + &a; // keep variable in an alloca } // CHECK: Function Attrs: naked uwtable @@ -46,10 +46,10 @@ fn naked_with_return() -> isize { // CHECK-NEXT: define internal i{{[0-9]+}} @naked_with_args_and_return(i{{[0-9]+}}) #[no_mangle] #[naked] -#[rustc_no_mir] // FIXME #27840 MIR has different codegen. fn naked_with_args_and_return(a: isize) -> isize { // CHECK: %a = alloca i{{[0-9]+}} // CHECK: ret i{{[0-9]+}} %{{[0-9]+}} + &a; // keep variable in an alloca a } diff --git a/src/test/codegen/refs.rs b/src/test/codegen/refs.rs index 36c83412e4f0f..49ed2229fcd2b 100644 --- a/src/test/codegen/refs.rs +++ b/src/test/codegen/refs.rs @@ -11,7 +11,6 @@ // compile-flags: -C no-prepopulate-passes #![crate_type = "lib"] -#![feature(rustc_attrs)] // Hack to get the correct size for the length part in slices // CHECK: @helper([[USIZE:i[0-9]+]]) @@ -21,12 +20,14 @@ fn helper(_: usize) { // CHECK-LABEL: @ref_dst #[no_mangle] -#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
pub fn ref_dst(s: &[u8]) { // We used to generate an extra alloca and memcpy to ref the dst, so check that we copy // directly to the alloca for "x" -// CHECK: [[SRC:%[0-9]+]] = bitcast { i8*, [[USIZE]] }* %s to i8* -// CHECK: [[DST:%[0-9]+]] = bitcast { i8*, [[USIZE]] }* %x to i8* -// CHECK: call void @llvm.memcpy.{{.*}}(i8* [[DST]], i8* [[SRC]], +// CHECK: [[X0:%[0-9]+]] = getelementptr {{.*}} { i8*, [[USIZE]] }* %x, i32 0, i32 0 +// CHECK: store i8* %0, i8** [[X0]] +// CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { i8*, [[USIZE]] }* %x, i32 0, i32 1 +// CHECK: store [[USIZE]] %1, [[USIZE]]* [[X1]] + let x = &*s; + &x; // keep variable in an alloca } diff --git a/src/test/codegen/stores.rs b/src/test/codegen/stores.rs index 89bb5d93c74fa..9141b7245e35a 100644 --- a/src/test/codegen/stores.rs +++ b/src/test/codegen/stores.rs @@ -11,7 +11,6 @@ // compile-flags: -C no-prepopulate-passes #![crate_type = "lib"] -#![feature(rustc_attrs)] pub struct Bytes { a: u8, @@ -24,12 +23,11 @@ pub struct Bytes { // The array is stored as i32, but its alignment is lower, go with 1 byte to avoid target // dependent alignment #[no_mangle] -#[rustc_no_mir] // FIXME #27840 MIR has different codegen. pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) { -// CHECK: %y = alloca [4 x i8] +// CHECK: %arg1 = alloca [4 x i8] // CHECK: [[TMP:%.+]] = alloca i32 // CHECK: store i32 %1, i32* [[TMP]] -// CHECK: [[Y8:%[0-9]+]] = bitcast [4 x i8]* %y to i8* +// CHECK: [[Y8:%[0-9]+]] = bitcast [4 x i8]* %arg1 to i8* // CHECK: [[TMP8:%[0-9]+]] = bitcast i32* [[TMP]] to i8* // CHECK: call void @llvm.memcpy.{{.*}}(i8* [[Y8]], i8* [[TMP8]], i{{[0-9]+}} 4, i32 1, i1 false) *x = y; @@ -39,12 +37,11 @@ pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) { // The struct is stored as i32, but its alignment is lower, go with 1 byte to avoid target // dependent alignment #[no_mangle] -#[rustc_no_mir] // FIXME #27840 MIR has different codegen. pub fn small_struct_alignment(x: &mut Bytes, y: Bytes) { -// CHECK: %y = alloca %Bytes +// CHECK: %arg1 = alloca %Bytes // CHECK: [[TMP:%.+]] = alloca i32 // CHECK: store i32 %1, i32* [[TMP]] -// CHECK: [[Y8:%[0-9]+]] = bitcast %Bytes* %y to i8* +// CHECK: [[Y8:%[0-9]+]] = bitcast %Bytes* %arg1 to i8* // CHECK: [[TMP8:%[0-9]+]] = bitcast i32* [[TMP]] to i8* // CHECK: call void @llvm.memcpy.{{.*}}(i8* [[Y8]], i8* [[TMP8]], i{{[0-9]+}} 4, i32 1, i1 false) *x = y; diff --git a/src/test/compile-fail/enable-orbit-for-incr-comp.rs b/src/test/compile-fail/enable-orbit-for-incr-comp.rs deleted file mode 100644 index eec6bad731e33..0000000000000 --- a/src/test/compile-fail/enable-orbit-for-incr-comp.rs +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// ignore-pretty -// compile-flags:-Zincremental=tmp/cfail-tests/enable-orbit-for-incr-comp -Zorbit=off -// error-pattern:Automatically enabling `-Z orbit` because `-Z incremental` was specified - -#![deny(warnings)] - -fn main() { - FAIL! // We just need some compilation error. What we really care about is - // that the error pattern above is checked. 
-} diff --git a/src/test/compile-fail/lint-no-drop-on-repr-extern.rs b/src/test/compile-fail/lint-no-drop-on-repr-extern.rs deleted file mode 100644 index 91e5065517dcc..0000000000000 --- a/src/test/compile-fail/lint-no-drop-on-repr-extern.rs +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// Check we reject structs that mix a `Drop` impl with `#[repr(C)]`. -// -// As a special case, also check that we do not warn on such structs -// if they also are declared with `#[unsafe_no_drop_flag]` - -#![feature(unsafe_no_drop_flag)] -#![deny(drop_with_repr_extern)] -//~^ NOTE lint level defined here -//~| NOTE lint level defined here - -#[repr(C)] struct As { x: Box<i32> } -#[repr(C)] enum Ae { Ae(Box<i32>), _None } - -struct Bs { x: Box<i32> } -enum Be { Be(Box<i32>), _None } - -#[repr(C)] struct Cs { x: Box<i32> } -//~^ NOTE the `#[repr(C)]` attribute is attached here - -impl Drop for Cs { fn drop(&mut self) { } } -//~^ ERROR implementing Drop adds hidden state to types, possibly conflicting with `#[repr(C)]` - -#[repr(C)] enum Ce { Ce(Box<i32>), _None } -//~^ NOTE the `#[repr(C)]` attribute is attached here - -impl Drop for Ce { fn drop(&mut self) { } } -//~^ ERROR implementing Drop adds hidden state to types, possibly conflicting with `#[repr(C)]` - -#[unsafe_no_drop_flag] -#[repr(C)] struct Ds { x: Box<i32> } - -impl Drop for Ds { fn drop(&mut self) { } } - -#[unsafe_no_drop_flag] -#[repr(C)] enum De { De(Box<i32>), _None } - -impl Drop for De { fn drop(&mut self) { } } - -fn main() { - let a = As { x: Box::new(3) }; - let b = Bs { x: Box::new(3) }; - let c = Cs { x: Box::new(3) }; - let d = Ds { x: Box::new(3) }; - - println!("{:?}", (*a.x, *b.x, *c.x, *d.x)); - - let _a = Ae::Ae(Box::new(3)); - let _b = Be::Be(Box::new(3)); - let _c = Ce::Ce(Box::new(3)); - let _d = De::De(Box::new(3)); -} diff --git a/src/test/compile-fail/unsafe_no_drop_flag-gate.rs b/src/test/compile-fail/unsafe_no_drop_flag-gate.rs deleted file mode 100644 index 542698fd15295..0000000000000 --- a/src/test/compile-fail/unsafe_no_drop_flag-gate.rs +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -pub struct T; - -#[unsafe_no_drop_flag] -//~^ ERROR unsafe_no_drop_flag has unstable semantics and may be removed -pub struct S { - pub x: T, -} - -impl Drop for S { - fn drop(&mut self) {} -} - -pub fn main() {} diff --git a/src/test/run-fail/issue-30380.rs b/src/test/run-fail/issue-30380.rs index eb668517bdf34..7bd9adcba9bd1 100644 --- a/src/test/run-fail/issue-30380.rs +++ b/src/test/run-fail/issue-30380.rs @@ -8,8 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] - // check that panics in destructors during assignment do not leave // destroyed values lying around for other destructors to observe.
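The sketch below is not part of the patch: it restates what the issue-30380 test above checks in a self-contained form, swapping the test's raw &mut for RefCell so it runs under catch_unwind. It assumes the semantics the test itself asserts, namely that the replacement value is fully written before unwinding reaches other destructors; the sentinel value is illustrative.

use std::cell::RefCell;
use std::panic::catch_unwind;

struct FilledOnDrop(u32);

impl Drop for FilledOnDrop {
    fn drop(&mut self) {
        if self.0 == 0 {
            // Scribble over the value, then panic; safe code must
            // never observe the scribbled state afterwards.
            self.0 = 0x1c1c1c1c;
            panic!("panic in destructor during assignment");
        }
    }
}

struct Observer<'a>(&'a RefCell<FilledOnDrop>);

impl<'a> Drop for Observer<'a> {
    fn drop(&mut self) {
        // Runs during unwinding: it must see the new value, not the
        // destroyed old one.
        assert_eq!(self.0.borrow().0, 1);
    }
}

fn main() {
    let result = catch_unwind(|| {
        let cell = RefCell::new(FilledOnDrop(0));
        let _observer = Observer(&cell);
        // The assignment drops `FilledOnDrop(0)` first; its destructor
        // panics, but the new value must be in place by the time
        // unwinding reaches `_observer`.
        *cell.borrow_mut() = FilledOnDrop(1);
    });
    assert!(result.is_err());
}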
@@ -35,7 +33,6 @@ impl<'a> Drop for Observer<'a> { } } -#[rustc_mir] fn foo(b: &mut Observer) { *b.0 = FilledOnDrop(1); } diff --git a/src/test/run-fail/mir_drop_panics.rs b/src/test/run-fail/mir_drop_panics.rs index 1a4330523babe..98311525ad0f2 100644 --- a/src/test/run-fail/mir_drop_panics.rs +++ b/src/test/run-fail/mir_drop_panics.rs @@ -7,7 +7,6 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] // error-pattern:panic 1 // error-pattern:drop 2 @@ -24,7 +23,6 @@ impl Drop for Droppable { } } -#[rustc_mir] fn mir() { let x = Droppable(2); let y = Droppable(1); diff --git a/src/test/run-fail/mir_dynamic_drops_1.rs b/src/test/run-fail/mir_dynamic_drops_1.rs index 16160a1496ff9..6cf2851d93d47 100644 --- a/src/test/run-fail/mir_dynamic_drops_1.rs +++ b/src/test/run-fail/mir_dynamic_drops_1.rs @@ -7,7 +7,6 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] // error-pattern:drop 1 // error-pattern:drop 2 use std::io::{self, Write}; @@ -26,7 +25,6 @@ impl<'a> Drop for Droppable<'a> { } } -#[rustc_mir] fn mir() { let (mut xv, mut yv) = (false, false); let x = Droppable(&mut xv, 1); diff --git a/src/test/run-fail/mir_dynamic_drops_2.rs b/src/test/run-fail/mir_dynamic_drops_2.rs index 803ca53bf7a84..7a90298e42253 100644 --- a/src/test/run-fail/mir_dynamic_drops_2.rs +++ b/src/test/run-fail/mir_dynamic_drops_2.rs @@ -7,7 +7,7 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] + // error-pattern:drop 1 use std::io::{self, Write}; @@ -25,7 +25,6 @@ impl<'a> Drop for Droppable<'a> { } } -#[rustc_mir] fn mir<'a>(d: Droppable<'a>) { loop { let x = d; diff --git a/src/test/run-fail/mir_dynamic_drops_3.rs b/src/test/run-fail/mir_dynamic_drops_3.rs index afc037f48aa43..79ecbbb35bc56 100644 --- a/src/test/run-fail/mir_dynamic_drops_3.rs +++ b/src/test/run-fail/mir_dynamic_drops_3.rs @@ -7,7 +7,7 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] + // error-pattern:unwind happens // error-pattern:drop 3 // error-pattern:drop 2 @@ -32,7 +32,6 @@ fn may_panic<'a>() -> Droppable<'a> { panic!("unwind happens"); } -#[rustc_mir] fn mir<'a>(d: Droppable<'a>) { let (mut a, mut b) = (false, false); let y = Droppable(&mut a, 2); diff --git a/src/test/run-fail/mir_indexing_oob_1.rs b/src/test/run-fail/mir_indexing_oob_1.rs index e0d20a20577a8..41ff466f810ea 100644 --- a/src/test/run-fail/mir_indexing_oob_1.rs +++ b/src/test/run-fail/mir_indexing_oob_1.rs @@ -9,11 +9,9 @@ // except according to those terms. // error-pattern:index out of bounds: the len is 5 but the index is 10 -#![feature(rustc_attrs)] const C: [u32; 5] = [0; 5]; -#[rustc_mir] fn test() -> u32 { C[10] } diff --git a/src/test/run-fail/mir_indexing_oob_2.rs b/src/test/run-fail/mir_indexing_oob_2.rs index 6c65be5769f2d..c5c823428bc94 100644 --- a/src/test/run-fail/mir_indexing_oob_2.rs +++ b/src/test/run-fail/mir_indexing_oob_2.rs @@ -9,11 +9,9 @@ // except according to those terms. 
// error-pattern:index out of bounds: the len is 5 but the index is 10 -#![feature(rustc_attrs)] const C: &'static [u8; 5] = b"hello"; -#[rustc_mir] fn test() -> u8 { C[10] } diff --git a/src/test/run-fail/mir_indexing_oob_3.rs b/src/test/run-fail/mir_indexing_oob_3.rs index 5f3fc9376b0d3..9bc4b0025e55a 100644 --- a/src/test/run-fail/mir_indexing_oob_3.rs +++ b/src/test/run-fail/mir_indexing_oob_3.rs @@ -9,11 +9,9 @@ // except according to those terms. // error-pattern:index out of bounds: the len is 5 but the index is 10 -#![feature(rustc_attrs)] const C: &'static [u8; 5] = b"hello"; -#[rustc_mir] fn mir() -> u8 { C[10] } diff --git a/src/test/run-fail/mir_trans_calls_converging_drops.rs b/src/test/run-fail/mir_trans_calls_converging_drops.rs index 5927d802b4560..7a7526c5fc1d3 100644 --- a/src/test/run-fail/mir_trans_calls_converging_drops.rs +++ b/src/test/run-fail/mir_trans_calls_converging_drops.rs @@ -8,8 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] - // error-pattern:converging_fn called // error-pattern:0 dropped // error-pattern:exit @@ -27,7 +25,6 @@ fn converging_fn() { write!(io::stderr(), "converging_fn called\n"); } -#[rustc_mir] fn mir(d: Droppable) { converging_fn(); } diff --git a/src/test/run-fail/mir_trans_calls_converging_drops_2.rs b/src/test/run-fail/mir_trans_calls_converging_drops_2.rs index 96a46f47eb565..1301630cc85ea 100644 --- a/src/test/run-fail/mir_trans_calls_converging_drops_2.rs +++ b/src/test/run-fail/mir_trans_calls_converging_drops_2.rs @@ -8,8 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] - // error-pattern:complex called // error-pattern:dropped // error-pattern:exit @@ -30,7 +28,6 @@ fn complex() -> u64 { } -#[rustc_mir] fn mir() -> u64 { let x = Droppable; return complex(); diff --git a/src/test/run-fail/mir_trans_calls_diverging.rs b/src/test/run-fail/mir_trans_calls_diverging.rs index fcd8ab26a0a88..9dbf7de0d2d49 100644 --- a/src/test/run-fail/mir_trans_calls_diverging.rs +++ b/src/test/run-fail/mir_trans_calls_diverging.rs @@ -7,14 +7,13 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] + // error-pattern:diverging_fn called fn diverging_fn() -> ! { panic!("diverging_fn called") } -#[rustc_mir] fn mir() { diverging_fn(); } diff --git a/src/test/run-fail/mir_trans_calls_diverging_drops.rs b/src/test/run-fail/mir_trans_calls_diverging_drops.rs index 89b53b18f0619..c191870492969 100644 --- a/src/test/run-fail/mir_trans_calls_diverging_drops.rs +++ b/src/test/run-fail/mir_trans_calls_diverging_drops.rs @@ -8,8 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] - // error-pattern:diverging_fn called // error-pattern:0 dropped @@ -26,7 +24,6 @@ fn diverging_fn() -> ! { panic!("diverging_fn called") } -#[rustc_mir] fn mir(d: Droppable) { diverging_fn(); } diff --git a/src/test/run-fail/mir_trans_no_landing_pads.rs b/src/test/run-fail/mir_trans_no_landing_pads.rs index bc913fdab1c07..dacb039d89dc5 100644 --- a/src/test/run-fail/mir_trans_no_landing_pads.rs +++ b/src/test/run-fail/mir_trans_no_landing_pads.rs @@ -7,7 +7,7 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![feature(rustc_attrs)] + // compile-flags: -Z no-landing-pads // error-pattern:converging_fn called use std::io::{self, Write}; @@ -23,7 +23,6 @@ fn converging_fn() { panic!("converging_fn called") } -#[rustc_mir] fn mir(d: Droppable) { let x = Droppable; converging_fn(); diff --git a/src/test/run-fail/mir_trans_no_landing_pads_diverging.rs b/src/test/run-fail/mir_trans_no_landing_pads_diverging.rs index d97eb8c89e3e0..87037c1efed9e 100644 --- a/src/test/run-fail/mir_trans_no_landing_pads_diverging.rs +++ b/src/test/run-fail/mir_trans_no_landing_pads_diverging.rs @@ -7,7 +7,7 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] + // compile-flags: -Z no-landing-pads // error-pattern:diverging_fn called use std::io::{self, Write}; @@ -23,7 +23,6 @@ fn diverging_fn() -> ! { panic!("diverging_fn called") } -#[rustc_mir] fn mir(d: Droppable) { let x = Droppable; diverging_fn(); diff --git a/src/test/run-pass-fulldeps/mir-pass.rs b/src/test/run-pass-fulldeps/mir-pass.rs index acf11e241033c..8ac4bf9733757 100644 --- a/src/test/run-pass-fulldeps/mir-pass.rs +++ b/src/test/run-pass-fulldeps/mir-pass.rs @@ -11,10 +11,9 @@ // aux-build:dummy_mir_pass.rs // ignore-stage1 -#![feature(plugin, rustc_attrs)] +#![feature(plugin)] #![plugin(dummy_mir_pass)] -#[rustc_mir] fn math() -> i32 { 11 } diff --git a/src/test/run-pass-valgrind/cast-enum-with-dtor.rs b/src/test/run-pass-valgrind/cast-enum-with-dtor.rs index 0de949471c684..7cf75924a28c0 100644 --- a/src/test/run-pass-valgrind/cast-enum-with-dtor.rs +++ b/src/test/run-pass-valgrind/cast-enum-with-dtor.rs @@ -13,7 +13,7 @@ // no-prefer-dynamic #![allow(dead_code)] -#![feature(const_fn, rustc_attrs)] +#![feature(const_fn)] // check dtor calling order when casting enums. @@ -38,7 +38,6 @@ impl Drop for E { } } -#[rustc_no_mir] // FIXME #27840 MIR miscompiles this. fn main() { assert_eq!(FLAG.load(Ordering::SeqCst), 0); { @@ -46,5 +45,5 @@ fn main() { assert_eq!(e as u32, 2); assert_eq!(FLAG.load(Ordering::SeqCst), 0); } - assert_eq!(FLAG.load(Ordering::SeqCst), 1); + assert_eq!(FLAG.load(Ordering::SeqCst), 0); } diff --git a/src/test/run-pass/auxiliary/issue-10028.rs b/src/test/run-pass/auxiliary/issue-10028.rs index a21deb44fcc83..ed42ad6e87527 100644 --- a/src/test/run-pass/auxiliary/issue-10028.rs +++ b/src/test/run-pass/auxiliary/issue-10028.rs @@ -8,9 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(unsafe_no_drop_flag)] - -#[unsafe_no_drop_flag] pub struct ZeroLengthThingWithDestructor; impl Drop for ZeroLengthThingWithDestructor { fn drop(&mut self) {} diff --git a/src/test/run-pass/drop-flag-sanity-check.rs b/src/test/run-pass/drop-flag-sanity-check.rs deleted file mode 100644 index a8014768d7847..0000000000000 --- a/src/test/run-pass/drop-flag-sanity-check.rs +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// compile-flags: -Z force-dropflag-checks=on -// ignore-emscripten - -// Quick-and-dirty test to ensure -Z force-dropflag-checks=on works as -// expected. 
Note that the inlined drop-flag is slated for removal -// (RFC 320); when that happens, the -Z flag and this test should -// simply be removed. -// -// See also drop-flag-skip-sanity-check.rs. - -use std::env; -use std::process::Command; - -fn main() { - let args: Vec = env::args().collect(); - if args.len() > 1 && args[1] == "test" { - return test(); - } - - let mut p = Command::new(&args[0]).arg("test").spawn().unwrap(); - // The invocation should fail due to the drop-flag sanity check. - assert!(!p.wait().unwrap().success()); -} - -#[derive(Debug)] -struct Corrupted { - x: u8 -} - -impl Drop for Corrupted { - fn drop(&mut self) { println!("dropping"); } -} - -fn test() { - { - let mut c1 = Corrupted { x: 1 }; - let mut c2 = Corrupted { x: 2 }; - unsafe { - let p1 = &mut c1 as *mut Corrupted as *mut u8; - let p2 = &mut c2 as *mut Corrupted as *mut u8; - for i in 0..std::mem::size_of::() { - // corrupt everything, *including the drop flag. - // - // (We corrupt via two different means to safeguard - // against the hypothetical assignment of the - // dtor_needed/dtor_done values to v and v+k. that - // happen to match with one of the corruption values - // below.) - *p1.offset(i as isize) += 2; - *p2.offset(i as isize) += 3; - } - } - // Here, at the end of the scope of `c1` and `c2`, the - // drop-glue should detect the corruption of (at least one of) - // the drop-flags. - } - println!("We should never get here."); -} diff --git a/src/test/run-pass/drop-flag-skip-sanity-check.rs b/src/test/run-pass/drop-flag-skip-sanity-check.rs deleted file mode 100644 index 07a10c8d45443..0000000000000 --- a/src/test/run-pass/drop-flag-skip-sanity-check.rs +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// compile-flags: -Z force-dropflag-checks=off -// ignore-emscripten no threads support - -// Quick-and-dirty test to ensure -Z force-dropflag-checks=off works as -// expected. Note that the inlined drop-flag is slated for removal -// (RFC 320); when that happens, the -Z flag and this test should -// simply be removed. -// -// See also drop-flag-sanity-check.rs. - -use std::env; -use std::process::Command; - -fn main() { - let args: Vec = env::args().collect(); - if args.len() > 1 && args[1] == "test" { - return test(); - } - - let s = Command::new(&args[0]).arg("test").status().unwrap(); - // Invocatinn should succeed as drop-flag sanity check is skipped. - assert!(s.success()); -} - -#[derive(Debug)] -struct Corrupted { - x: u8 -} - -impl Drop for Corrupted { - fn drop(&mut self) { println!("dropping"); } -} - -fn test() { - { - let mut c1 = Corrupted { x: 1 }; - let mut c2 = Corrupted { x: 2 }; - unsafe { - let p1 = &mut c1 as *mut Corrupted as *mut u8; - let p2 = &mut c2 as *mut Corrupted as *mut u8; - for i in 0..std::mem::size_of::() { - // corrupt everything, *including the drop flag. - // - // (We corrupt via two different means to safeguard - // against the hypothetical assignment of the - // dtor_needed/dtor_done values to v and v+k. that - // happen to match with one of the corruption values - // below.) 
- *p1.offset(i as isize) += 2; - *p2.offset(i as isize) += 3; - } - } - // Here, at the end of the scope of `c1` and `c2`, the - // drop-glue should detect the corruption of (at least one of) - // the drop-flags. - } -} diff --git a/src/test/run-pass/dynamic-drop.rs b/src/test/run-pass/dynamic-drop.rs index f917531e868f1..2b016dfb33eca 100644 --- a/src/test/run-pass/dynamic-drop.rs +++ b/src/test/run-pass/dynamic-drop.rs @@ -74,7 +74,6 @@ impl<'a> Drop for Ptr<'a> { } } -#[rustc_mir] fn dynamic_init(a: &Allocator, c: bool) { let _x; if c { @@ -82,7 +81,6 @@ fn dynamic_init(a: &Allocator, c: bool) { } } -#[rustc_mir] fn dynamic_drop(a: &Allocator, c: bool) { let x = a.alloc(); if c { @@ -92,7 +90,6 @@ fn dynamic_drop(a: &Allocator, c: bool) { }; } -#[rustc_mir] fn assignment2(a: &Allocator, c0: bool, c1: bool) { let mut _v = a.alloc(); let mut _w = a.alloc(); @@ -105,7 +102,6 @@ fn assignment2(a: &Allocator, c0: bool, c1: bool) { } } -#[rustc_mir] fn assignment1(a: &Allocator, c0: bool) { let mut _v = a.alloc(); let mut _w = a.alloc(); diff --git a/src/test/run-pass/exhaustive-bool-match-sanity.rs b/src/test/run-pass/exhaustive-bool-match-sanity.rs index d88a5f12e303d..27bcab43229c5 100644 --- a/src/test/run-pass/exhaustive-bool-match-sanity.rs +++ b/src/test/run-pass/exhaustive-bool-match-sanity.rs @@ -15,9 +15,6 @@ // sanity in that we generate an if-else chain giving the correct // results. -#![feature(rustc_attrs)] - -#[rustc_mir] fn foo(x: bool, y: bool) -> u32 { match (x, y) { (false, _) => 0, diff --git a/src/test/run-pass/intrinsic-move-val.rs b/src/test/run-pass/intrinsic-move-val.rs index eb482b3230a43..ea42b59f1f2b8 100644 --- a/src/test/run-pass/intrinsic-move-val.rs +++ b/src/test/run-pass/intrinsic-move-val.rs @@ -55,15 +55,6 @@ pub fn main() { // compiler is hidden. rusti::move_val_init(&mut y, x); - // In particular, it may be tracked via a drop-flag embedded - // in the value, or via a null pointer, or via - // mem::POST_DROP_USIZE, or (most preferably) via a - // stack-local drop flag. - // - // (This test used to build-in knowledge of how it was - // tracked, and check that the underlying stack slot had been - // set to `mem::POST_DROP_USIZE`.) - // But what we *can* observe is how many times the destructor // for `D` is invoked, and what the last value we saw was // during such a destructor call. We do so after the end of diff --git a/src/test/run-pass/issue-10734.rs b/src/test/run-pass/issue-10734.rs index 3dc96ecde1c07..a521e5d4b6c34 100644 --- a/src/test/run-pass/issue-10734.rs +++ b/src/test/run-pass/issue-10734.rs @@ -8,12 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. - -#![feature(unsafe_no_drop_flag)] - static mut drop_count: usize = 0; -#[unsafe_no_drop_flag] struct Foo { dropped: bool } diff --git a/src/test/run-pass/issue-16648.rs b/src/test/run-pass/issue-16648.rs index e596bee8bfe9f..e1b94179764bf 100644 --- a/src/test/run-pass/issue-16648.rs +++ b/src/test/run-pass/issue-16648.rs @@ -9,9 +9,8 @@ // except according to those terms. 
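The two deleted sanity-check tests only made sense while every value with a destructor carried an embedded drop flag that drop glue inspected at scope exit; RFC 320 removes that flag. Conditional drops, such as the ones dynamic-drop.rs exercises above, are instead tracked by a flag in the caller's stack frame. A minimal hand-written sketch of the pattern (an illustration of the semantics, not compiler output):

    struct Droppable;

    impl Drop for Droppable {
        fn drop(&mut self) {
            println!("dropped");
        }
    }

    fn dynamic_init(c: bool) {
        let _x;
        if c {
            _x = Droppable;
        }
        // Whether `_x` holds a value is only known at runtime. The old
        // scheme answered that by zeroing the value once its drop ran
        // (mem::POST_DROP_USIZE); the new scheme keeps a boolean in this
        // function's stack frame and consults it at the closing brace.
    }

    fn main() {
        dynamic_init(true);  // prints "dropped"
        dynamic_init(false); // nothing to drop
    }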
-#![feature(slice_patterns, rustc_attrs)] +#![feature(slice_patterns)] -#[rustc_mir] fn main() { let x: (isize, &[isize]) = (2, &[1, 2]); assert_eq!(match x { diff --git a/src/test/run-pass/issue-2895.rs b/src/test/run-pass/issue-2895.rs index 5587f68bd1854..b0fd0e148c8c2 100644 --- a/src/test/run-pass/issue-2895.rs +++ b/src/test/run-pass/issue-2895.rs @@ -26,11 +26,11 @@ impl Drop for Kitty { #[cfg(target_pointer_width = "64")] pub fn main() { assert_eq!(mem::size_of::<Cat>(), 8 as usize); - assert_eq!(mem::size_of::<Kitty>(), 16 as usize); + assert_eq!(mem::size_of::<Kitty>(), 8 as usize); } #[cfg(target_pointer_width = "32")] pub fn main() { assert_eq!(mem::size_of::<Cat>(), 4 as usize); - assert_eq!(mem::size_of::<Kitty>(), 8 as usize); + assert_eq!(mem::size_of::<Kitty>(), 4 as usize); } diff --git a/src/test/run-pass/issue-28950.rs b/src/test/run-pass/issue-28950.rs index a905727afff4f..a70c2b3ae1b7b 100644 --- a/src/test/run-pass/issue-28950.rs +++ b/src/test/run-pass/issue-28950.rs @@ -9,19 +9,23 @@ // except according to those terms. // ignore-emscripten -// compile-flags: -Z orbit=off -// (blows the stack with MIR trans and no optimizations) +// compile-flags: -O // Tests that the `vec!` macro does not overflow the stack when it is // given data larger than the stack. +// FIXME(eddyb) Improve unoptimized codegen to avoid the temporary, +// and thus run successfully even when compiled at -C opt-level=0. + const LEN: usize = 1 << 15; use std::thread::Builder; fn main() { assert!(Builder::new().stack_size(LEN / 2).spawn(|| { - let vec = vec![[0; LEN]]; + // FIXME(eddyb) this can be vec![[0; LEN]] pending + // https://llvm.org/bugs/show_bug.cgi?id=28987 + let vec = vec![unsafe { std::mem::zeroed::<[u8; LEN]>() }]; assert_eq!(vec.len(), 1); }).unwrap().join().is_ok()); } diff --git a/src/test/run-pass/issue-32805.rs b/src/test/run-pass/issue-32805.rs index ea49cf3e7bedb..b7ff63b75ce88 100644 --- a/src/test/run-pass/issue-32805.rs +++ b/src/test/run-pass/issue-32805.rs @@ -8,19 +8,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms.
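The issue-2895 hunks above are the user-visible payoff of dropping the embedded flag: a type with a Drop impl is no longer one byte (padded out to a word) larger than its fields. A sketch of the 64-bit assertion, with the struct body assumed to match the original test:

    use std::mem;

    #[allow(dead_code)]
    struct Kitty {
        x: isize,
    }

    impl Drop for Kitty {
        fn drop(&mut self) {}
    }

    fn main() {
        // Previously 16 on 64-bit targets (8 for `x`, 1 flag byte, 7 padding);
        // now exactly the size of the single field.
        assert_eq!(mem::size_of::<Kitty>(), mem::size_of::<isize>());
    }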
-#![feature(rustc_attrs)] - -#[rustc_mir] fn const_mir() -> f32 { 9007199791611905.0 } -#[rustc_no_mir] -fn const_old() -> f32 { 9007199791611905.0 } - fn main() { let original = "9007199791611905.0"; // (1<<53)+(1<<29)+1 let expected = "9007200000000000"; assert_eq!(const_mir().to_string(), expected); - assert_eq!(const_old().to_string(), expected); assert_eq!(original.parse::().unwrap().to_string(), expected); } diff --git a/src/test/run-pass/issue-33387.rs b/src/test/run-pass/issue-33387.rs index a4b85bc7a091d..d30e88b3968c7 100644 --- a/src/test/run-pass/issue-33387.rs +++ b/src/test/run-pass/issue-33387.rs @@ -24,17 +24,14 @@ impl Foo for [u8; 2] { struct Bar(T); -#[rustc_mir] fn unsize_fat_ptr<'a>(x: &'a Bar) -> &'a Bar { x } -#[rustc_mir] fn unsize_nested_fat_ptr(x: Arc) -> Arc { x } -#[rustc_mir] fn main() { let x: Box> = Box::new(Bar([1,2])); assert_eq!(unsize_fat_ptr(&*x).0.get(), [1, 2]); diff --git a/src/test/run-pass/issue-7784.rs b/src/test/run-pass/issue-7784.rs index 0008825226ba0..badc013cd621f 100644 --- a/src/test/run-pass/issue-7784.rs +++ b/src/test/run-pass/issue-7784.rs @@ -11,7 +11,6 @@ #![feature(advanced_slice_patterns)] #![feature(slice_patterns)] -#![feature(rustc_attrs)] use std::ops::Add; @@ -22,7 +21,6 @@ fn bar(a: &'static str, b: &'static str) -> [&'static str; 4] { [a, b, b, a] } -#[rustc_mir] fn main() { assert_eq!(foo([1, 2, 3]), (1, 3, 6)); diff --git a/src/test/run-pass/match-vec-alternatives.rs b/src/test/run-pass/match-vec-alternatives.rs index 010c145521008..fa609593c24b6 100644 --- a/src/test/run-pass/match-vec-alternatives.rs +++ b/src/test/run-pass/match-vec-alternatives.rs @@ -11,9 +11,7 @@ #![feature(advanced_slice_patterns)] #![feature(slice_patterns)] -#![feature(rustc_attrs)] -#[rustc_mir] fn match_vecs<'a, T>(l1: &'a [T], l2: &'a [T]) -> &'static str { match (l1, l2) { (&[], &[]) => "both empty", @@ -22,7 +20,6 @@ fn match_vecs<'a, T>(l1: &'a [T], l2: &'a [T]) -> &'static str { } } -#[rustc_mir] fn match_vecs_cons<'a, T>(l1: &'a [T], l2: &'a [T]) -> &'static str { match (l1, l2) { (&[], &[]) => "both empty", @@ -31,7 +28,6 @@ fn match_vecs_cons<'a, T>(l1: &'a [T], l2: &'a [T]) -> &'static str { } } -#[rustc_mir] fn match_vecs_snoc<'a, T>(l1: &'a [T], l2: &'a [T]) -> &'static str { match (l1, l2) { (&[], &[]) => "both empty", @@ -40,7 +36,6 @@ fn match_vecs_snoc<'a, T>(l1: &'a [T], l2: &'a [T]) -> &'static str { } } -#[rustc_mir] fn match_nested_vecs_cons<'a, T>(l1: Option<&'a [T]>, l2: Result<&'a [T], ()>) -> &'static str { match (l1, l2) { (Some(&[]), Ok(&[])) => "Some(empty), Ok(empty)", @@ -51,7 +46,6 @@ fn match_nested_vecs_cons<'a, T>(l1: Option<&'a [T]>, l2: Result<&'a [T], ()>) - } } -#[rustc_mir] fn match_nested_vecs_snoc<'a, T>(l1: Option<&'a [T]>, l2: Result<&'a [T], ()>) -> &'static str { match (l1, l2) { (Some(&[]), Ok(&[])) => "Some(empty), Ok(empty)", diff --git a/src/test/run-pass/mir_adt_construction.rs b/src/test/run-pass/mir_adt_construction.rs index 4526c40af84cf..dae843bba9fa6 100644 --- a/src/test/run-pass/mir_adt_construction.rs +++ b/src/test/run-pass/mir_adt_construction.rs @@ -8,15 +8,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
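match-vec-alternatives above keeps its slice-pattern arms and loses only the attributes. For reference, the first function's full shape, written here in today's stable slice-pattern syntax; the middle arms are elided by the hunk context, so their exact form is an assumption based on the visible arms:

    fn match_vecs<'a, T>(l1: &'a [T], l2: &'a [T]) -> &'static str {
        match (l1, l2) {
            (&[], &[]) => "both empty",
            (&[], &[_, ..]) | (&[_, ..], &[]) => "one empty",
            (&[_, ..], &[_, ..]) => "both non-empty",
        }
    }

    fn main() {
        assert_eq!(match_vecs::<i32>(&[], &[]), "both empty");
        assert_eq!(match_vecs(&[1], &[]), "one empty");
        assert_eq!(match_vecs(&[1], &[2]), "both non-empty");
    }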
-#![feature(rustc_attrs)] - #[repr(C, u32)] enum CEnum { Hello = 30, World = 60 } -#[rustc_mir] fn test1(c: CEnum) -> i32 { let c2 = CEnum::Hello; match (c, c2) { @@ -40,7 +37,6 @@ impl Drop for Pakd { fn drop(&mut self) {} } -#[rustc_mir] fn test2() -> Pakd { Pakd { a: 42, b: 42, c: 42, d: 42, e: () } } @@ -48,18 +44,15 @@ fn test2() -> Pakd { #[derive(PartialEq, Debug)] struct TupleLike(u64, u32); -#[rustc_mir] fn test3() -> TupleLike { TupleLike(42, 42) } -#[rustc_mir] fn test4(x: fn(u64, u32) -> TupleLike) -> (TupleLike, TupleLike) { let y = TupleLike; (x(42, 84), y(42, 84)) } -#[rustc_mir] fn test5(x: fn(u32) -> Option) -> (Option, Option) { let y = Some; (x(42), y(42)) diff --git a/src/test/run-pass/mir_ascription_coercion.rs b/src/test/run-pass/mir_ascription_coercion.rs index b227be9c543b2..bc1013429aa59 100644 --- a/src/test/run-pass/mir_ascription_coercion.rs +++ b/src/test/run-pass/mir_ascription_coercion.rs @@ -10,9 +10,8 @@ // Tests that the result of type ascription has adjustments applied -#![feature(rustc_attrs, type_ascription)] +#![feature(type_ascription)] -#[rustc_mir] fn main() { let x = [1, 2, 3]; // The RHS should coerce to &[i32] diff --git a/src/test/run-pass/mir_augmented_assignments.rs b/src/test/run-pass/mir_augmented_assignments.rs index dcfa569a933e8..bb90f25fce5f4 100644 --- a/src/test/run-pass/mir_augmented_assignments.rs +++ b/src/test/run-pass/mir_augmented_assignments.rs @@ -8,8 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] - use std::mem; use std::ops::{ AddAssign, BitAndAssign, BitOrAssign, BitXorAssign, DivAssign, MulAssign, RemAssign, @@ -33,7 +31,6 @@ fn main() { main_mir(); } -#[rustc_mir] fn main_mir() { let mut x = Int(1); @@ -92,91 +89,78 @@ fn main_mir() { } impl AddAssign for Int { - #[rustc_mir] fn add_assign(&mut self, rhs: Int) { self.0 += rhs.0; } } impl BitAndAssign for Int { - #[rustc_mir] fn bitand_assign(&mut self, rhs: Int) { self.0 &= rhs.0; } } impl BitOrAssign for Int { - #[rustc_mir] fn bitor_assign(&mut self, rhs: Int) { self.0 |= rhs.0; } } impl BitXorAssign for Int { - #[rustc_mir] fn bitxor_assign(&mut self, rhs: Int) { self.0 ^= rhs.0; } } impl DivAssign for Int { - #[rustc_mir] fn div_assign(&mut self, rhs: Int) { self.0 /= rhs.0; } } impl MulAssign for Int { - #[rustc_mir] fn mul_assign(&mut self, rhs: Int) { self.0 *= rhs.0; } } impl RemAssign for Int { - #[rustc_mir] fn rem_assign(&mut self, rhs: Int) { self.0 %= rhs.0; } } impl ShlAssign for Int { - #[rustc_mir] fn shl_assign(&mut self, rhs: u8) { self.0 <<= rhs; } } impl ShlAssign for Int { - #[rustc_mir] fn shl_assign(&mut self, rhs: u16) { self.0 <<= rhs; } } impl ShrAssign for Int { - #[rustc_mir] fn shr_assign(&mut self, rhs: u8) { self.0 >>= rhs; } } impl ShrAssign for Int { - #[rustc_mir] fn shr_assign(&mut self, rhs: u16) { self.0 >>= rhs; } } impl SubAssign for Int { - #[rustc_mir] fn sub_assign(&mut self, rhs: Int) { self.0 -= rhs.0; } } impl AddAssign for Slice { - #[rustc_mir] fn add_assign(&mut self, rhs: i32) { for lhs in &mut self.0 { *lhs += rhs; diff --git a/src/test/run-pass/mir_autoderef.rs b/src/test/run-pass/mir_autoderef.rs index 81712e4569f06..f0032fee2835e 100644 --- a/src/test/run-pass/mir_autoderef.rs +++ b/src/test/run-pass/mir_autoderef.rs @@ -8,8 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
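mir_augmented_assignments drives every augmented assignment through its core::ops trait; after this change the overloaded impls are exercised with no per-function opt-in. The pattern in miniature, with one trait standing in for the full set the test covers:

    use std::ops::AddAssign;

    #[derive(Debug, PartialEq)]
    struct Int(i32);

    impl AddAssign for Int {
        fn add_assign(&mut self, rhs: Int) {
            self.0 += rhs.0;
        }
    }

    fn main() {
        let mut x = Int(1);
        x += Int(2); // lowers to AddAssign::add_assign(&mut x, Int(2))
        assert_eq!(x, Int(3));
    }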
-#![feature(rustc_attrs)] - use std::ops::{Deref, DerefMut}; pub struct MyRef(u32); @@ -24,12 +22,10 @@ impl DerefMut for MyRef { } -#[rustc_mir] fn deref(x: &MyRef) -> &u32 { x } -#[rustc_mir] fn deref_mut(x: &mut MyRef) -> &mut u32 { x } diff --git a/src/test/run-pass/mir_boxing.rs b/src/test/run-pass/mir_boxing.rs index 1d635e9f778df..1c5134755d7aa 100644 --- a/src/test/run-pass/mir_boxing.rs +++ b/src/test/run-pass/mir_boxing.rs @@ -8,9 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs, box_syntax)] +#![feature(box_syntax)] -#[rustc_mir] fn test() -> Box { box 42 } diff --git a/src/test/run-pass/mir_build_match_comparisons.rs b/src/test/run-pass/mir_build_match_comparisons.rs index ad24e39d4f93f..b195ff63412af 100644 --- a/src/test/run-pass/mir_build_match_comparisons.rs +++ b/src/test/run-pass/mir_build_match_comparisons.rs @@ -8,9 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] - -#[rustc_mir] fn test1(x: i8) -> i32 { match x { 1...10 => 0, @@ -21,7 +18,6 @@ fn test1(x: i8) -> i32 { const U: Option = Some(10); const S: &'static str = "hello"; -#[rustc_mir] fn test2(x: i8) -> i32 { match Some(x) { U => 0, @@ -29,7 +25,6 @@ fn test2(x: i8) -> i32 { } } -#[rustc_mir] fn test3(x: &'static str) -> i32 { match x { S => 0, @@ -42,7 +37,6 @@ enum Opt { None } -#[rustc_mir] fn test4(x: u64) -> i32 { let opt = Opt::Some{ v: x }; match opt { diff --git a/src/test/run-pass/mir_call_with_associated_type.rs b/src/test/run-pass/mir_call_with_associated_type.rs index 08401c275a52c..935d0e58985d7 100644 --- a/src/test/run-pass/mir_call_with_associated_type.rs +++ b/src/test/run-pass/mir_call_with_associated_type.rs @@ -8,8 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] - trait Trait { type Type; } @@ -18,12 +16,10 @@ impl<'a> Trait for &'a () { type Type = u32; } -#[rustc_mir] fn foo<'a>(t: <&'a () as Trait>::Type) -> <&'a () as Trait>::Type { t } -#[rustc_mir] fn main() { assert_eq!(foo(4), 4); } diff --git a/src/test/run-pass/mir_cast_fn_ret.rs b/src/test/run-pass/mir_cast_fn_ret.rs index 8a723967aff5f..311d5451eb6dd 100644 --- a/src/test/run-pass/mir_cast_fn_ret.rs +++ b/src/test/run-pass/mir_cast_fn_ret.rs @@ -8,8 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] - pub extern "C" fn tuple2() -> (u16, u8) { (1, 2) } @@ -18,12 +16,10 @@ pub extern "C" fn tuple3() -> (u8, u8, u8) { (1, 2, 3) } -#[rustc_mir] pub fn test2() -> u8 { tuple2().1 } -#[rustc_mir] pub fn test3() -> u8 { tuple3().2 } diff --git a/src/test/run-pass/mir_coercion_casts.rs b/src/test/run-pass/mir_coercion_casts.rs index 4d5c59276d750..2be2854fac959 100644 --- a/src/test/run-pass/mir_coercion_casts.rs +++ b/src/test/run-pass/mir_coercion_casts.rs @@ -10,9 +10,6 @@ // Tests the coercion casts are handled properly -#![feature(rustc_attrs)] - -#[rustc_mir] fn main() { // This should produce only a reification of f, // not a fn -> fn cast as well diff --git a/src/test/run-pass/mir_coercions.rs b/src/test/run-pass/mir_coercions.rs index 09dd52e30bef9..79d1cfde7cd58 100644 --- a/src/test/run-pass/mir_coercions.rs +++ b/src/test/run-pass/mir_coercions.rs @@ -8,16 +8,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
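mir_autoderef above checks that deref coercion inserts the Deref/DerefMut calls implicitly. A self-contained version of the same shape; the impl bodies are assumed, since the hunk context only shows the signatures:

    use std::ops::{Deref, DerefMut};

    pub struct MyRef(u32);

    impl Deref for MyRef {
        type Target = u32;
        fn deref(&self) -> &u32 { &self.0 }
    }

    impl DerefMut for MyRef {
        fn deref_mut(&mut self) -> &mut u32 { &mut self.0 }
    }

    // Deref coercion turns `x` into `&*x` / `&mut *x` at the return site.
    fn deref(x: &MyRef) -> &u32 { x }
    fn deref_mut(x: &mut MyRef) -> &mut u32 { x }

    fn main() {
        let mut r = MyRef(7);
        assert_eq!(*deref(&r), 7);
        *deref_mut(&mut r) = 9;
        assert_eq!(r.0, 9);
    }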
-#![feature(rustc_attrs, coerce_unsized, unsize)] +#![feature(coerce_unsized, unsize)] use std::ops::CoerceUnsized; use std::marker::Unsize; -#[rustc_mir] fn identity_coercion(x: &(Fn(u32)->u32 + Send)) -> &Fn(u32)->u32 { x } -#[rustc_mir] fn fn_coercions(f: &fn(u32) -> u32) -> (unsafe fn(u32) -> u32, &(Fn(u32) -> u32+Send)) @@ -25,7 +23,6 @@ fn fn_coercions(f: &fn(u32) -> u32) -> (*f, f) } -#[rustc_mir] fn simple_array_coercion(x: &[u8; 3]) -> &[u8] { x } fn square(a: u32) -> u32 { a * a } @@ -39,23 +36,19 @@ struct TrivPtrWrapper<'a, T: 'a+?Sized>(&'a T); impl<'a, T: ?Sized+Unsize, U: ?Sized> CoerceUnsized> for TrivPtrWrapper<'a, T> {} -#[rustc_mir] fn coerce_ptr_wrapper(p: PtrWrapper<[u8; 3]>) -> PtrWrapper<[u8]> { p } -#[rustc_mir] fn coerce_triv_ptr_wrapper(p: TrivPtrWrapper<[u8; 3]>) -> TrivPtrWrapper<[u8]> { p } -#[rustc_mir] fn coerce_fat_ptr_wrapper(p: PtrWrapper u32+Send>) -> PtrWrapper u32> { p } -#[rustc_mir] fn coerce_ptr_wrapper_poly<'a, T, Trait: ?Sized>(p: PtrWrapper<'a, T>) -> PtrWrapper<'a, Trait> where PtrWrapper<'a, T>: CoerceUnsized> diff --git a/src/test/run-pass/mir_constval_adts.rs b/src/test/run-pass/mir_constval_adts.rs index 0ce9e88ef3dbe..696ff8a7e600f 100644 --- a/src/test/run-pass/mir_constval_adts.rs +++ b/src/test/run-pass/mir_constval_adts.rs @@ -7,7 +7,6 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] #[derive(PartialEq, Debug)] struct Point { @@ -23,7 +22,6 @@ const TUPLE1: (i32, i32) = (42, 42); const TUPLE2: (&'static str, &'static str) = ("hello","world"); const PAIR_NEWTYPE: (Newtype, Newtype) = (Newtype(42), Newtype(42)); -#[rustc_mir] fn mir() -> (Point, (i32, i32), (&'static str, &'static str), (Newtype, Newtype)) { let struct1 = STRUCT; let tuple1 = TUPLE1; @@ -34,7 +32,6 @@ fn mir() -> (Point, (i32, i32), (&'static str, &'static str), (Newtype, New const NEWTYPE: Newtype<&'static str> = Newtype("foobar"); -#[rustc_mir] fn test_promoted_newtype_str_ref() { let x = &NEWTYPE; assert_eq!(x, &Newtype("foobar")); diff --git a/src/test/run-pass/mir_cross_crate.rs b/src/test/run-pass/mir_cross_crate.rs deleted file mode 100644 index cc239d9f68b13..0000000000000 --- a/src/test/run-pass/mir_cross_crate.rs +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// compile-flags: -Z orbit -// Tests that -Z orbit affects functions from other crates. - -#![feature(unsafe_no_drop_flag)] - -#[unsafe_no_drop_flag] -struct Foo; - -impl Drop for Foo { - fn drop(&mut self) { - panic!("MIR trans is not enabled for mem::forget"); - } -} - -fn main() { - let x = Foo; - std::mem::forget(x); -} diff --git a/src/test/run-pass/mir_fat_ptr.rs b/src/test/run-pass/mir_fat_ptr.rs index 9a4267bec925c..e5c9e3577d1c3 100644 --- a/src/test/run-pass/mir_fat_ptr.rs +++ b/src/test/run-pass/mir_fat_ptr.rs @@ -10,46 +10,37 @@ // test that ordinary fat pointer operations work. 
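PtrWrapper and TrivPtrWrapper in mir_coercions opt user-defined pointer types into unsizing coercions via CoerceUnsized. A nightly-only sketch of the trivial case, with the impl reconstructed to match the visible function signatures:

    #![feature(coerce_unsized, unsize)]

    use std::marker::Unsize;
    use std::ops::CoerceUnsized;

    struct TrivPtrWrapper<'a, T: 'a + ?Sized>(&'a T);

    // Whenever T unsizes to U (e.g. [u8; 3] to [u8]), the wrapper
    // coerces right along with it.
    impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<TrivPtrWrapper<'a, U>>
        for TrivPtrWrapper<'a, T> {}

    fn coerce_triv_ptr_wrapper(p: TrivPtrWrapper<[u8; 3]>) -> TrivPtrWrapper<[u8]> {
        p
    }

    fn main() {
        let w = coerce_triv_ptr_wrapper(TrivPtrWrapper(&[1, 2, 3]));
        assert_eq!(w.0.len(), 3);
    }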
-#![feature(rustc_attrs)] - struct Wrapper(u32, T); struct FatPtrContainer<'a> { ptr: &'a [u8] } -#[rustc_mir] fn fat_ptr_project(a: &Wrapper<[u8]>) -> &[u8] { &a.1 } -#[rustc_mir] fn fat_ptr_simple(a: &[u8]) -> &[u8] { a } -#[rustc_mir] fn fat_ptr_via_local(a: &[u8]) -> &[u8] { let x = a; x } -#[rustc_mir] fn fat_ptr_from_struct(s: FatPtrContainer) -> &[u8] { s.ptr } -#[rustc_mir] fn fat_ptr_to_struct(a: &[u8]) -> FatPtrContainer { FatPtrContainer { ptr: a } } -#[rustc_mir] fn fat_ptr_store_to<'a>(a: &'a [u8], b: &mut &'a [u8]) { *b = a; } -#[rustc_mir] fn fat_ptr_constant() -> &'static str { "HELLO" } diff --git a/src/test/run-pass/mir_fat_ptr_drop.rs b/src/test/run-pass/mir_fat_ptr_drop.rs index 3f79be0479391..64e68c78c3ca6 100644 --- a/src/test/run-pass/mir_fat_ptr_drop.rs +++ b/src/test/run-pass/mir_fat_ptr_drop.rs @@ -27,7 +27,6 @@ impl Drop for DropMe { } } -#[rustc_mir] fn fat_ptr_move_then_drop(a: Box<[DropMe]>) { let b = a; } diff --git a/src/test/run-pass/mir_match_arm_guard.rs b/src/test/run-pass/mir_match_arm_guard.rs index fb177ba7b2bb0..487999e6ed62b 100644 --- a/src/test/run-pass/mir_match_arm_guard.rs +++ b/src/test/run-pass/mir_match_arm_guard.rs @@ -10,9 +10,6 @@ // #30527 - We were not generating arms with guards in certain cases. -#![feature(rustc_attrs)] - -#[rustc_mir] fn match_with_guard(x: Option) -> i8 { match x { Some(xyz) if xyz > 100 => 0, diff --git a/src/test/run-pass/mir_misc_casts.rs b/src/test/run-pass/mir_misc_casts.rs index 0799ffebe69e5..ae719ac2800ee 100644 --- a/src/test/run-pass/mir_misc_casts.rs +++ b/src/test/run-pass/mir_misc_casts.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(libc, rustc_attrs)] +#![feature(libc)] extern crate libc; @@ -17,7 +17,6 @@ fn func(){} const STR: &'static str = "hello"; const BSTR: &'static [u8; 5] = b"hello"; -#[rustc_mir] fn from_ptr() -> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, *const ()) { let f = 1_usize as *const libc::FILE; @@ -35,7 +34,6 @@ fn from_ptr() (c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11) } -#[rustc_mir] fn from_1() -> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, *const libc::FILE) { let c1 = 1 as isize; @@ -54,7 +52,6 @@ fn from_1() (c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13) } -#[rustc_mir] fn from_1usize() -> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, *const libc::FILE) { let c1 = 1_usize as isize; @@ -73,7 +70,6 @@ fn from_1usize() (c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13) } -#[rustc_mir] fn from_1isize() -> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, *const libc::FILE) { let c1 = 1_isize as isize; @@ -92,7 +88,6 @@ fn from_1isize() (c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13) } -#[rustc_mir] fn from_1u8() -> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, *const libc::FILE) { let c1 = 1_u8 as isize; @@ -111,7 +106,6 @@ fn from_1u8() (c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13) } -#[rustc_mir] fn from_1i8() -> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, *const libc::FILE) { let c1 = 1_i8 as isize; @@ -130,7 +124,6 @@ fn from_1i8() (c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13) } -#[rustc_mir] fn from_1u16() -> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, *const libc::FILE) { let c1 = 1_u16 as isize; @@ -149,7 +142,6 @@ fn from_1u16() (c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13) } -#[rustc_mir] fn from_1i16() -> (isize, 
usize, i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, *const libc::FILE) { let c1 = 1_i16 as isize; @@ -168,7 +160,6 @@ fn from_1i16() (c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13) } -#[rustc_mir] fn from_1u32() -> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, *const libc::FILE) { let c1 = 1_u32 as isize; @@ -187,7 +178,6 @@ fn from_1u32() (c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13) } -#[rustc_mir] fn from_1i32() -> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, *const libc::FILE) { let c1 = 1_i32 as isize; @@ -206,7 +196,6 @@ fn from_1i32() (c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13) } -#[rustc_mir] fn from_1u64() -> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, *const libc::FILE) { let c1 = 1_u64 as isize; @@ -225,7 +214,6 @@ fn from_1u64() (c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13) } -#[rustc_mir] fn from_1i64() -> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, *const libc::FILE) { let c1 = 1_i64 as isize; @@ -244,7 +232,6 @@ fn from_1i64() (c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13) } -#[rustc_mir] fn from_bool() -> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64) { let c1 = true as isize; @@ -260,7 +247,6 @@ fn from_bool() (c1, c2, c3, c4, c5, c6, c7, c8, c9, c10) } -#[rustc_mir] fn from_1f32() -> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, f32, f64) { let c1 = 1.0_f32 as isize; @@ -278,7 +264,6 @@ fn from_1f32() (c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12) } -#[rustc_mir] fn from_1f64() -> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, f32, f64) { let c1 = 1.0f64 as isize; @@ -296,7 +281,6 @@ fn from_1f64() (c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12) } -#[rustc_mir] fn other_casts() -> (*const u8, *const isize, *const u8, *const u8) { let c1 = func as *const u8; diff --git a/src/test/run-pass/mir_overflow_off.rs b/src/test/run-pass/mir_overflow_off.rs index 04ac606a8a9a5..0db1e7b4563c1 100644 --- a/src/test/run-pass/mir_overflow_off.rs +++ b/src/test/run-pass/mir_overflow_off.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// compile-flags: -Z force-overflow-checks=off -Z orbit +// compile-flags: -Z force-overflow-checks=off // Test that with MIR trans, overflow checks can be // turned off, even when they're from core::ops::*. diff --git a/src/test/run-pass/mir_raw_fat_ptr.rs b/src/test/run-pass/mir_raw_fat_ptr.rs index a632f00d9ee5f..c9fd88f2fb3cf 100644 --- a/src/test/run-pass/mir_raw_fat_ptr.rs +++ b/src/test/run-pass/mir_raw_fat_ptr.rs @@ -8,8 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
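mir_misc_casts above gives each source type its own function so that every primitive `as` conversion gets its own MIR cast. A compilable cross-section, covering only the bool and f32 sources:

    // Casting from bool: always 0 or 1 in the target type.
    fn from_bool() -> (isize, u8, i32) {
        (true as isize, true as u8, true as i32)
    }

    // Casting from f32: truncation toward zero for integer targets,
    // a precision change for f64.
    fn from_1f32() -> (i32, u32, f64) {
        (1.9_f32 as i32, 1.9_f32 as u32, 1.9_f32 as f64)
    }

    fn main() {
        assert_eq!(from_bool(), (1, 1, 1));
        let (a, b, c) = from_1f32();
        assert_eq!((a, b), (1, 1));
        assert!((c - 1.9).abs() < 1e-6);
    }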
-#![feature(rustc_attrs)] - // ignore-pretty : (#23623) problems when ending with // comments // check raw fat pointer ops in mir @@ -54,7 +52,6 @@ const GT: ComparisonResults = ComparisonResults { ne: true }; -#[rustc_mir] fn compare_su8(a: *const S<[u8]>, b: *const S<[u8]>) -> ComparisonResults { ComparisonResults { lt: a < b, @@ -66,7 +63,6 @@ fn compare_su8(a: *const S<[u8]>, b: *const S<[u8]>) -> ComparisonResults { } } -#[rustc_mir] fn compare_au8(a: *const [u8], b: *const [u8]) -> ComparisonResults { ComparisonResults { lt: a < b, @@ -78,7 +74,6 @@ fn compare_au8(a: *const [u8], b: *const [u8]) -> ComparisonResults { } } -#[rustc_mir] fn compare_foo<'a>(a: *const (Foo+'a), b: *const (Foo+'a)) -> ComparisonResults { ComparisonResults { lt: a < b, @@ -90,7 +85,6 @@ fn compare_foo<'a>(a: *const (Foo+'a), b: *const (Foo+'a)) -> ComparisonResults } } -#[rustc_mir] fn simple_eq<'a>(a: *const (Foo+'a), b: *const (Foo+'a)) -> bool { let result = a == b; result diff --git a/src/test/run-pass/mir_refs_correct.rs b/src/test/run-pass/mir_refs_correct.rs index 67baf2f9c49c1..df90fe2b7918d 100644 --- a/src/test/run-pass/mir_refs_correct.rs +++ b/src/test/run-pass/mir_refs_correct.rs @@ -7,9 +7,8 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] -// aux-build:mir_external_refs.rs +// aux-build:mir_external_refs.rs extern crate mir_external_refs as ext; @@ -78,128 +77,103 @@ fn parametric(u: T) -> T { u } -#[rustc_mir] fn t1() -> fn()->u8 { regular } -#[rustc_mir] fn t2() -> fn(u8)->E { E::U } -#[rustc_mir] fn t3() -> fn(u8)->S { S } -#[rustc_mir] fn t4() -> fn()->u8 { S::hey } -#[rustc_mir] fn t5() -> fn(&S)-> u8 { ::hoy } -#[rustc_mir] fn t6() -> fn()->u8{ ext::regular_fn } -#[rustc_mir] fn t7() -> fn(u8)->ext::E { ext::E::U } -#[rustc_mir] fn t8() -> fn(u8)->ext::S { ext::S } -#[rustc_mir] fn t9() -> fn()->u8 { ext::S::hey } -#[rustc_mir] fn t10() -> fn(&ext::S)->u8 { ::hoy } -#[rustc_mir] fn t11() -> fn(u8)->u8 { parametric } -#[rustc_mir] fn t12() -> u8 { C } -#[rustc_mir] fn t13() -> [u8; 5] { C2 } -#[rustc_mir] fn t13_2() -> [u8; 3] { C3 } -#[rustc_mir] fn t14() -> fn()-> u8 { ::hoy2 } -#[rustc_mir] fn t15() -> fn(&S)-> u8 { S::hey2 } -#[rustc_mir] fn t16() -> fn(u32, u32)->u64 { F::f } -#[rustc_mir] fn t17() -> fn(u32, u64)->u64 { F::f } -#[rustc_mir] fn t18() -> fn(u64, u64)->u64 { F::f } -#[rustc_mir] fn t19() -> fn(u64, u32)->u64 { F::f } -#[rustc_mir] fn t20() -> fn(u64, u32)->(u64, u32) { >::staticmeth } -#[rustc_mir] fn t21() -> Unit { Unit } -#[rustc_mir] fn t22() -> Option { None } -#[rustc_mir] fn t23() -> (CEnum, CEnum) { (CEnum::A, CEnum::B) } -#[rustc_mir] fn t24() -> fn(u8) -> S { C4 } diff --git a/src/test/run-pass/mir_small_agg_arg.rs b/src/test/run-pass/mir_small_agg_arg.rs index 8a0cb046b7a7d..639a585ae0013 100644 --- a/src/test/run-pass/mir_small_agg_arg.rs +++ b/src/test/run-pass/mir_small_agg_arg.rs @@ -8,9 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] - -#[rustc_mir] fn foo((x, y): (i8, i8)) { } diff --git a/src/test/run-pass/mir_struct_with_assoc_ty.rs b/src/test/run-pass/mir_struct_with_assoc_ty.rs index 1f75369b94a86..7b2514c27c8cb 100644 --- a/src/test/run-pass/mir_struct_with_assoc_ty.rs +++ b/src/test/run-pass/mir_struct_with_assoc_ty.rs @@ -8,8 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
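compare_su8, compare_au8, and compare_foo above assert that comparison operators on raw fat pointers take both words into account. The equality half of that behavior, runnable as-is:

    fn main() {
        let xs = [1u8, 2, 3];
        let a = &xs[..2] as *const [u8];
        let b = &xs[..3] as *const [u8];
        let c = &xs[..2] as *const [u8];
        // A raw slice pointer is a (data, len) pair; equality compares both.
        assert!(a == c); // same data pointer, same length
        assert!(a != b); // same data pointer, different length
    }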
-#![feature(rustc_attrs)] - use std::marker::PhantomData; pub trait DataBind { @@ -26,7 +24,6 @@ pub struct Data { pub offsets: as DataBind>::Data, } -#[rustc_mir] fn create_data() -> Data { let mut d = Data { offsets: [1, 2] }; d.offsets[0] = 3; diff --git a/src/test/run-pass/mir_temp_promotions.rs b/src/test/run-pass/mir_temp_promotions.rs index de83c1f5ee0cd..4865e955091f8 100644 --- a/src/test/run-pass/mir_temp_promotions.rs +++ b/src/test/run-pass/mir_temp_promotions.rs @@ -8,9 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] - -#[rustc_mir] fn test1(f: f32) -> bool { // test that we properly promote temporaries to allocas when a temporary is assigned to // multiple times (assignment is still happening once ∀ possible dataflows). diff --git a/src/test/run-pass/mir_trans_array.rs b/src/test/run-pass/mir_trans_array.rs index e6ffb89582512..b7f247012ce12 100644 --- a/src/test/run-pass/mir_trans_array.rs +++ b/src/test/run-pass/mir_trans_array.rs @@ -7,9 +7,7 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] -#[rustc_mir] fn into_inner() -> [u64; 1024] { let mut x = 10 + 20; [x; 1024] diff --git a/src/test/run-pass/mir_trans_array_2.rs b/src/test/run-pass/mir_trans_array_2.rs index 4aa686298e9ee..c7133fb0c0e49 100644 --- a/src/test/run-pass/mir_trans_array_2.rs +++ b/src/test/run-pass/mir_trans_array_2.rs @@ -7,9 +7,7 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] -#[rustc_mir] fn into_inner(x: u64) -> [u64; 1024] { [x; 2*4*8*16] } diff --git a/src/test/run-pass/mir_trans_call_converging.rs b/src/test/run-pass/mir_trans_call_converging.rs index d8acfec25c4b5..7d420bb86c607 100644 --- a/src/test/run-pass/mir_trans_call_converging.rs +++ b/src/test/run-pass/mir_trans_call_converging.rs @@ -7,13 +7,11 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] fn converging_fn() -> u64 { 43 } -#[rustc_mir] fn mir() -> u64 { let x; loop { diff --git a/src/test/run-pass/mir_trans_calls.rs b/src/test/run-pass/mir_trans_calls.rs index 7ff684a5ef392..d429c681bbe4a 100644 --- a/src/test/run-pass/mir_trans_calls.rs +++ b/src/test/run-pass/mir_trans_calls.rs @@ -8,9 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs, fn_traits)] +#![feature(fn_traits)] -#[rustc_mir] fn test1(a: isize, b: (i32, i32), c: &[i32]) -> (isize, (i32, i32), &[i32]) { // Test passing a number of arguments including a fat pointer. // Also returning via an out pointer @@ -20,7 +19,6 @@ fn test1(a: isize, b: (i32, i32), c: &[i32]) -> (isize, (i32, i32), &[i32]) { callee(a, b, c) } -#[rustc_mir] fn test2(a: isize) -> isize { // Test passing a single argument. // Not using out pointer. 
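mir_struct_with_assoc_ty above puts an associated-type projection in a field type and pushes a value through construction, assignment, and return. Reconstructed as a standalone program; the Global and DataBind definitions follow the test's visible fragments:

    use std::marker::PhantomData;

    pub trait DataBind {
        type Data;
    }

    #[allow(dead_code)]
    pub struct Global<T>(PhantomData<T>);

    impl<T> DataBind for Global<T> {
        type Data = T;
    }

    pub struct Data {
        // The field type is the projection <Global<[u32; 2]> as DataBind>::Data,
        // i.e. [u32; 2] after normalization.
        pub offsets: <Global<[u32; 2]> as DataBind>::Data,
    }

    fn main() {
        let mut d = Data { offsets: [1, 2] };
        d.offsets[0] = 3;
        assert_eq!(d.offsets, [3, 2]);
    }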
@@ -36,7 +34,6 @@ impl Foo { fn inherent_method(&self, a: isize) -> isize { a } } -#[rustc_mir] fn test3(x: &Foo, a: isize) -> isize { // Test calling inherent method x.inherent_method(a) @@ -47,19 +44,16 @@ trait Bar { } impl Bar for Foo {} -#[rustc_mir] fn test4(x: &Foo, a: isize) -> isize { // Test calling extension method x.extension_method(a) } -#[rustc_mir] fn test5(x: &Bar, a: isize) -> isize { // Test calling method on trait object x.extension_method(a) } -#[rustc_mir] fn test6(x: &T, a: isize) -> isize { // Test calling extension method on generic callee x.extension_method(a) @@ -72,7 +66,6 @@ impl One for isize { fn one() -> isize { 1 } } -#[rustc_mir] fn test7() -> isize { // Test calling trait static method ::one() @@ -83,7 +76,6 @@ impl Two { fn two() -> isize { 2 } } -#[rustc_mir] fn test8() -> isize { // Test calling impl static method Two::two() @@ -93,24 +85,20 @@ extern fn simple_extern(x: u32, y: (u32, u32)) -> u32 { x + y.0 * y.1 } -#[rustc_mir] fn test9() -> u32 { simple_extern(41, (42, 43)) } -#[rustc_mir] fn test_closure(f: &F, x: i32, y: i32) -> i32 where F: Fn(i32, i32) -> i32 { f(x, y) } -#[rustc_mir] fn test_fn_object(f: &Fn(i32, i32) -> i32, x: i32, y: i32) -> i32 { f(x, y) } -#[rustc_mir] fn test_fn_impl(f: &&Fn(i32, i32) -> i32, x: i32, y: i32) -> i32 { // This call goes through the Fn implementation for &Fn provided in // core::ops::impls. It expands to a static Fn::call() that calls the @@ -118,28 +106,24 @@ fn test_fn_impl(f: &&Fn(i32, i32) -> i32, x: i32, y: i32) -> i32 { f(x, y) } -#[rustc_mir] fn test_fn_direct_call(f: &F, x: i32, y: i32) -> i32 where F: Fn(i32, i32) -> i32 { f.call((x, y)) } -#[rustc_mir] fn test_fn_const_call(f: &F) -> i32 where F: Fn(i32, i32) -> i32 { f.call((100, -1)) } -#[rustc_mir] fn test_fn_nil_call(f: &F) -> i32 where F: Fn() -> i32 { f() } -#[rustc_mir] fn test_fn_transmute_zst(x: ()) -> [(); 1] { fn id(x: T) -> T {x} @@ -148,30 +132,24 @@ fn test_fn_transmute_zst(x: ()) -> [(); 1] { }) } -#[rustc_mir] fn test_fn_ignored_pair() -> ((), ()) { ((), ()) } -#[rustc_mir] fn test_fn_ignored_pair_0() { test_fn_ignored_pair().0 } -#[rustc_mir] fn id(x: T) -> T { x } -#[rustc_mir] fn ignored_pair_named() -> (Foo, Foo) { (Foo, Foo) } -#[rustc_mir] fn test_fn_ignored_pair_named() -> (Foo, Foo) { id(ignored_pair_named()) } -#[rustc_mir] fn test_fn_nested_pair(x: &((f32, f32), u32)) -> (f32, f32) { let y = *x; let z = y.0; diff --git a/src/test/run-pass/mir_trans_calls_variadic.rs b/src/test/run-pass/mir_trans_calls_variadic.rs index 7f711b2758dc7..4e06738da4fd5 100644 --- a/src/test/run-pass/mir_trans_calls_variadic.rs +++ b/src/test/run-pass/mir_trans_calls_variadic.rs @@ -8,14 +8,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] - #[link(name = "rust_test_helpers")] extern { fn rust_interesting_average(_: i64, ...) 
-> f64; } -#[rustc_mir] fn test(a: i64, b: i64, c: i64, d: i64, e: i64, f: T, g: U) -> i64 { unsafe { rust_interesting_average(6, a, a as f64, diff --git a/src/test/run-pass/mir_trans_critical_edge.rs b/src/test/run-pass/mir_trans_critical_edge.rs index 320f40175926d..f6fe19c43097a 100644 --- a/src/test/run-pass/mir_trans_critical_edge.rs +++ b/src/test/run-pass/mir_trans_critical_edge.rs @@ -29,7 +29,6 @@ impl Foo where A: Iterator, B: Iterator { // This is the function we care about - #[rustc_mir] fn next(&mut self) -> Option { match self.state { State::Both => match self.a.next() { diff --git a/src/test/run-pass/mir_trans_spike1.rs b/src/test/run-pass/mir_trans_spike1.rs index 9a06ab78e73b4..8474e841e01ad 100644 --- a/src/test/run-pass/mir_trans_spike1.rs +++ b/src/test/run-pass/mir_trans_spike1.rs @@ -10,9 +10,6 @@ // A simple spike test for MIR version of trans. -#![feature(rustc_attrs)] - -#[rustc_mir] fn sum(x: i32, y: i32) -> i32 { x + y } diff --git a/src/test/run-pass/mir_trans_switch.rs b/src/test/run-pass/mir_trans_switch.rs index c32d9da724d05..b097bf46ad370 100644 --- a/src/test/run-pass/mir_trans_switch.rs +++ b/src/test/run-pass/mir_trans_switch.rs @@ -8,8 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] - enum Abc { A(u8), B(i8), @@ -17,7 +15,6 @@ enum Abc { D, } -#[rustc_mir] fn foo(x: Abc) -> i32 { match x { Abc::C => 3, @@ -27,7 +24,6 @@ fn foo(x: Abc) -> i32 { } } -#[rustc_mir] fn foo2(x: Abc) -> bool { match x { Abc::D => true, diff --git a/src/test/run-pass/mir_trans_switchint.rs b/src/test/run-pass/mir_trans_switchint.rs index edde5f3c89587..537734596a521 100644 --- a/src/test/run-pass/mir_trans_switchint.rs +++ b/src/test/run-pass/mir_trans_switchint.rs @@ -8,9 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] - -#[rustc_mir] pub fn foo(x: i8) -> i32 { match x { 1 => 0, diff --git a/src/test/run-pass/mir_void_return.rs b/src/test/run-pass/mir_void_return.rs index 8b07449b8fafd..78cb9fb39d6a6 100644 --- a/src/test/run-pass/mir_void_return.rs +++ b/src/test/run-pass/mir_void_return.rs @@ -8,9 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] - -#[rustc_mir] fn mir() -> (){ let x = 1; let mut y = 0; diff --git a/src/test/run-pass/mir_void_return_2.rs b/src/test/run-pass/mir_void_return_2.rs index a3ad343240918..fc9e3d5e3b535 100644 --- a/src/test/run-pass/mir_void_return_2.rs +++ b/src/test/run-pass/mir_void_return_2.rs @@ -8,11 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] - fn nil() {} -#[rustc_mir] fn mir(){ nil() } diff --git a/src/test/run-pass/attr-no-drop-flag-size.rs b/src/test/run-pass/no-drop-flag-size.rs similarity index 83% rename from src/test/run-pass/attr-no-drop-flag-size.rs rename to src/test/run-pass/no-drop-flag-size.rs index 0c464c9bad728..a606a8a9f4b3b 100644 --- a/src/test/run-pass/attr-no-drop-flag-size.rs +++ b/src/test/run-pass/no-drop-flag-size.rs @@ -1,4 +1,4 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // @@ -8,11 +8,8 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -#![feature(unsafe_no_drop_flag)] - use std::mem::size_of; -#[unsafe_no_drop_flag] struct Test { a: T } diff --git a/src/test/run-pass/vec-matching-fold.rs b/src/test/run-pass/vec-matching-fold.rs index 7a6129d311ee3..ac80a4211ada6 100644 --- a/src/test/run-pass/vec-matching-fold.rs +++ b/src/test/run-pass/vec-matching-fold.rs @@ -11,11 +11,9 @@ #![feature(advanced_slice_patterns)] #![feature(slice_patterns)] -#![feature(rustc_attrs)] use std::fmt::Debug; -#[rustc_mir(graphviz="mir.gv")] fn foldl(values: &[T], initial: U, mut function: F) @@ -32,7 +30,6 @@ fn foldl(values: &[T], } } -#[rustc_mir] fn foldr(values: &[T], initial: U, mut function: F) diff --git a/src/test/run-pass/vec-matching-legal-tail-element-borrow.rs b/src/test/run-pass/vec-matching-legal-tail-element-borrow.rs index 1093bc7c18b86..eecc3e7afdbb4 100644 --- a/src/test/run-pass/vec-matching-legal-tail-element-borrow.rs +++ b/src/test/run-pass/vec-matching-legal-tail-element-borrow.rs @@ -8,9 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(slice_patterns, rustc_attrs)] +#![feature(slice_patterns)] -#[rustc_mir] pub fn main() { let x = &[1, 2, 3, 4, 5]; let x: &[isize] = &[1, 2, 3, 4, 5]; diff --git a/src/test/run-pass/vec-matching.rs b/src/test/run-pass/vec-matching.rs index 075709a63b5f5..97006f54cd955 100644 --- a/src/test/run-pass/vec-matching.rs +++ b/src/test/run-pass/vec-matching.rs @@ -11,9 +11,7 @@ #![feature(advanced_slice_patterns)] #![feature(slice_patterns)] -#![feature(rustc_attrs)] -#[rustc_mir] fn a() { let x = [1]; match x { @@ -23,7 +21,6 @@ fn a() { } } -#[rustc_mir] fn b() { let x = [1, 2, 3]; match x { @@ -60,7 +57,6 @@ fn b() { } -#[rustc_mir] fn b_slice() { let x : &[_] = &[1, 2, 3]; match x { @@ -100,7 +96,6 @@ fn b_slice() { } } -#[rustc_mir] fn c() { let x = [1]; match x { @@ -109,7 +104,6 @@ fn c() { } } -#[rustc_mir] fn d() { let x = [1, 2, 3]; let branch = match x { @@ -121,7 +115,6 @@ fn d() { assert_eq!(branch, 1); } -#[rustc_mir] fn e() { let x: &[isize] = &[1, 2, 3]; let a = match *x { diff --git a/src/test/run-pass/vec-tail-matching.rs b/src/test/run-pass/vec-tail-matching.rs index 6084a0d07a114..d123eb36a7d4d 100644 --- a/src/test/run-pass/vec-tail-matching.rs +++ b/src/test/run-pass/vec-tail-matching.rs @@ -11,13 +11,11 @@ #![feature(slice_patterns)] -#![feature(rustc_attrs)] struct Foo { string: &'static str } -#[rustc_mir] pub fn main() { let x = [ Foo { string: "foo" }, diff --git a/src/test/run-pass/zero-size-type-destructors.rs b/src/test/run-pass/zero-size-type-destructors.rs index a663ae650c087..18b6c372a5ec2 100644 --- a/src/test/run-pass/zero-size-type-destructors.rs +++ b/src/test/run-pass/zero-size-type-destructors.rs @@ -8,15 +8,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs, unsafe_no_drop_flag)] - -// ignore-pretty : (#23623) problems when ending with // comments - static mut destructions : isize = 3; -#[rustc_no_mir] // FIXME #29855 MIR doesn't handle all drops correctly. pub fn foo() { - #[unsafe_no_drop_flag] struct Foo; impl Drop for Foo { diff --git a/src/test/run-pass/zero_sized_subslice_match.rs b/src/test/run-pass/zero_sized_subslice_match.rs index 00f4aa98a3e06..d399ef72976f1 100644 --- a/src/test/run-pass/zero_sized_subslice_match.rs +++ b/src/test/run-pass/zero_sized_subslice_match.rs @@ -8,10 +8,8 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_attrs)] #![feature(slice_patterns)] -#[rustc_mir] fn main() { let x = [(), ()];
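zero_sized_subslice_match.rs guards against computing an out-of-bounds offset when a subslice of zero-sized values is bound. The same check looks roughly like this in today's stable slice-pattern syntax; the original used the 2016 feature-gated form:

    fn main() {
        let x = [(), ()];
        // `rest` is a zero-sized one-element tail; binding it must not
        // read past the end of `x` even though () occupies no bytes.
        match x {
            [y, rest @ ..] => {
                assert_eq!(y, ());
                assert_eq!(rest.len(), 1);
            }
        }
    }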