From f7d9b439720fed4434a532244f0eb873cf14eb91 Mon Sep 17 00:00:00 2001 From: "Felix S. Klock II" Date: Thu, 23 Apr 2015 22:45:03 +0200 Subject: [PATCH 1/6] Latent bug in iter_structural_ty: handle `_match::Single` on zero-variant enum. (This may not be the *best* fix, compared to e.g. returning `_match::NoBranch` from `trans_switch` on a zero-variant enum. But it is one of the *simplest* fixes available.) --- src/librustc_trans/trans/adt.rs | 1 + src/librustc_trans/trans/base.rs | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/librustc_trans/trans/adt.rs b/src/librustc_trans/trans/adt.rs index ffa068a2ae44b..2057f502c1b69 100644 --- a/src/librustc_trans/trans/adt.rs +++ b/src/librustc_trans/trans/adt.rs @@ -769,6 +769,7 @@ pub fn trans_switch<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, (_match::Switch, Some(trans_get_discr(bcx, r, scrutinee, None))) } Univariant(..) => { + // N.B.: Univariant means <= 1 enum variants (*not* == 1 variants). (_match::Single, None) } } diff --git a/src/librustc_trans/trans/base.rs b/src/librustc_trans/trans/base.rs index 59f3ff7260261..f3d82970e4eef 100644 --- a/src/librustc_trans/trans/base.rs +++ b/src/librustc_trans/trans/base.rs @@ -471,8 +471,11 @@ pub fn iter_structural_ty<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>, match adt::trans_switch(cx, &*repr, av) { (_match::Single, None) => { - cx = iter_variant(cx, &*repr, av, &*(*variants)[0], - substs, &mut f); + if n_variants != 0 { + assert!(n_variants == 1); + cx = iter_variant(cx, &*repr, av, &*(*variants)[0], + substs, &mut f); + } } (_match::Switch, Some(lldiscrim_a)) => { cx = f(cx, lldiscrim_a, cx.tcx().types.isize); From 8f987956b4d173c9af26fbf2aafcc661abcc14cb Mon Sep 17 00:00:00 2001 From: "Felix S. Klock II" Date: Wed, 22 Apr 2015 11:52:08 +0200 Subject: [PATCH 2/6] Added new kind of drop-glue that just drops the type's contents, without invoking the Drop::drop implementation. This is necessary for dealing with an enum that switches own `self` to a different variant while running its destructor. Fix #23611. 
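For illustration, here is a distilled sketch of the pattern this patch addresses (modeled on the regression test added later in this series, not the exact reproducer from #23611): an enum whose Drop impl swaps `*self` to a different variant. After `Drop::drop` returns, the drop glue must re-read the discriminant and drop the contents of the variant that is *now* present, rather than the contents the value had when the glue was entered.

    use std::mem;

    enum E {
        A(String, bool),   // the bool guards against re-swapping below
        B(Vec<u8>, bool),
    }

    impl Drop for E {
        fn drop(&mut self) {
            // Clear the guard first, so the value swapped out below does not
            // itself try to swap again when its own destructor runs.
            let do_swap = match *self {
                E::A(_, ref mut flag) | E::B(_, ref mut flag) => mem::replace(flag, false),
            };
            if do_swap {
                // Switch `*self` to the other variant while its destructor is
                // still running; the old contents are dropped right here.
                *self = E::B(vec![1, 2, 3], false);
            }
            // Once Drop::drop returns, the contents-only glue introduced by
            // this patch (DropGlueKind::TyContents) tears down the Vec now
            // stored in `*self`, not the String that was there on entry.
        }
    }

    fn main() {
        drop(E::A("issue 23611".to_string(), true));
    }
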
--- src/librustc_trans/trans/_match.rs | 2 +- src/librustc_trans/trans/cleanup.rs | 84 ++++++++++++++---- src/librustc_trans/trans/context.rs | 9 +- src/librustc_trans/trans/glue.rs | 131 +++++++++++++++++++--------- 4 files changed, 164 insertions(+), 62 deletions(-) diff --git a/src/librustc_trans/trans/_match.rs b/src/librustc_trans/trans/_match.rs index 744ec5a616841..1aa996a05ac6d 100644 --- a/src/librustc_trans/trans/_match.rs +++ b/src/librustc_trans/trans/_match.rs @@ -916,7 +916,7 @@ fn insert_lllocals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, let datum = Datum::new(llval, binding_info.ty, Lvalue); if let Some(cs) = cs { - bcx.fcx.schedule_drop_and_zero_mem(cs, llval, binding_info.ty); + bcx.fcx.schedule_drop_and_fill_mem(cs, llval, binding_info.ty); bcx.fcx.schedule_lifetime_end(cs, binding_info.llmatch); } diff --git a/src/librustc_trans/trans/cleanup.rs b/src/librustc_trans/trans/cleanup.rs index 61af5bfaef8de..3ec73ff8eb9de 100644 --- a/src/librustc_trans/trans/cleanup.rs +++ b/src/librustc_trans/trans/cleanup.rs @@ -393,19 +393,22 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty), val: val, ty: ty, - zero: false + fill_on_drop: false, + skip_dtor: false, }; - debug!("schedule_drop_mem({:?}, val={}, ty={})", + debug!("schedule_drop_mem({:?}, val={}, ty={}) fill_on_drop={} skip_dtor={}", cleanup_scope, self.ccx.tn().val_to_string(val), - ty.repr(self.ccx.tcx())); + ty.repr(self.ccx.tcx()), + drop.fill_on_drop, + drop.skip_dtor); self.schedule_clean(cleanup_scope, drop as CleanupObj); } - /// Schedules a (deep) drop and zero-ing of `val`, which is a pointer to an instance of `ty` - fn schedule_drop_and_zero_mem(&self, + /// Schedules a (deep) drop and filling of `val`, which is a pointer to an instance of `ty` + fn schedule_drop_and_fill_mem(&self, cleanup_scope: ScopeId, val: ValueRef, ty: Ty<'tcx>) { @@ -416,14 +419,48 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty), val: val, ty: ty, - zero: true + fill_on_drop: true, + skip_dtor: false, + }; + + debug!("schedule_drop_and_fill_mem({:?}, val={}, ty={}, fill_on_drop={}, skip_dtor={})", + cleanup_scope, + self.ccx.tn().val_to_string(val), + ty.repr(self.ccx.tcx()), + drop.fill_on_drop, + drop.skip_dtor); + + self.schedule_clean(cleanup_scope, drop as CleanupObj); + } + + /// Issue #23611: Schedules a (deep) drop of the contents of + /// `val`, which is a pointer to an instance of struct/enum type + /// `ty`. The scheduled code handles extracting the discriminant + /// and dropping the contents associated with that variant + /// *without* executing any associated drop implementation. + fn schedule_drop_enum_contents(&self, + cleanup_scope: ScopeId, + val: ValueRef, + ty: Ty<'tcx>) { + // `if` below could be "!contents_needs_drop"; skipping drop + // is just an optimization, so sound to be conservative. 
+ if !self.type_needs_drop(ty) { return; } + + let drop = box DropValue { + is_immediate: false, + must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty), + val: val, + ty: ty, + fill_on_drop: false, + skip_dtor: true, }; - debug!("schedule_drop_and_zero_mem({:?}, val={}, ty={}, zero={})", + debug!("schedule_drop_enum_contents({:?}, val={}, ty={}) fill_on_drop={} skip_dtor={}", cleanup_scope, self.ccx.tn().val_to_string(val), ty.repr(self.ccx.tcx()), - true); + drop.fill_on_drop, + drop.skip_dtor); self.schedule_clean(cleanup_scope, drop as CleanupObj); } @@ -440,13 +477,16 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty), val: val, ty: ty, - zero: false + fill_on_drop: false, + skip_dtor: false, }; - debug!("schedule_drop_immediate({:?}, val={}, ty={:?})", + debug!("schedule_drop_immediate({:?}, val={}, ty={:?}) fill_on_drop={} skip_dtor={}", cleanup_scope, self.ccx.tn().val_to_string(val), - ty.repr(self.ccx.tcx())); + ty.repr(self.ccx.tcx()), + drop.fill_on_drop, + drop.skip_dtor); self.schedule_clean(cleanup_scope, drop as CleanupObj); } @@ -987,7 +1027,8 @@ pub struct DropValue<'tcx> { must_unwind: bool, val: ValueRef, ty: Ty<'tcx>, - zero: bool + fill_on_drop: bool, + skip_dtor: bool, } impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> { @@ -1007,13 +1048,18 @@ impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> { bcx: Block<'blk, 'tcx>, debug_loc: DebugLoc) -> Block<'blk, 'tcx> { - let _icx = base::push_ctxt("::trans"); + let skip_dtor = self.skip_dtor; + let _icx = if skip_dtor { + base::push_ctxt("::trans skip_dtor=true") + } else { + base::push_ctxt("::trans skip_dtor=false") + }; let bcx = if self.is_immediate { - glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc) + glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc, self.skip_dtor) } else { - glue::drop_ty(bcx, self.val, self.ty, debug_loc) + glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor) }; - if self.zero { + if self.fill_on_drop { base::drop_done_fill_mem(bcx, self.val, self.ty); } bcx @@ -1190,10 +1236,14 @@ pub trait CleanupMethods<'blk, 'tcx> { cleanup_scope: ScopeId, val: ValueRef, ty: Ty<'tcx>); - fn schedule_drop_and_zero_mem(&self, + fn schedule_drop_and_fill_mem(&self, cleanup_scope: ScopeId, val: ValueRef, ty: Ty<'tcx>); + fn schedule_drop_enum_contents(&self, + cleanup_scope: ScopeId, + val: ValueRef, + ty: Ty<'tcx>); fn schedule_drop_immediate(&self, cleanup_scope: ScopeId, val: ValueRef, diff --git a/src/librustc_trans/trans/context.rs b/src/librustc_trans/trans/context.rs index e54962dc08552..98187d76df89b 100644 --- a/src/librustc_trans/trans/context.rs +++ b/src/librustc_trans/trans/context.rs @@ -21,6 +21,7 @@ use trans::builder::Builder; use trans::common::{ExternMap,BuilderRef_res}; use trans::debuginfo; use trans::declare; +use trans::glue::DropGlueKind; use trans::monomorphize::MonoId; use trans::type_::{Type, TypeNames}; use middle::subst::Substs; @@ -73,7 +74,7 @@ pub struct SharedCrateContext<'tcx> { check_drop_flag_for_sanity: bool, available_monomorphizations: RefCell>, - available_drop_glues: RefCell, String>>, + available_drop_glues: RefCell, String>>, } /// The local portion of a `CrateContext`. 
There is one `LocalCrateContext` @@ -89,7 +90,7 @@ pub struct LocalCrateContext<'tcx> { item_vals: RefCell>, needs_unwind_cleanup_cache: RefCell, bool>>, fn_pointer_shims: RefCell, ValueRef>>, - drop_glues: RefCell, ValueRef>>, + drop_glues: RefCell, ValueRef>>, /// Track mapping of external ids to local items imported for inlining external: RefCell>>, /// Backwards version of the `external` map (inlined items to where they @@ -574,7 +575,7 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { &self.local.fn_pointer_shims } - pub fn drop_glues<'a>(&'a self) -> &'a RefCell, ValueRef>> { + pub fn drop_glues<'a>(&'a self) -> &'a RefCell, ValueRef>> { &self.local.drop_glues } @@ -660,7 +661,7 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { &self.shared.available_monomorphizations } - pub fn available_drop_glues<'a>(&'a self) -> &'a RefCell, String>> { + pub fn available_drop_glues(&self) -> &RefCell, String>> { &self.shared.available_drop_glues } diff --git a/src/librustc_trans/trans/glue.rs b/src/librustc_trans/trans/glue.rs index f974796e69cad..7bda273c2351b 100644 --- a/src/librustc_trans/trans/glue.rs +++ b/src/librustc_trans/trans/glue.rs @@ -132,14 +132,26 @@ pub fn get_drop_glue_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v: ValueRef, t: Ty<'tcx>, - debug_loc: DebugLoc) - -> Block<'blk, 'tcx> { + debug_loc: DebugLoc) -> Block<'blk, 'tcx> { + drop_ty_core(bcx, v, t, debug_loc, false) +} + +pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + v: ValueRef, + t: Ty<'tcx>, + debug_loc: DebugLoc, + skip_dtor: bool) -> Block<'blk, 'tcx> { // NB: v is an *alias* of type t here, not a direct value. - debug!("drop_ty(t={})", t.repr(bcx.tcx())); + debug!("drop_ty_core(t={}, skip_dtor={})", t.repr(bcx.tcx()), skip_dtor); let _icx = push_ctxt("drop_ty"); if bcx.fcx.type_needs_drop(t) { let ccx = bcx.ccx(); - let glue = get_drop_glue(ccx, t); + let g = if skip_dtor { + DropGlueKind::TyContents(t) + } else { + DropGlueKind::Ty(t) + }; + let glue = get_drop_glue_core(ccx, g); let glue_type = get_drop_glue_type(ccx, t); let ptr = if glue_type != t { PointerCast(bcx, v, type_of(ccx, glue_type).ptr_to()) @@ -155,22 +167,64 @@ pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pub fn drop_ty_immediate<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v: ValueRef, t: Ty<'tcx>, - debug_loc: DebugLoc) + debug_loc: DebugLoc, + skip_dtor: bool) -> Block<'blk, 'tcx> { let _icx = push_ctxt("drop_ty_immediate"); let vp = alloca(bcx, type_of(bcx.ccx(), t), ""); store_ty(bcx, v, vp, t); - drop_ty(bcx, vp, t, debug_loc) + drop_ty_core(bcx, vp, t, debug_loc, skip_dtor) } pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ValueRef { - debug!("make drop glue for {}", ppaux::ty_to_string(ccx.tcx(), t)); - let t = get_drop_glue_type(ccx, t); - debug!("drop glue type {}", ppaux::ty_to_string(ccx.tcx(), t)); - match ccx.drop_glues().borrow().get(&t) { + get_drop_glue_core(ccx, DropGlueKind::Ty(t)) +} + +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub enum DropGlueKind<'tcx> { + /// The normal path; runs the dtor, and then recurs on the contents + Ty(Ty<'tcx>), + /// Skips the dtor, if any, for ty; drops the contents directly. + /// Note that the dtor is only skipped at the most *shallow* + /// level, namely, an `impl Drop for Ty` itself. So, for example, + /// if Ty is Newtype(S) then only the Drop impl for for Newtype + /// itself will be skipped, while the Drop impl for S, if any, + /// will be invoked. 
+ TyContents(Ty<'tcx>), +} + +impl<'tcx> DropGlueKind<'tcx> { + fn ty(&self) -> Ty<'tcx> { + match *self { DropGlueKind::Ty(t) | DropGlueKind::TyContents(t) => t } + } + + fn map_ty(&self, mut f: F) -> DropGlueKind<'tcx> where F: FnMut(Ty<'tcx>) -> Ty<'tcx> + { + match *self { + DropGlueKind::Ty(t) => DropGlueKind::Ty(f(t)), + DropGlueKind::TyContents(t) => DropGlueKind::TyContents(f(t)), + } + } + + fn to_string<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> String { + let t_str = ppaux::ty_to_string(ccx.tcx(), self.ty()); + match *self { + DropGlueKind::Ty(_) => format!("DropGlueKind::Ty({})", t_str), + DropGlueKind::TyContents(_) => format!("DropGlueKind::TyContents({})", t_str), + } + } +} + +fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + g: DropGlueKind<'tcx>) -> ValueRef { + debug!("make drop glue for {}", g.to_string(ccx)); + let g = g.map_ty(|t| get_drop_glue_type(ccx, t)); + debug!("drop glue type {}", g.to_string(ccx)); + match ccx.drop_glues().borrow().get(&g) { Some(&glue) => return glue, _ => { } } + let t = g.ty(); let llty = if type_is_sized(ccx.tcx(), t) { type_of(ccx, t).ptr_to() @@ -182,9 +236,9 @@ pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Val // To avoid infinite recursion, don't `make_drop_glue` until after we've // added the entry to the `drop_glues` cache. - if let Some(old_sym) = ccx.available_drop_glues().borrow().get(&t) { + if let Some(old_sym) = ccx.available_drop_glues().borrow().get(&g) { let llfn = declare::declare_cfn(ccx, &old_sym, llfnty, ty::mk_nil(ccx.tcx())); - ccx.drop_glues().borrow_mut().insert(t, llfn); + ccx.drop_glues().borrow_mut().insert(g, llfn); return llfn; }; @@ -192,7 +246,7 @@ pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Val let llfn = declare::define_cfn(ccx, &fn_nm, llfnty, ty::mk_nil(ccx.tcx())).unwrap_or_else(||{ ccx.sess().bug(&format!("symbol `{}` already defined", fn_nm)); }); - ccx.available_drop_glues().borrow_mut().insert(t, fn_nm); + ccx.available_drop_glues().borrow_mut().insert(g, fn_nm); let _s = StatRecorder::new(ccx, format!("drop {}", ty_to_short_str(ccx.tcx(), t))); @@ -217,7 +271,7 @@ pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Val // type, so we don't need to explicitly cast the function parameter. let llrawptr0 = get_param(llfn, fcx.arg_pos(0) as c_uint); - let bcx = make_drop_glue(bcx, llrawptr0, t); + let bcx = make_drop_glue(bcx, llrawptr0, g); finish_fn(&fcx, bcx, ty::FnConverging(ty::mk_nil(ccx.tcx())), DebugLoc::None); llfn @@ -338,11 +392,16 @@ fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, (Load(bcx, data), Some(Load(bcx, info))) }; - adt::fold_variants(bcx, &*repr, struct_data, |variant_cx, st, value| { - // Be sure to put all of the fields into a scope so we can use an invoke + debug!("trans_struct_drop t: {} fty: {} self_ty: {}", + bcx.ty_to_string(t), bcx.ty_to_string(fty), bcx.ty_to_string(self_ty)); + // TODO: surely no reason to keep dispatching on variants here. + adt::fold_variants(bcx, &*repr, struct_data, |variant_cx, struct_info, value| { + debug!("trans_struct_drop fold_variant: struct_info: {:?}", struct_info); + // Be sure to put the enum contents into a scope so we can use an invoke // instruction to call the user destructor but still call the field // destructors if the user destructor panics. 
let field_scope = variant_cx.fcx.push_custom_cleanup_scope(); + variant_cx.fcx.schedule_drop_enum_contents(cleanup::CustomScope(field_scope), v0, t); // Class dtors have no explicit args, so the params should // just consist of the environment (self). @@ -362,30 +421,12 @@ fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } else { PointerCast(variant_cx, value, params[0]) }; - let args = vec!(self_arg); - - // Add all the fields as a value which needs to be cleaned at the end of - // this scope. Iterate in reverse order so a Drop impl doesn't reverse - // the order in which fields get dropped. - for (i, &ty) in st.fields.iter().enumerate().rev() { - let llfld_a = adt::struct_field_ptr(variant_cx, &*st, value, i, false); - - let val = if type_is_sized(bcx.tcx(), ty) { - llfld_a - } else { - let scratch = datum::rvalue_scratch_datum(bcx, ty, "__fat_ptr_drop_field"); - Store(bcx, llfld_a, GEPi(bcx, scratch.val, &[0, abi::FAT_PTR_ADDR])); - Store(bcx, info.unwrap(), GEPi(bcx, scratch.val, &[0, abi::FAT_PTR_EXTRA])); - scratch.val - }; - variant_cx.fcx.schedule_drop_mem(cleanup::CustomScope(field_scope), val, ty); - } let dtor_ty = ty::mk_ctor_fn(bcx.tcx(), class_did, &[get_drop_glue_type(bcx.ccx(), t)], ty::mk_nil(bcx.tcx())); - let (_, variant_cx) = invoke(variant_cx, dtor_addr, &args[..], dtor_ty, DebugLoc::None); + let (_, variant_cx) = invoke(variant_cx, dtor_addr, &[self_arg], dtor_ty, DebugLoc::None); variant_cx.fcx.pop_and_trans_custom_cleanup_scope(variant_cx, field_scope) }) @@ -454,8 +495,10 @@ fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, info: } } -fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, t: Ty<'tcx>) +fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueKind<'tcx>) -> Block<'blk, 'tcx> { + let t = g.ty(); + let skip_dtor = match g { DropGlueKind::Ty(_) => false, DropGlueKind::TyContents(_) => true }; // NB: v0 is an *alias* of type t here, not a direct value. let _icx = push_ctxt("make_drop_glue"); @@ -469,6 +512,10 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, t: Ty<'tcx>) match t.sty { ty::ty_uniq(content_ty) => { + // Support for ty_uniq is built-in and its drop glue is + // special. It may move to library and have Drop impl. As + // a safe-guard, assert ty_uniq not used with TyContents. + assert!(!skip_dtor); if !type_is_sized(bcx.tcx(), content_ty) { let llval = GEPi(bcx, v0, &[0, abi::FAT_PTR_ADDR]); let llbox = Load(bcx, llval); @@ -505,8 +552,8 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, t: Ty<'tcx>) } ty::ty_struct(did, substs) | ty::ty_enum(did, substs) => { let tcx = bcx.tcx(); - match ty::ty_dtor(tcx, did) { - ty::TraitDtor(dtor, true) => { + match (ty::ty_dtor(tcx, did), skip_dtor) { + (ty::TraitDtor(dtor, true), false) => { // FIXME(16758) Since the struct is unsized, it is hard to // find the drop flag (which is at the end of the struct). // Lets just ignore the flag and pretend everything will be @@ -523,16 +570,20 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, t: Ty<'tcx>) trans_struct_drop(bcx, t, v0, dtor, did, substs) } } - ty::TraitDtor(dtor, false) => { + (ty::TraitDtor(dtor, false), false) => { trans_struct_drop(bcx, t, v0, dtor, did, substs) } - ty::NoDtor => { + (ty::NoDtor, _) | (_, true) => { // No dtor? Just the default case iter_structural_ty(bcx, v0, t, |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None)) } } } ty::ty_trait(..) 
=> { + // No support in vtable for distinguishing destroying with + // versus without calling Drop::drop. Assert caller is + // okay with always calling the Drop impl, if any. + assert!(!skip_dtor); let data_ptr = GEPi(bcx, v0, &[0, abi::FAT_PTR_ADDR]); let vtable_ptr = Load(bcx, GEPi(bcx, v0, &[0, abi::FAT_PTR_EXTRA])); let dtor = Load(bcx, vtable_ptr); From d5acb55c5996bfca6d754bbf0d5dc53ef2a2e5a8 Mon Sep 17 00:00:00 2001 From: "Felix S. Klock II" Date: Thu, 23 Apr 2015 19:35:46 +0200 Subject: [PATCH 3/6] regression test. --- .../run-pass/issue-23611-enum-swap-in-drop.rs | 265 ++++++++++++++++++ 1 file changed, 265 insertions(+) create mode 100644 src/test/run-pass/issue-23611-enum-swap-in-drop.rs diff --git a/src/test/run-pass/issue-23611-enum-swap-in-drop.rs b/src/test/run-pass/issue-23611-enum-swap-in-drop.rs new file mode 100644 index 0000000000000..540cbd50e9d57 --- /dev/null +++ b/src/test/run-pass/issue-23611-enum-swap-in-drop.rs @@ -0,0 +1,265 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Issue 23611: this test is ensuring that, for an instance `X` of the +// enum `E`, if you swap in a different variant during the execution +// of the `::drop`, then the appropriate substructure will +// be torn down after the `::drop` method returns. + +use std::cell::RefCell; +use std::mem; + +use self::d::D; + +pub fn main() { + let log = RefCell::new(vec![]); + d::println(&format!("created empty log")); + test(&log); + + // println!("log: {:?}", &log.borrow()[..]); + assert_eq!( + &log.borrow()[..], + [ + // created empty log + // +-- Make D(test_1, 10000000) + // | +-- Make D(g_b_5, 50000001) + // | | in g_B(b2b0) from E::drop, b=b4b0 + // | +-- Drop D(g_b_5, 50000001) + 50000001, + // | + // | +-- Make D(drop_6, 60000002) + // | | +-- Make D(g_b_5, 50000003) + // | | | in g_B(b2b0) from E::drop, b=b4b1 + // | | +-- Drop D(g_b_5, 50000003) + 50000003, + // | | + // | | +-- Make D(GaspB::drop_3, 30000004) + // | | | +-- Make D(g_b_5, 50000005) + // | | | | in g_B(b4b2) from GaspB::drop + // | | | +-- Drop D(g_b_5, 50000005) + 50000005, + // | | | + // | | +-- Drop D(GaspB::drop_3, 30000004) + 30000004, + // | | + // +-- Drop D(test_1, 10000000) + 10000000, + // | + // +-- Make D(GaspA::drop_2, 20000006) + // | | +-- Make D(f_a_4, 40000007) + // | | | in f_A(a3a0) from GaspA::drop + // | | +-- Drop D(f_a_4, 40000007) + 40000007, + // | | + // +-- Drop D(GaspA::drop_2, 20000006) + 20000006, + // | + // +-- Drop D(drop_6, 60000002) + 60000002 + // + ]); + + // For reference purposes, the old (incorrect) behavior would produce the following + // output, which you can compare to the above: + // + // created empty log + // +-- Make D(test_1, 10000000) + // | +-- Make D(g_b_5, 50000001) + // | | in g_B(b2b0) from E::drop, b=b4b0 + // | +-- Drop D(g_b_5, 50000001) + // | + // | +-- Make D(drop_6, 60000002) + // | | +-- Make D(g_b_5, 50000003) + // | | | in g_B(b2b0) from E::drop, b=b4b1 + // | | +-- Drop D(g_b_5, 50000003) + // | | + // | | +-- Make D(GaspB::drop_3, 30000004) + // | | | +-- Make D(g_b_5, 50000005) + // | | | | in g_B(b4b2) from GaspB::drop + // | | | +-- Drop D(g_b_5, 50000005) + // | | | + // | | +-- Drop D(GaspB::drop_3, 30000004) + // | | + // +-- Drop D(test_1, 
10000000) + // | + // +-- Make D(GaspB::drop_3, 30000006) + // | | +-- Make D(f_a_4, 40000007) + // | | | in f_A(a3a0) from GaspB::drop + // | | +-- Drop D(f_a_4, 40000007) + // | | + // +-- Drop D(GaspB::drop_3, 30000006) + // | + // +-- Drop D(drop_6, 60000002) + + // Note that this calls f_A from GaspB::drop (and thus creates a D + // with a uid incorporating the origin of GaspB's drop that + // surrounds the f_A invocation), but the code as written only + // ever hands f_A off to instances of GaspA, and thus one should + // be able to prove the invariant that f_A is *only* invoked from + // from an instance of GaspA (either via the GaspA drop + // implementation or the E drop implementaton). Yet the old (bad) + // behavior allowed a call to f_A to leak in while we are tearing + // down a value of type GaspB. +} + +fn test<'a>(log: d::Log<'a>) { + let _e = E::B(GaspB(g_b, 0xB4B0, log, D::new("test", 1, log)), true); +} + +struct GaspA<'a>(for <'b> fn(u32, &'b str, d::Log<'a>), u32, d::Log<'a>, d::D<'a>); +struct GaspB<'a>(for <'b> fn(u32, &'b str, d::Log<'a>), u32, d::Log<'a>, d::D<'a>); + +impl<'a> Drop for GaspA<'a> { + fn drop(&mut self) { + let _d = d::D::new("GaspA::drop", 2, self.2); + (self.0)(self.1, "GaspA::drop", self.2); + } +} + +impl<'a> Drop for GaspB<'a> { + fn drop(&mut self) { + let _d = d::D::new("GaspB::drop", 3, self.2); + (self.0)(self.1, "GaspB::drop", self.2); + } +} + +enum E<'a> { + A(GaspA<'a>, bool), B(GaspB<'a>, bool), +} + +fn f_a(x: u32, ctxt: &str, log: d::Log) { + let _d = d::D::new("f_a", 4, log); + d::println(&format!("in f_A({:x}) from {}", x, ctxt)); +} +fn g_b(y: u32, ctxt: &str, log: d::Log) { + let _d = d::D::new("g_b", 5, log); + d::println(&format!("in g_B({:x}) from {}", y, ctxt)); +} + +impl<'a> Drop for E<'a> { + fn drop(&mut self) { + let (do_drop, log) = match *self { + E::A(GaspA(ref f, ref mut val_a, log, ref _d_a), ref mut do_drop) => { + f(0xA1A0, &format!("E::drop, a={:x}", val_a), log); + *val_a += 1; + // swap in do_drop := false to avoid infinite dtor regress + (mem::replace(do_drop, false), log) + } + E::B(GaspB(ref g, ref mut val_b, log, ref _d_b), ref mut do_drop) => { + g(0xB2B0, &format!("E::drop, b={:x}", val_b), log); + *val_b += 1; + // swap in do_drop := false to avoid infinite dtor regress + (mem::replace(do_drop, false), log) + } + }; + + if do_drop { + mem::replace(self, E::A(GaspA(f_a, 0xA3A0, log, D::new("drop", 6, log)), true)); + } + } +} + +// This module provides simultaneous printouts of the dynamic extents +// of all of the D values, in addition to logging the order that each +// is dropped. +// +// This code is similar to a support code module embedded within +// test/run-pass/issue-123338-ensure-param-drop-order.rs; however, +// that (slightly simpler) code only identifies objects in the log via +// (creation) time-stamps; this incorporates both timestamping and the +// point of origin within the source code into the unique ID (uid). 
+ +const PREF_INDENT: u32 = 20; + +pub mod d { + #![allow(unused_parens)] + use std::fmt; + use std::mem; + use std::cell::RefCell; + + static mut counter: u16 = 0; + static mut trails: u64 = 0; + + pub type Log<'a> = &'a RefCell>; + + pub fn current_width() -> u32 { + unsafe { max_width() - trails.leading_zeros() } + } + + pub fn max_width() -> u32 { + unsafe { + (mem::size_of_val(&trails)*8) as u32 + } + } + + pub fn indent_println(my_trails: u32, s: &str) { + let mut indent: String = String::new(); + for i in 0..my_trails { + unsafe { + if trails & (1 << i) != 0 { + indent = indent + "| "; + } else { + indent = indent + " "; + } + } + } + println!("{}{}", indent, s); + } + + pub fn println(s: &str) { + indent_println(super::PREF_INDENT, s); + } + + fn first_avail() -> u32 { + unsafe { + for i in 0..64 { + if trails & (1 << i) == 0 { + return i; + } + } + } + panic!("exhausted trails"); + } + + pub struct D<'a> { + name: &'static str, i: u8, uid: u32, trail: u32, log: Log<'a> + } + + impl<'a> fmt::Display for D<'a> { + fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result { + write!(w, "D({}_{}, {})", self.name, self.i, self.uid) + } + } + + impl<'a> D<'a> { + pub fn new(name: &'static str, i: u8, log: Log<'a>) -> D<'a> { + unsafe { + let trail = first_avail(); + let ctr = ((i as u32) * 10_000_000) + (counter as u32); + counter += 1; + trails |= (1 << trail); + let ret = D { + name: name, i: i, log: log, uid: ctr, trail: trail + }; + indent_println(trail, &format!("+-- Make {}", ret)); + ret + } + } + } + + impl<'a> Drop for D<'a> { + fn drop(&mut self) { + unsafe { trails &= !(1 << self.trail); }; + self.log.borrow_mut().push(self.uid); + indent_println(self.trail, &format!("+-- Drop {}", self)); + indent_println(::PREF_INDENT, ""); + } + } +} From b0a4808757fda7ae4d047e570f0936b969eb6e42 Mon Sep 17 00:00:00 2001 From: "Felix S. Klock II" Date: Fri, 24 Apr 2015 09:29:56 +0200 Subject: [PATCH 4/6] Remove direct variant-dispatch entirely from trans_struct_drop. This addresses to-do in my code, and simplifies this method a lot to boot. (The necessary enum dispatch has now effectively been shifted entirely into the scheduled cleanup code for the enum contents.) --- src/librustc_trans/trans/glue.rs | 78 ++++++++------------------------ 1 file changed, 19 insertions(+), 59 deletions(-) diff --git a/src/librustc_trans/trans/glue.rs b/src/librustc_trans/trans/glue.rs index 7bda273c2351b..652f6ad366aaa 100644 --- a/src/librustc_trans/trans/glue.rs +++ b/src/librustc_trans/trans/glue.rs @@ -30,7 +30,6 @@ use trans::callee; use trans::cleanup; use trans::cleanup::CleanupMethods; use trans::common::*; -use trans::datum; use trans::debuginfo::DebugLoc; use trans::declare; use trans::expr; @@ -361,75 +360,36 @@ fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, substs: &subst::Substs<'tcx>) -> Block<'blk, 'tcx> { - let repr = adt::represent_type(bcx.ccx(), t); + debug!("trans_struct_drop t: {}", bcx.ty_to_string(t)); // Find and call the actual destructor - let dtor_addr = get_res_dtor(bcx.ccx(), dtor_did, t, - class_did, substs); + let dtor_addr = get_res_dtor(bcx.ccx(), dtor_did, t, class_did, substs); - // The first argument is the "self" argument for drop + // Class dtors have no explicit args, so the params should + // just consist of the environment (self). 
let params = unsafe { let ty = Type::from_ref(llvm::LLVMTypeOf(dtor_addr)); ty.element_type().func_params() }; + assert_eq!(params.len(), 1); - let fty = ty::lookup_item_type(bcx.tcx(), dtor_did).ty.subst(bcx.tcx(), substs); - let self_ty = match fty.sty { - ty::ty_bare_fn(_, ref f) => { - let sig = ty::erase_late_bound_regions(bcx.tcx(), &f.sig); - assert!(sig.inputs.len() == 1); - sig.inputs[0] - } - _ => bcx.sess().bug(&format!("Expected function type, found {}", - bcx.ty_to_string(fty))) - }; - - let (struct_data, info) = if type_is_sized(bcx.tcx(), t) { - (v0, None) - } else { - let data = GEPi(bcx, v0, &[0, abi::FAT_PTR_ADDR]); - let info = GEPi(bcx, v0, &[0, abi::FAT_PTR_EXTRA]); - (Load(bcx, data), Some(Load(bcx, info))) - }; + // Be sure to put the contents into a scope so we can use an invoke + // instruction to call the user destructor but still call the field + // destructors if the user destructor panics. + // + // FIXME (#14875) panic-in-drop semantics might be unsupported; we + // might well consider changing below to more direct code. + let contents_scope = bcx.fcx.push_custom_cleanup_scope(); - debug!("trans_struct_drop t: {} fty: {} self_ty: {}", - bcx.ty_to_string(t), bcx.ty_to_string(fty), bcx.ty_to_string(self_ty)); - // TODO: surely no reason to keep dispatching on variants here. - adt::fold_variants(bcx, &*repr, struct_data, |variant_cx, struct_info, value| { - debug!("trans_struct_drop fold_variant: struct_info: {:?}", struct_info); - // Be sure to put the enum contents into a scope so we can use an invoke - // instruction to call the user destructor but still call the field - // destructors if the user destructor panics. - let field_scope = variant_cx.fcx.push_custom_cleanup_scope(); - variant_cx.fcx.schedule_drop_enum_contents(cleanup::CustomScope(field_scope), v0, t); - - // Class dtors have no explicit args, so the params should - // just consist of the environment (self). - assert_eq!(params.len(), 1); - let self_arg = if type_is_fat_ptr(bcx.tcx(), self_ty) { - // The dtor expects a fat pointer, so make one, even if we have to fake it. - let scratch = datum::rvalue_scratch_datum(bcx, t, "__fat_ptr_drop_self"); - Store(bcx, value, GEPi(bcx, scratch.val, &[0, abi::FAT_PTR_ADDR])); - Store(bcx, - // If we just had a thin pointer, make a fat pointer by sticking - // null where we put the unsizing info. This works because t - // is a sized type, so we will only unpack the fat pointer, never - // use the fake info. - info.unwrap_or(C_null(Type::i8p(bcx.ccx()))), - GEPi(bcx, scratch.val, &[0, abi::FAT_PTR_EXTRA])); - PointerCast(variant_cx, scratch.val, params[0]) - } else { - PointerCast(variant_cx, value, params[0]) - }; + // Issue #23611: schedule cleanup of contents, re-inspecting the + // discriminant (if any) in case of variant swap in drop code. 
+ bcx.fcx.schedule_drop_enum_contents(cleanup::CustomScope(contents_scope), v0, t); - let dtor_ty = ty::mk_ctor_fn(bcx.tcx(), - class_did, - &[get_drop_glue_type(bcx.ccx(), t)], - ty::mk_nil(bcx.tcx())); - let (_, variant_cx) = invoke(variant_cx, dtor_addr, &[self_arg], dtor_ty, DebugLoc::None); + let glue_type = get_drop_glue_type(bcx.ccx(), t); + let dtor_ty = ty::mk_ctor_fn(bcx.tcx(), class_did, &[glue_type], ty::mk_nil(bcx.tcx())); + let (_, bcx) = invoke(bcx, dtor_addr, &[v0], dtor_ty, DebugLoc::None); - variant_cx.fcx.pop_and_trans_custom_cleanup_scope(variant_cx, field_scope) - }) + bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope) } fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, info: ValueRef) From 24f213d0231c04c2bde1ca65bb5dc93df7c4aaa7 Mon Sep 17 00:00:00 2001 From: "Felix S. Klock II" Date: Mon, 27 Apr 2015 16:08:30 +0200 Subject: [PATCH 5/6] drive-by fix: scheduled drops are executed in reverse order. That is, scheduled drops are executed in reverse order, so for correctness, we *schedule* the lifetime end before we schedule the drop, so that when they are executed, the drop will be executed *before* the lifetime end. --- src/librustc_trans/trans/_match.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/librustc_trans/trans/_match.rs b/src/librustc_trans/trans/_match.rs index 1aa996a05ac6d..41d32304582c4 100644 --- a/src/librustc_trans/trans/_match.rs +++ b/src/librustc_trans/trans/_match.rs @@ -916,8 +916,8 @@ fn insert_lllocals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, let datum = Datum::new(llval, binding_info.ty, Lvalue); if let Some(cs) = cs { - bcx.fcx.schedule_drop_and_fill_mem(cs, llval, binding_info.ty); bcx.fcx.schedule_lifetime_end(cs, binding_info.llmatch); + bcx.fcx.schedule_drop_and_fill_mem(cs, llval, binding_info.ty); } debug!("binding {} to {}", binding_info.id, bcx.val_to_string(llval)); From 805349a50b9577e81b04e34a7482d6d5dcb01527 Mon Sep 17 00:00:00 2001 From: "Felix S. Klock II" Date: Mon, 27 Apr 2015 16:21:51 +0200 Subject: [PATCH 6/6] Bug fix: `Rvalue(ByRef)` will issue a lifetime_end as its post_store, which is wrong. Kudos to dotdash for tracking down this fix. Presumably the use of `ByRef` was because this value is a reference to the drop-flag; but an Lvalue will serve just as well for that. dotdash argues: > since the drop_flag is in its "final home", Lvalue seems to be the > correct choice. --- src/librustc_trans/trans/adt.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/librustc_trans/trans/adt.rs b/src/librustc_trans/trans/adt.rs index 2057f502c1b69..87ee13355971a 100644 --- a/src/librustc_trans/trans/adt.rs +++ b/src/librustc_trans/trans/adt.rs @@ -1061,7 +1061,7 @@ pub fn trans_drop_flag_ptr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, r: &Repr<'tcx )); bcx = fold_variants(bcx, r, val, |variant_cx, st, value| { let ptr = struct_field_ptr(variant_cx, st, value, (st.fields.len() - 1), false); - datum::Datum::new(ptr, ptr_ty, datum::Rvalue::new(datum::ByRef)) + datum::Datum::new(ptr, ptr_ty, datum::Lvalue) .store_to(variant_cx, scratch.val) }); let expr_datum = scratch.to_expr_datum();