diff --git a/src/librustc_trans/trans/_match.rs b/src/librustc_trans/trans/_match.rs
index 84d464e8f0781..fb05b4186a27a 100644
--- a/src/librustc_trans/trans/_match.rs
+++ b/src/librustc_trans/trans/_match.rs
@@ -272,7 +272,10 @@ impl<'a, 'tcx> Opt<'a, 'tcx> {
         }
     }
 
-    fn trans<'blk>(&self, mut bcx: Block<'blk, 'tcx>) -> OptResult<'blk, 'tcx> {
+    fn trans<'r, 'blk>(&self,
+                       &mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>)
+                       -> OptResult<'blk> {
+        let mut bcx = &mut bl.with_fcx(fcx);
         let _icx = push_ctxt("match::trans_opt");
         let ccx = bcx.ccx();
         match *self {
@@ -281,21 +284,21 @@ impl<'a, 'tcx> Opt<'a, 'tcx> {
                 let (llval, _) = consts::const_expr(ccx, &*lit_expr, bcx.fcx.param_substs);
                 let lit_datum = immediate_rvalue(llval, lit_ty);
                 let lit_datum = unpack_datum!(bcx, lit_datum.to_appropriate_datum(bcx));
-                SingleResult(Result::new(bcx, lit_datum.val))
+                SingleResult(Result::new(bcx.bl, lit_datum.val))
             }
             ConstantRange(ConstantExpr(ref l1), ConstantExpr(ref l2), _) => {
                 let (l1, _) = consts::const_expr(ccx, &**l1, bcx.fcx.param_substs);
                 let (l2, _) = consts::const_expr(ccx, &**l2, bcx.fcx.param_substs);
-                RangeResult(Result::new(bcx, l1), Result::new(bcx, l2))
+                RangeResult(Result::new(bcx.bl, l1), Result::new(bcx.bl, l2))
             }
             Variant(disr_val, ref repr, _, _) => {
                 adt::trans_case(bcx, &**repr, disr_val)
             }
             SliceLengthEqual(length, _) => {
-                SingleResult(Result::new(bcx, C_uint(ccx, length)))
+                SingleResult(Result::new(bcx.bl, C_uint(ccx, length)))
             }
             SliceLengthGreaterOrEqual(prefix, suffix, _) => {
-                LowerBound(Result::new(bcx, C_uint(ccx, prefix + suffix)))
+                LowerBound(Result::new(bcx.bl, C_uint(ccx, prefix + suffix)))
             }
         }
     }
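The patch leans on a `BlockContext` type and a `Block::with_fcx` constructor that the diff itself never shows; they would live in `trans::common` (which `base.rs` below imports it from). A minimal sketch of the shape the new signatures assume — the `'tcx` parameter and all real fields are elided here, so this is an illustration, not the actual rustc definitions:

struct Block { /* llbb, terminated, unreachable, ... */ }
struct FunctionContext { /* lllocals, ccx, ... */ }

// Where the old `Block<'blk, 'tcx>` was a copyable handle that carried its
// FunctionContext inside it, the new code passes a bare `&'blk Block` for
// control flow and re-pairs it with a unique `&mut FunctionContext` on demand.
struct BlockContext<'r, 'blk> {
    bl: &'blk Block,
    fcx: &'r mut FunctionContext,
}

impl Block {
    // `bl.with_fcx(fcx)`, as used throughout the patch: build a fresh
    // context that mutably borrows the enclosing function's state.
    fn with_fcx<'r, 'blk>(&'blk self, fcx: &'r mut FunctionContext)
                          -> BlockContext<'r, 'blk> {
        BlockContext { bl: self, fcx: fcx }
    }
}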
@@ -320,10 +323,10 @@ pub enum BranchKind {
     CompareSliceLength
 }
 
-pub enum OptResult<'blk, 'tcx: 'blk> {
-    SingleResult(Result<'blk, 'tcx>),
-    RangeResult(Result<'blk, 'tcx>, Result<'blk, 'tcx>),
-    LowerBound(Result<'blk, 'tcx>)
+pub enum OptResult<'blk> {
+    SingleResult(Result<'blk>),
+    RangeResult(Result<'blk>, Result<'blk>),
+    LowerBound(Result<'blk>)
 }
 
 #[derive(Clone, Copy, PartialEq)]
@@ -352,7 +355,7 @@ pub struct BindingInfo<'tcx> {
 type BindingsMap<'tcx> = FnvHashMap<Ident, BindingInfo<'tcx>>;
 
 struct ArmData<'p, 'blk, 'tcx: 'blk> {
-    bodycx: Block<'blk, 'tcx>,
+    bodycx: &'blk Block,
     arm: &'p ast::Arm,
     bindings_map: BindingsMap<'tcx>
 }
@@ -391,11 +394,11 @@ fn has_nested_bindings(m: &[Match], col: usize) -> bool {
     return false;
 }
 
-fn expand_nested_bindings<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                              m: &[Match<'a, 'p, 'blk, 'tcx>],
-                                              col: usize,
-                                              val: ValueRef)
-                                              -> Vec<Match<'a, 'p, 'blk, 'tcx>> {
+fn expand_nested_bindings<'a, 'p, 'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                                  m: &[Match<'a, 'p, 'blk, 'tcx>],
+                                                  col: usize,
+                                                  val: ValueRef)
+                                                  -> Vec<Match<'a, 'p, 'blk, 'tcx>> {
     debug!("expand_nested_bindings(bcx={}, m={}, col={}, val={})",
            bcx.to_str(),
            m.repr(bcx.tcx()),
@@ -427,13 +430,13 @@ fn expand_nested_bindings<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     }).collect()
 }
 
-fn enter_match<'a, 'b, 'p, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
-                                          dm: &DefMap,
-                                          m: &[Match<'a, 'p, 'blk, 'tcx>],
-                                          col: usize,
-                                          val: ValueRef,
-                                          mut e: F)
-                                          -> Vec<Match<'b, 'p, 'blk, 'tcx>> where
+fn enter_match<'a, 'b, 'p, 'r, 'blk, 'tcx, F>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                              dm: &DefMap,
+                                              m: &[Match<'a, 'p, 'blk, 'tcx>],
+                                              col: usize,
+                                              val: ValueRef,
+                                              mut e: F)
+                                              -> Vec<Match<'b, 'p, 'blk, 'tcx>> where
     F: FnMut(&[&'p ast::Pat]) -> Option<Vec<&'p ast::Pat>>,
 {
     debug!("enter_match(bcx={}, m={}, col={}, val={})",
@@ -473,12 +476,12 @@ fn enter_match<'a, 'b, 'p, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
     }).collect()
 }
 
-fn enter_default<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                     dm: &DefMap,
-                                     m: &[Match<'a, 'p, 'blk, 'tcx>],
-                                     col: usize,
-                                     val: ValueRef)
-                                     -> Vec<Match<'a, 'p, 'blk, 'tcx>> {
+fn enter_default<'a, 'p, 'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                         dm: &DefMap,
+                                         m: &[Match<'a, 'p, 'blk, 'tcx>],
+                                         col: usize,
+                                         val: ValueRef)
+                                         -> Vec<Match<'a, 'p, 'blk, 'tcx>> {
     debug!("enter_default(bcx={}, m={}, col={}, val={})",
            bcx.to_str(),
           m.repr(bcx.tcx()),
@@ -526,8 +529,8 @@ fn enter_default<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
 /// takes the complete row of patterns rather than just the first one.
 /// Also, most of the enter_() family functions have been unified with
 /// the check_match specialization step.
-fn enter_opt<'a, 'p, 'blk, 'tcx>(
-    bcx: Block<'blk, 'tcx>,
+fn enter_opt<'a, 'p, 'r, 'blk, 'tcx>(
+    bcx: &mut BlockContext<'r, 'blk, 'tcx>,
     _: ast::NodeId,
     dm: &DefMap,
     m: &[Match<'a, 'p, 'blk, 'tcx>],
@@ -573,10 +576,10 @@ fn enter_opt<'a, 'p, 'blk, 'tcx>(
 // Returns the options in one column of matches. An option is something that
 // needs to be conditionally matched at runtime; for example, the discriminant
 // on a set of enum variants or a literal.
-fn get_branches<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                    m: &[Match<'a, 'p, 'blk, 'tcx>],
-                                    col: usize)
-                                    -> Vec<Opt<'p, 'tcx>> {
+fn get_branches<'a, 'p, 'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                        m: &[Match<'a, 'p, 'blk, 'tcx>],
+                                        col: usize)
+                                        -> Vec<Opt<'p, 'tcx>> {
     let tcx = bcx.tcx();
 
     let mut found: Vec<Opt> = vec![];
@@ -629,22 +632,22 @@ fn get_branches<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     found
 }
 
-struct ExtractedBlock<'blk, 'tcx: 'blk> {
+struct ExtractedBlock<'blk> {
     vals: Vec<ValueRef>,
-    bcx: Block<'blk, 'tcx>,
+    bcx: &'blk Block
 }
 
-fn extract_variant_args<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                    repr: &adt::Repr<'tcx>,
-                                    disr_val: ty::Disr,
-                                    val: ValueRef)
-                                    -> ExtractedBlock<'blk, 'tcx> {
+fn extract_variant_args<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                        repr: &adt::Repr<'tcx>,
+                                        disr_val: ty::Disr,
+                                        val: ValueRef)
+                                        -> ExtractedBlock<'blk> {
     let _icx = push_ctxt("match::extract_variant_args");
     let args = (0..adt::num_args(repr, disr_val)).map(|i| {
         adt::trans_field_ptr(bcx, repr, val, disr_val, i)
     }).collect();
 
-    ExtractedBlock { vals: args, bcx: bcx }
+    ExtractedBlock { vals: args, bcx: bcx.bl }
 }
 
 /// Helper for converting from the ValueRef that we pass around in the match code, which is always
@@ -653,7 +656,7 @@ fn match_datum<'tcx>(val: ValueRef, left_ty: Ty<'tcx>) -> Datum<'tcx, Lvalue> {
     Datum::new(val, left_ty, Lvalue)
 }
 
-fn bind_subslice_pat(bcx: Block,
+fn bind_subslice_pat(bcx: &mut BlockContext,
                      pat_id: ast::NodeId,
                      val: ValueRef,
                      offset_left: usize,
@@ -664,36 +667,38 @@ fn bind_subslice_pat(bcx: Block,
     let vec_datum = match_datum(val, vec_ty);
     let (base, len) = vec_datum.get_vec_base_and_len(bcx);
 
-    let slice_begin = InBoundsGEP(bcx, base, &[C_uint(bcx.ccx(), offset_left)]);
+    let ol = C_uint(bcx.ccx(), offset_left);
+    let slice_begin = InBoundsGEP(bcx, base, &[ol]);
     let slice_len_offset = C_uint(bcx.ccx(), offset_left + offset_right);
     let slice_len = Sub(bcx, len, slice_len_offset, DebugLoc::None);
     let slice_ty = ty::mk_slice(bcx.tcx(),
                                 bcx.tcx().mk_region(ty::ReStatic),
                                 ty::mt {ty: unit_ty, mutbl: ast::MutImmutable});
     let scratch = rvalue_scratch_datum(bcx, slice_ty, "");
-    Store(bcx, slice_begin,
-          GEPi(bcx, scratch.val, &[0, abi::FAT_PTR_ADDR]));
-    Store(bcx, slice_len, GEPi(bcx, scratch.val, &[0, abi::FAT_PTR_EXTRA]));
+    let fpa = GEPi(bcx, scratch.val, &[0, abi::FAT_PTR_ADDR]);
+    Store(bcx, slice_begin, fpa);
+    let fpe = GEPi(bcx, scratch.val, &[0, abi::FAT_PTR_EXTRA]);
+    Store(bcx, slice_len, fpe);
     scratch.val
 }
 
-fn extract_vec_elems<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                 left_ty: Ty<'tcx>,
-                                 before: usize,
-                                 after: usize,
-                                 val: ValueRef)
-                                 -> ExtractedBlock<'blk, 'tcx> {
+fn extract_vec_elems<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                     left_ty: Ty<'tcx>,
+                                     before: usize,
+                                     after: usize,
+                                     val: ValueRef)
+                                     -> ExtractedBlock<'blk> {
     let _icx = push_ctxt("match::extract_vec_elems");
     let vec_datum = match_datum(val, left_ty);
     let (base, len) = vec_datum.get_vec_base_and_len(bcx);
     let mut elems = vec![];
     elems.extend((0..before).map(|i| GEPi(bcx, base, &[i])));
     elems.extend((0..after).rev().map(|i| {
-        InBoundsGEP(bcx, base, &[
-            Sub(bcx, len, C_uint(bcx.ccx(), i + 1), DebugLoc::None)
-        ])
+        let ty = C_uint(bcx.ccx(), i + 1);
+        let sub = Sub(bcx, len, ty, DebugLoc::None);
+        InBoundsGEP(bcx, base, &[sub])
     }));
-    ExtractedBlock { vals: elems, bcx: bcx }
+    ExtractedBlock { vals: elems, bcx: bcx.bl }
 }
 
 // Macro for deciding whether any of the remaining matches fit a given kind of
@@ -760,7 +765,7 @@ impl FailureHandler {
         !self.is_fallible()
     }
 
-    fn handle_fail(&self, bcx: Block) {
+    fn handle_fail(&self, bcx: &mut BlockContext) {
         match *self {
             Infallible =>
                 panic!("attempted to panic in a non-panicking panic handler!"),
@@ -810,22 +815,20 @@ fn pick_column_to_specialize(def_map: &DefMap, m: &[Match]) -> Option<usize> {
 }
 
 // Compiles a comparison between two things.
-fn compare_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
-                              lhs: ValueRef,
-                              rhs: ValueRef,
-                              rhs_t: Ty<'tcx>,
-                              debug_loc: DebugLoc)
-                              -> Result<'blk, 'tcx> {
-    fn compare_str<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
-                               lhs: ValueRef,
-                               rhs: ValueRef,
-                               rhs_t: Ty<'tcx>,
-                               debug_loc: DebugLoc)
-                               -> Result<'blk, 'tcx> {
-        let did = langcall(cx,
-                           None,
-                           &format!("comparison of `{}`",
-                                    cx.ty_to_string(rhs_t)),
+fn compare_values<'r, 'blk, 'tcx>(cx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                  lhs: ValueRef,
+                                  rhs: ValueRef,
+                                  rhs_t: Ty<'tcx>,
+                                  debug_loc: DebugLoc)
+                                  -> Result<'blk> {
+    fn compare_str<'r, 'blk, 'tcx>(cx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                   lhs: ValueRef,
+                                   rhs: ValueRef,
+                                   rhs_t: Ty<'tcx>,
+                                   debug_loc: DebugLoc)
+                                   -> Result<'blk> {
+        let ty_to_string = cx.ty_to_string(rhs_t);
+        let did = langcall(cx, None, &format!("comparison of `{}`", ty_to_string),
                            StrEqFnLangItem);
         let t = ty::mk_str_slice(cx.tcx(), cx.tcx().mk_region(ty::ReStatic), ast::MutImmutable);
         // The comparison function gets the slices by value, so we have to make copies here. Even
@@ -836,8 +839,8 @@ fn compare_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
         memcpy_ty(cx, lhs_arg, lhs, t);
         memcpy_ty(cx, rhs_arg, rhs, t);
         let res = callee::trans_lang_call(cx, did, &[lhs_arg, rhs_arg], None, debug_loc);
-        call_lifetime_end(res.bcx, lhs_arg);
-        call_lifetime_end(res.bcx, rhs_arg);
+        call_lifetime_end(&mut res.bcx.with_fcx(cx.fcx), lhs_arg);
+        call_lifetime_end(&mut res.bcx.with_fcx(cx.fcx), rhs_arg);
         res
     }
 
@@ -845,7 +848,7 @@ fn compare_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
     let _icx = push_ctxt("compare_values");
     if ty::type_is_scalar(rhs_t) {
         let cmp = compare_scalar_types(cx, lhs, rhs, rhs_t, ast::BiEq, debug_loc);
-        return Result::new(cx, cmp);
+        return Result::new(cx.bl, cmp);
     }
 
     match rhs_t.sty {
@@ -861,15 +864,23 @@ fn compare_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
                                              ast::MutImmutable);
             let rhs_str = alloc_ty(cx, ty_str_slice, "rhs_str");
-            Store(cx, GEPi(cx, rhs, &[0, 0]), expr::get_dataptr(cx, rhs_str));
-            Store(cx, C_uint(cx.ccx(), pat_len), expr::get_len(cx, rhs_str));
+            let rv = GEPi(cx, rhs, &[0, 0]);
+            let rp = expr::get_dataptr(cx, rhs_str);
+            Store(cx, rv, rp);
+            let ty = C_uint(cx.ccx(), pat_len);
+            let l = expr::get_len(cx, rhs_str);
+            Store(cx, ty, l);
 
             let lhs_str;
             if val_ty(lhs) == val_ty(rhs) {
                 // Both the discriminant and the pattern are thin pointers
                 lhs_str = alloc_ty(cx, ty_str_slice, "lhs_str");
-                Store(cx, GEPi(cx, lhs, &[0, 0]), expr::get_dataptr(cx, lhs_str));
-                Store(cx, C_uint(cx.ccx(), pat_len), expr::get_len(cx, lhs_str));
+                let lv = GEPi(cx, lhs, &[0, 0]);
+                let lp = expr::get_dataptr(cx, lhs_str);
+                Store(cx, lv, lp);
+                let ty = C_uint(cx.ccx(), pat_len);
+                let l = expr::get_len(cx, lhs_str);
+                Store(cx, ty, l);
             } else {
                 // The discriminant is a fat pointer
@@ -888,10 +899,12 @@ fn compare_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
 }
 
 /// For each binding in `data.bindings_map`, adds an appropriate entry into the `fcx.lllocals` map
-fn insert_lllocals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
-                               bindings_map: &BindingsMap<'tcx>,
-                               cs: Option<cleanup::ScopeId>)
-                               -> Block<'blk, 'tcx> {
+fn insert_lllocals<'r, 'blk, 'tcx>
+    (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>,
+     bindings_map: &BindingsMap<'tcx>,
+     cs: Option<cleanup::ScopeId>)
+     -> &'blk Block {
+    let mut bcx = &mut bl.with_fcx(fcx);
     for (&ident, &binding_info) in bindings_map {
         let llval = match binding_info.trmode {
             // By value mut binding for a copy type: load from the ptr
@@ -900,7 +913,7 @@ fn insert_lllocals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                 let llval = Load(bcx, binding_info.llmatch);
                 let datum = Datum::new(llval, binding_info.ty, Lvalue);
                 call_lifetime_start(bcx, llbinding);
-                bcx = datum.store_to(bcx, llbinding);
+                bcx.bl = &mut datum.store_to(bcx, llbinding);
                 if let Some(cs) = cs {
                     bcx.fcx.schedule_lifetime_end(cs, llbinding);
                 }
@@ -922,20 +935,23 @@
         }
 
         debug!("binding {} to {}", binding_info.id, bcx.val_to_string(llval));
-        bcx.fcx.lllocals.borrow_mut().insert(binding_info.id, datum);
+        bcx.fcx.lllocals.insert(binding_info.id, datum);
         debuginfo::create_match_binding_metadata(bcx, ident.name, binding_info);
     }
-    bcx
+    bcx.bl
 }
 
-fn compile_guard<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                     guard_expr: &ast::Expr,
-                                     data: &ArmData<'p, 'blk, 'tcx>,
-                                     m: &[Match<'a, 'p, 'blk, 'tcx>],
-                                     vals: &[ValueRef],
-                                     chk: &FailureHandler,
-                                     has_genuine_default: bool)
-                                     -> Block<'blk, 'tcx> {
+fn compile_guard<'a, 'p, 'r, 'blk, 'tcx>
+    (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>,
+     guard_expr: &ast::Expr,
+     data: &ArmData<'p, 'blk, 'tcx>,
+     m: &[Match<'a, 'p, 'blk, 'tcx>],
+     vals: &[ValueRef],
+     chk: &FailureHandler,
+     has_genuine_default: bool)
+     -> &'blk Block {
+    let mut bcx = &mut bl.with_fcx(fcx);
+
     debug!("compile_guard(bcx={}, guard_expr={}, m={}, vals={})",
            bcx.to_str(),
           bcx.expr_to_string(guard_expr),
@@ -943,7 +959,7 @@
            vec_map_to_string(vals, |v| bcx.val_to_string(*v)));
     let _indenter = indenter();
 
-    let mut bcx = insert_lllocals(bcx, &data.bindings_map, None);
+    let mut bcx = &mut insert_lllocals(bcx, &data.bindings_map, None).with_fcx(bcx.fcx);
 
     let val = unpack_datum!(bcx, expr::trans(bcx, guard_expr));
     let val = val.to_llbool(bcx);
@@ -955,10 +971,11 @@
     }
 
     for (_, &binding_info) in &data.bindings_map {
-        bcx.fcx.lllocals.borrow_mut().remove(&binding_info.id);
+        bcx.fcx.lllocals.remove(&binding_info.id);
     }
 
-    with_cond(bcx, Not(bcx, val, guard_expr.debug_loc()), |bcx| {
+    let not = Not(bcx, val, guard_expr.debug_loc());
+    with_cond(bcx, not, |bcx| {
         for (_, &binding_info) in &data.bindings_map {
             call_lifetime_end(bcx, binding_info.llmatch);
         }
@@ -973,22 +990,23 @@
                 compile_submatch(bcx, m, vals, chk, has_genuine_default);
             }
         };
-        bcx
+        bcx.bl
     })
 }
 
-fn compile_submatch<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                        m: &[Match<'a, 'p, 'blk, 'tcx>],
-                                        vals: &[ValueRef],
-                                        chk: &FailureHandler,
-                                        has_genuine_default: bool) {
+fn compile_submatch<'a, 'p, 'r, 'blk, 'tcx>
+    (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>,
+     m: &[Match<'a, 'p, 'blk, 'tcx>],
+     vals: &[ValueRef],
+     chk: &FailureHandler,
+     has_genuine_default: bool) {
+    let mut bcx = &mut bl.with_fcx(fcx);
     debug!("compile_submatch(bcx={}, m={}, vals={})",
           bcx.to_str(),
           m.repr(bcx.tcx()),
           vec_map_to_string(vals, |v| bcx.val_to_string(*v)));
     let _indenter = indenter();
     let _icx = push_ctxt("match::compile_submatch");
-    let mut bcx = bcx;
     if m.is_empty() {
         if chk.is_fallible() {
             chk.handle_fail(bcx);
@@ -1028,13 +1046,13 @@
             }
             match data.arm.guard {
                 Some(ref guard_expr) => {
-                    bcx = compile_guard(bcx,
-                                        &**guard_expr,
-                                        m[0].data,
-                                        &m[1..m.len()],
-                                        vals,
-                                        chk,
-                                        has_genuine_default);
+                    bcx.bl = compile_guard(bcx,
+                                           &**guard_expr,
+                                           m[0].data,
+                                           &m[1..m.len()],
+                                           vals,
+                                           chk,
+                                           has_genuine_default);
                 }
                 _ => ()
             }
@@ -1043,14 +1061,16 @@
     }
 }
 
-fn compile_submatch_continue<'a, 'p, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
-                                                 m: &[Match<'a, 'p, 'blk, 'tcx>],
-                                                 vals: &[ValueRef],
-                                                 chk: &FailureHandler,
-                                                 col: usize,
-                                                 val: ValueRef,
-                                                 has_genuine_default: bool) {
-    let fcx = bcx.fcx;
+fn compile_submatch_continue<'a, 'p, 'r, 'blk, 'tcx>
+    (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>,
+     m: &[Match<'a, 'p, 'blk, 'tcx>],
+     vals: &[ValueRef],
+     chk: &FailureHandler,
+     col: usize,
+     val: ValueRef,
+     has_genuine_default: bool)
+{
+    let mut bcx = &mut bl.with_fcx(fcx);
     let tcx = bcx.tcx();
     let dm = &tcx.def_map;
 
@@ -1084,7 +1104,9 @@ fn compile_submatch_continue<'a, 'p, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
             // the last field specially: instead of simply passing a
            // ValueRef pointing to that field, as with all the others,
            // we skip it and instead construct a 'fat ptr' below.
-            (arg_count - 1, Load(bcx, expr::get_dataptr(bcx, val)))
+            let dp = expr::get_dataptr(bcx, val);
+            let ld = Load(bcx, dp);
+            (arg_count - 1, ld)
         };
         let mut field_vals: Vec<ValueRef> = (0..arg_count).map(|ix|
             adt::trans_field_ptr(bcx, &*repr, struct_val, 0, ix)
         ).collect();
@@ -1103,9 +1125,12 @@
                     let llty = type_of::type_of(bcx.ccx(), unsized_ty);
                     let scratch = alloca_no_lifetime(bcx, llty, "__struct_field_fat_ptr");
                     let data = adt::trans_field_ptr(bcx, &*repr, struct_val, 0, arg_count);
-                    let len = Load(bcx, expr::get_len(bcx, val));
-                    Store(bcx, data, expr::get_dataptr(bcx, scratch));
-                    Store(bcx, len, expr::get_len(bcx, scratch));
+                    let l = expr::get_len(bcx, val);
+                    let len = Load(bcx, l);
+                    let dp = expr::get_dataptr(bcx, scratch);
+                    let lp = expr::get_len(bcx, scratch);
+                    Store(bcx, data, dp);
+                    Store(bcx, len, lp);
                     field_vals.push(scratch);
                 }
                 _ => {}
@@ -1173,8 +1198,8 @@ fn compile_submatch_continue<'a, 'p, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
        }
     }
     let else_cx = match kind {
-        NoBranch | Single => bcx,
-        _ => bcx.fcx.new_temp_block("match_else")
+        NoBranch | Single => bcx.bl,
+        _ => &*bcx.fcx.new_temp_block("match_else")
     };
     let sw = if kind == Switch {
         build::Switch(bcx, test_val, else_cx.llbb, opts.len())
@@ -1182,7 +1207,7 @@ fn compile_submatch_continue<'a, 'p, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
         C_int(ccx, 0) // Placeholder for when not using a switch
     };
 
-    let defaults = enter_default(else_cx, dm, m, col, val);
+    let defaults = enter_default(&mut else_cx.with_fcx(bcx.fcx), dm, m, col, val);
     let exhaustive = chk.is_infallible() && defaults.is_empty();
     let len = opts.len();
 
@@ -1204,7 +1229,7 @@ fn compile_submatch_continue<'a, 'p, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                 match opt.trans(bcx) {
                     SingleResult(r) => {
                         AddCase(sw, r.val, opt_cx.llbb);
-                        bcx = r.bcx;
+                        bcx.bl = r.bcx;
                     }
                     _ => {
                         bcx.sess().bug(
@@ -1221,25 +1246,29 @@ fn compile_submatch_continue<'a, 'p, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                };
                let Result { bcx: after_cx, val: matches } = {
                    match opt.trans(bcx) {
-                        SingleResult(Result { bcx, val }) => {
-                            compare_values(bcx, test_val, val, t, debug_loc)
+                        SingleResult(Result { bcx: bl, val }) => {
+                            compare_values(&mut bl.with_fcx(bcx.fcx),
+                                           test_val, val, t, debug_loc)
                        }
                        RangeResult(Result { val: vbegin, .. },
-                                    Result { bcx, val: vend }) => {
-                            let llge = compare_scalar_types(bcx, test_val, vbegin,
-                                                            t, ast::BiGe, debug_loc);
-                            let llle = compare_scalar_types(bcx, test_val, vend,
-                                                            t, ast::BiLe, debug_loc);
-                            Result::new(bcx, And(bcx, llge, llle, DebugLoc::None))
+                                    Result { bcx: bl, val: vend }) => {
+                            let llge = compare_scalar_types(&mut bl.with_fcx(bcx.fcx),
+                                                            test_val, vbegin,
+                                                            t, ast::BiGe, debug_loc);
+                            let llle = compare_scalar_types(&mut bl.with_fcx(bcx.fcx),
+                                                            test_val, vend,
+                                                            t, ast::BiLe, debug_loc);
+                            Result::new(bcx.bl, And(bcx, llge, llle, DebugLoc::None))
                        }
-                        LowerBound(Result { bcx, val }) => {
-                            Result::new(bcx, compare_scalar_types(bcx, test_val,
-                                                                  val, t, ast::BiGe,
-                                                                  debug_loc))
+                        LowerBound(Result { bcx: bl, val }) => {
+                            Result::new(bcx.bl, compare_scalar_types(&mut bl.with_fcx(bcx.fcx),
+                                                                     test_val,
+                                                                     val, t, ast::BiGe,
+                                                                     debug_loc))
                        }
                    }
                };
-                bcx = fcx.new_temp_block("compare_next");
+                bcx.bl = bcx.fcx.new_temp_block("compare_next");
 
                // If none of the sub-cases match, and the current condition
                // is guarded or has multiple patterns, move on to the next
@@ -1248,9 +1277,10 @@ fn compile_submatch_continue<'a, 'p, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                let guarded = m[i].data.arm.guard.is_some();
                let multi_pats = m[i].pats.len() > 1;
                if i + 1 < len && (guarded || multi_pats || kind == CompareSliceLength) {
-                    branch_chk = Some(JumpToBasicBlock(bcx.llbb));
+                    branch_chk = Some(JumpToBasicBlock(bcx.bl.llbb));
                }
-                CondBr(after_cx, matches, opt_cx.llbb, bcx.llbb, debug_loc);
+                CondBr(&mut after_cx.with_fcx(bcx.fcx), matches, opt_cx.llbb, bcx.bl.llbb,
+                       debug_loc);
            }
            _ => ()
        }
@@ -1263,29 +1293,30 @@ fn compile_submatch_continue<'a, 'p, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
        match *opt {
            Variant(disr_val, ref repr, _, _) => {
                let ExtractedBlock {vals: argvals, bcx: new_bcx} =
-                    extract_variant_args(opt_cx, &**repr, disr_val, val);
+                    extract_variant_args(&mut opt_cx.with_fcx(bcx.fcx), &**repr, disr_val, val);
                size = argvals.len();
                unpacked = argvals;
                opt_cx = new_bcx;
            }
            SliceLengthEqual(len, _) => {
-                let args = extract_vec_elems(opt_cx, left_ty, len, 0, val);
+                let args = extract_vec_elems(&mut opt_cx.with_fcx(bcx.fcx), left_ty, len, 0, val);
                size = args.vals.len();
                unpacked = args.vals.clone();
                opt_cx = args.bcx;
            }
            SliceLengthGreaterOrEqual(before, after, _) => {
-                let args = extract_vec_elems(opt_cx, left_ty, before, after, val);
+                let args = extract_vec_elems(&mut opt_cx.with_fcx(bcx.fcx),
+                                             left_ty, before, after, val);
                size = args.vals.len();
                unpacked = args.vals.clone();
                opt_cx = args.bcx;
            }
            ConstantValue(..) | ConstantRange(..) => ()
        }
-        let opt_ms = enter_opt(opt_cx, pat_id, dm, m, opt, col, size, val);
+        let opt_ms = enter_opt(&mut opt_cx.with_fcx(bcx.fcx), pat_id, dm, m, opt, col, size, val);
        let mut opt_vals = unpacked;
        opt_vals.push_all(&vals_left[..]);
-        compile_submatch(opt_cx,
+        compile_submatch(&mut opt_cx.with_fcx(bcx.fcx),
                         &opt_ms[..],
                         &opt_vals[..],
                         branch_chk.as_ref().unwrap_or(chk),
@@ -1302,10 +1333,10 @@ fn compile_submatch_continue<'a, 'p, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
            // condition explicitly rather than (eventually) falling back to
            // the last default arm.
            &JumpToBasicBlock(_) if defaults.len() == 1 && has_genuine_default => {
-                chk.handle_fail(else_cx);
+                chk.handle_fail(&mut else_cx.with_fcx(bcx.fcx));
            }
            _ => {
-                compile_submatch(else_cx,
+                compile_submatch(&mut else_cx.with_fcx(bcx.fcx),
                                 &defaults[..],
                                 &vals_left[..],
                                 chk,
@@ -1315,18 +1346,18 @@ fn compile_submatch_continue<'a, 'p, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
    }
 }
 
-pub fn trans_match<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                               match_expr: &ast::Expr,
-                               discr_expr: &ast::Expr,
-                               arms: &[ast::Arm],
-                               dest: Dest)
-                               -> Block<'blk, 'tcx> {
+pub fn trans_match<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                   match_expr: &ast::Expr,
+                                   discr_expr: &ast::Expr,
+                                   arms: &[ast::Arm],
+                                   dest: Dest)
+                                   -> &'blk Block {
     let _icx = push_ctxt("match::trans_match");
     trans_match_inner(bcx, match_expr.id, discr_expr, arms, dest)
 }
 
 /// Checks whether the binding in `discr` is assigned to anywhere in the expression `body`
-fn is_discr_reassigned(bcx: Block, discr: &ast::Expr, body: &ast::Expr) -> bool {
+fn is_discr_reassigned(bcx: &mut BlockContext, discr: &ast::Expr, body: &ast::Expr) -> bool {
     let (vid, field) = match discr.node {
         ast::ExprPath(..) => match bcx.def(discr.id) {
             def::DefLocal(vid) | def::DefUpvar(vid, _) => (vid, None),
@@ -1397,9 +1428,9 @@ impl<'tcx> euv::Delegate<'tcx> for ReassignmentChecker {
    }
 }
 
-fn create_bindings_map<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pat: &ast::Pat,
-                                   discr: &ast::Expr, body: &ast::Expr)
-                                   -> BindingsMap<'tcx> {
+fn create_bindings_map<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, pat: &ast::Pat,
+                                       discr: &ast::Expr, body: &ast::Expr)
+                                       -> BindingsMap<'tcx> {
     // Create the bindings map, which is a mapping from each binding name
     // to an alloca() that will be the value for that local variable.
     // Note that we use the names because each binding will have many ids
@@ -1425,23 +1456,26 @@
                    llmatch = alloca_no_lifetime(bcx,
                                                 llvariable_ty.ptr_to(),
                                                 "__llmatch");
+                    let name = bcx.name(name);
                    trmode = TrByCopy(alloca_no_lifetime(bcx,
                                                         llvariable_ty,
-                                                         &bcx.name(name)));
+                                                         &name));
                }
                ast::BindByValue(_) => {
                    // in this case, the final type of the variable will be T,
                    // but during matching we need to store a *T as explained
                    // above
+                    let name = bcx.name(name);
                    llmatch = alloca_no_lifetime(bcx,
                                                 llvariable_ty.ptr_to(),
-                                                 &bcx.name(name));
+                                                 &name);
                    trmode = TrByMove;
                }
                ast::BindByRef(_) => {
+                    let name = bcx.name(name);
                    llmatch = alloca_no_lifetime(bcx,
                                                 llvariable_ty,
-                                                 &bcx.name(name));
+                                                 &name);
                    trmode = TrByRef;
                }
            };
@@ -1456,20 +1490,21 @@
     return bindings_map;
 }
 
-fn trans_match_inner<'blk, 'tcx>(scope_cx: Block<'blk, 'tcx>,
-                                 match_id: ast::NodeId,
-                                 discr_expr: &ast::Expr,
-                                 arms: &[ast::Arm],
-                                 dest: Dest) -> Block<'blk, 'tcx> {
+fn trans_match_inner<'r, 'blk, 'tcx>
+    (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>,
+     match_id: ast::NodeId,
+     discr_expr: &ast::Expr,
+     arms: &[ast::Arm],
+     dest: Dest) -> &'blk Block
+{
     let _icx = push_ctxt("match::trans_match_inner");
-    let fcx = scope_cx.fcx;
-    let mut bcx = scope_cx;
+    let mut bcx = &mut bl.with_fcx(fcx);
     let tcx = bcx.tcx();
 
     let discr_datum = unpack_datum!(bcx, expr::trans_to_lvalue(bcx, discr_expr,
                                                               "match"));
-    if bcx.unreachable.get() {
-        return bcx;
+    if bcx.bl.unreachable.get() {
+        return bcx.bl;
     }
 
     let t = node_id_type(bcx, discr_expr.id);
@@ -1480,19 +1515,19 @@ fn trans_match_inner<'blk, 'tcx>(scope_cx: Block<'blk, 'tcx>,
     };
 
     let arm_datas: Vec<ArmData> = arms.iter().map(|arm| ArmData {
-        bodycx: fcx.new_id_block("case_body", arm.body.id),
+        bodycx: bcx.fcx.new_id_block("case_body", arm.body.id),
         arm: arm,
         bindings_map: create_bindings_map(bcx, &*arm.pats[0], discr_expr, &*arm.body)
     }).collect();
 
-    let mut pat_renaming_map = if scope_cx.sess().opts.debuginfo != NoDebugInfo {
+    let mut pat_renaming_map = if bcx.sess().opts.debuginfo != NoDebugInfo {
         Some(FnvHashMap())
     } else {
         None
     };
 
     let arm_pats: Vec<Vec<P<ast::Pat>>> = {
-        let mut static_inliner = StaticInliner::new(scope_cx.tcx(),
+        let mut static_inliner = StaticInliner::new(bcx.tcx(),
                                                     pat_renaming_map.as_mut());
         arm_datas.iter().map(|arm_data| {
             arm_data.arm.pats.iter().map(|p| static_inliner.fold_pat((*p).clone())).collect()
@@ -1524,44 +1559,45 @@ fn trans_match_inner<'blk, 'tcx>(scope_cx: Block<'blk, 'tcx>,
     let mut arm_cxs = Vec::new();
     for arm_data in &arm_datas {
-        let mut bcx = arm_data.bodycx;
+        let bcx = &mut arm_data.bodycx.with_fcx(bcx.fcx);
 
        // insert bindings into the lllocals map and add cleanups
-        let cs = fcx.push_custom_cleanup_scope();
-        bcx = insert_lllocals(bcx, &arm_data.bindings_map, Some(cleanup::CustomScope(cs)));
-        bcx = expr::trans_into(bcx, &*arm_data.arm.body, dest);
-        bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, cs);
-        arm_cxs.push(bcx);
+        let cs = bcx.fcx.push_custom_cleanup_scope();
+        bcx.bl = insert_lllocals(bcx, &arm_data.bindings_map, Some(cleanup::CustomScope(cs)));
+        bcx.bl = expr::trans_into(bcx, &*arm_data.arm.body, dest);
+        let bl = bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx.bl, cs);
+        arm_cxs.push(bl);
     }
 
-    bcx = scope_cx.fcx.join_blocks(match_id, &arm_cxs[..]);
-    return bcx;
+    bcx.fcx.join_blocks(match_id, &arm_cxs[..])
 }
 
 /// Generates code for a local variable declaration like `let <pat>;` or `let <pat> =
 /// <opt_init_expr>`.
-pub fn store_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                               local: &ast::Local)
-                               -> Block<'blk, 'tcx> {
+pub fn store_local<'r, 'blk, 'tcx>
+    (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>,
+     local: &ast::Local)
+     -> &'blk Block {
     let _icx = push_ctxt("match::store_local");
-    let mut bcx = bcx;
+    let mut bcx = &mut bl.with_fcx(fcx);
     let tcx = bcx.tcx();
     let pat = &*local.pat;
 
-    fn create_dummy_locals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
-                                       pat: &ast::Pat)
-                                       -> Block<'blk, 'tcx> {
+    fn create_dummy_locals<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                           pat: &ast::Pat)
+                                           -> &'blk Block {
         let _icx = push_ctxt("create_dummy_locals");
        // create dummy memory for the variables if we have no
        // value to store into them immediately
        let tcx = bcx.tcx();
+        let mut bl = bcx.bl;
        pat_bindings(&tcx.def_map, pat, |_, p_id, _, path1| {
            let scope = cleanup::var_scope(tcx, p_id);
-            bcx = mk_binding_alloca(
-                bcx, p_id, path1.node.name, scope, (),
-                |(), bcx, llval, ty| { drop_done_fill_mem(bcx, llval, ty); bcx });
+            bl = mk_binding_alloca(
+                &mut bl.with_fcx(bcx.fcx), p_id, path1.node.name, scope, (),
+                |(), bcx, llval, ty| { drop_done_fill_mem(bcx, llval, ty); bcx.bl });
        });
-        bcx
+        bl
     }
 
     match local.init {
@@ -1613,12 +1649,14 @@ pub fn store_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
 /// - `llval` is a pointer to the argument value (in other words,
 ///   if the argument type is `T`, then `llval` is a `T*`). In some
 ///   cases, this code may zero out the memory `llval` points at.
-pub fn store_arg<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
-                             pat: &ast::Pat,
-                             arg: Datum<'tcx, Rvalue>,
-                             arg_scope: cleanup::ScopeId)
-                             -> Block<'blk, 'tcx> {
+pub fn store_arg<'r, 'blk, 'tcx>
+    (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>,
+     pat: &ast::Pat,
+     arg: Datum<'tcx, Rvalue>,
+     arg_scope: cleanup::ScopeId)
+     -> &'blk Block {
     let _icx = push_ctxt("match::store_arg");
+    let mut bcx = &mut bl.with_fcx(fcx);
 
     match simple_identifier(&*pat) {
         Some(ident) => {
@@ -1631,9 +1669,9 @@ pub fn store_arg<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
            // already put it in a temporary alloca and gave it up, unless
            // we emit extra-debug-info, which requires local allocas :(.
            let arg_val = arg.add_clean(bcx.fcx, arg_scope);
-            bcx.fcx.lllocals.borrow_mut()
+            bcx.fcx.lllocals
                   .insert(pat.id, Datum::new(arg_val, arg_ty, Lvalue));
-            bcx
+            bcx.bl
        } else {
            mk_binding_alloca(
                bcx, pat.id, ident.name, arg_scope, arg,
@@ -1651,31 +1689,32 @@
     }
 }
 
-fn mk_binding_alloca<'blk, 'tcx, A, F>(bcx: Block<'blk, 'tcx>,
-                                       p_id: ast::NodeId,
-                                       name: ast::Name,
-                                       cleanup_scope: cleanup::ScopeId,
-                                       arg: A,
-                                       populate: F)
-                                       -> Block<'blk, 'tcx> where
-    F: FnOnce(A, Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
+fn mk_binding_alloca<'r, 'blk, 'tcx, A, F>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                           p_id: ast::NodeId,
+                                           name: ast::Name,
+                                           cleanup_scope: cleanup::ScopeId,
+                                           arg: A,
+                                           populate: F)
+                                           -> &'blk Block where
+    F: FnOnce(A, &mut BlockContext<'r, 'blk, 'tcx>, ValueRef, Ty<'tcx>) -> &'blk Block,
 {
     let var_ty = node_id_type(bcx, p_id);
 
     // Allocate memory on stack for the binding.
-    let llval = alloc_ty(bcx, var_ty, &bcx.name(name));
+    let name = bcx.name(name);
+    let llval = alloc_ty(bcx, var_ty, &name);
 
     // Subtle: be sure that we *populate* the memory *before*
     // we schedule the cleanup.
-    let bcx = populate(arg, bcx, llval, var_ty);
+    let bcx = &mut populate(arg, bcx, llval, var_ty).with_fcx(bcx.fcx);
     bcx.fcx.schedule_lifetime_end(cleanup_scope, llval);
     bcx.fcx.schedule_drop_mem(cleanup_scope, llval, var_ty);
 
     // Now that memory is initialized and has cleanup scheduled,
     // create the datum and insert into the local variable map.
     let datum = Datum::new(llval, var_ty, Lvalue);
-    bcx.fcx.lllocals.borrow_mut().insert(p_id, datum);
-    bcx
+    bcx.fcx.lllocals.insert(p_id, datum);
+    bcx.bl
 }
 
 /// A simple version of the pattern matching code that only handles
@@ -1690,24 +1729,26 @@
 /// - bcx: starting basic block context
 /// - pat: the irrefutable pattern being matched.
 /// - val: the value being matched -- must be an lvalue (by ref, with cleanup)
-fn bind_irrefutable_pat<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                    pat: &ast::Pat,
-                                    val: ValueRef,
-                                    cleanup_scope: cleanup::ScopeId)
-                                    -> Block<'blk, 'tcx> {
+fn bind_irrefutable_pat<'r, 'blk, 'tcx>
+    (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>,
+     pat: &ast::Pat,
+     val: ValueRef,
+     cleanup_scope: cleanup::ScopeId)
+     -> &'blk Block
+{
+    let mut bcx = &mut bl.with_fcx(fcx);
     debug!("bind_irrefutable_pat(bcx={}, pat={})",
           bcx.to_str(),
          pat.repr(bcx.tcx()));
 
     if bcx.sess().asm_comments() {
-        add_comment(bcx, &format!("bind_irrefutable_pat(pat={})",
-                                  pat.repr(bcx.tcx())));
+        let r = pat.repr(bcx.tcx());
+        add_comment(bcx, &format!("bind_irrefutable_pat(pat={})", r));
     }
 
     let _indenter = indenter();
 
     let _icx = push_ctxt("match::bind_irrefutable_pat");
-    let mut bcx = bcx;
     let tcx = bcx.tcx();
     let ccx = bcx.ccx();
     match pat.node {
@@ -1716,7 +1757,7 @@ fn bind_irrefutable_pat<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
            // Allocate the stack slot where the value of this
            // binding will live and place it into the appropriate
            // map.
-            bcx = mk_binding_alloca(
+            bcx.bl = mk_binding_alloca(
                bcx, pat.id, path1.node.name, cleanup_scope, (),
                |(), bcx, llval, ty| {
                    match pat_binding_mode {
@@ -1737,14 +1778,14 @@
                            Store(bcx, val, llval);
                        }
 
-                        bcx
+                        bcx.bl
                    }
                }
            });
        }
 
        if let Some(ref inner_pat) = *inner {
-            bcx = bind_irrefutable_pat(bcx, &**inner_pat, val, cleanup_scope);
+            bcx.bl = bind_irrefutable_pat(bcx, &**inner_pat, val, cleanup_scope);
        }
     }
        ast::PatEnum(_, ref sub_pats) => {
@@ -1761,8 +1802,8 @@
                        val);
                    if let Some(ref sub_pat) = *sub_pats {
                        for (i, &argval) in args.vals.iter().enumerate() {
-                            bcx = bind_irrefutable_pat(bcx, &*sub_pat[i],
-                                                       argval, cleanup_scope);
+                            bcx.bl = bind_irrefutable_pat(bcx, &*sub_pat[i],
+                                                          argval, cleanup_scope);
                        }
                    }
                }
@@ -1777,8 +1818,8 @@
                        for (i, elem) in elems.iter().enumerate() {
                            let fldptr = adt::trans_field_ptr(bcx, &*repr,
                                                              val, 0, i);
-                            bcx = bind_irrefutable_pat(bcx, &**elem,
-                                                       fldptr, cleanup_scope);
+                            bcx.bl = bind_irrefutable_pat(bcx, &**elem,
+                                                          fldptr, cleanup_scope);
                        }
                    }
                }
@@ -1797,7 +1838,7 @@
                let ix = ty::field_idx_strict(tcx, f.node.ident.name, field_tys);
                let fldptr = adt::trans_field_ptr(bcx, &*pat_repr, val,
                                                  discr, ix);
-                bcx = bind_irrefutable_pat(bcx, &*f.node.pat, fldptr, cleanup_scope);
+                bcx.bl = bind_irrefutable_pat(bcx, &*f.node.pat, fldptr, cleanup_scope);
            }
        })
     }
@@ -1805,16 +1846,16 @@
            let repr = adt::represent_node(bcx, pat.id);
            for (i, elem) in elems.iter().enumerate() {
                let fldptr = adt::trans_field_ptr(bcx, &*repr, val, 0, i);
-                bcx = bind_irrefutable_pat(bcx, &**elem, fldptr, cleanup_scope);
+                bcx.bl = bind_irrefutable_pat(bcx, &**elem, fldptr, cleanup_scope);
            }
        }
        ast::PatBox(ref inner) => {
            let llbox = Load(bcx, val);
-            bcx = bind_irrefutable_pat(bcx, &**inner, llbox, cleanup_scope);
+            bcx.bl = bind_irrefutable_pat(bcx, &**inner, llbox, cleanup_scope);
        }
        ast::PatRegion(ref inner, _) => {
            let loaded_val = Load(bcx, val);
-            bcx = bind_irrefutable_pat(bcx, &**inner, loaded_val, cleanup_scope);
+            bcx.bl = bind_irrefutable_pat(bcx, &**inner, loaded_val, cleanup_scope);
        }
        ast::PatVec(ref before, ref slice, ref after) => {
            let pat_ty = node_id_type(bcx, pat.id);
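Every function converted above follows the same idiom: destructure the incoming `&mut BlockContext` in the parameter list, rebuild a local context with `with_fcx`, thread the current basic block through `bcx.bl`, and return the bare `&'blk Block` rather than a context that would keep `fcx` mutably borrowed. A reduced sketch of that calling convention, reusing the hypothetical `Block`/`BlockContext`/`with_fcx` definitions sketched earlier (again an illustration, not the real rustc types):

fn step<'r, 'blk>(bcx: &mut BlockContext<'r, 'blk>) -> &'blk Block {
    // Emit some code; a helper may finish in a different basic block,
    // which is why it hands back the block it ended in.
    bcx.bl
}

fn example<'r, 'blk>(
    // Destructure in the parameter position, exactly as the diff does:
    &mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk>)
    -> &'blk Block
{
    // Re-pair the bare block with the (reborrowed) function context.
    let bcx = &mut bl.with_fcx(fcx);
    // Thread the current block through the context instead of rebinding a
    // whole new `bcx`, mirroring the `bcx.bl = ...` assignments above.
    bcx.bl = step(bcx);
    // Return the bare `&'blk Block`: unlike a BlockContext, it does not
    // keep the caller's FunctionContext mutably borrowed.
    bcx.bl
}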
@@ -1828,13 +1869,13 @@ fn bind_irrefutable_pat<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                }
                &None => ()
            }
-            bcx = before
+            bcx.bl = before
                .iter()
                .chain(slice.iter())
                .chain(after.iter())
                .zip(extracted.vals.into_iter())
-                .fold(bcx, |bcx, (inner, elem)|
-                    bind_irrefutable_pat(bcx, &**inner, elem, cleanup_scope)
+                .fold(bcx.bl, |bl, (inner, elem)|
+                    bind_irrefutable_pat(&mut bl.with_fcx(bcx.fcx), &**inner, elem, cleanup_scope)
                );
        }
        ast::PatMac(..) => {
@@ -1843,5 +1884,5 @@ fn bind_irrefutable_pat<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
        ast::PatQPath(..) | ast::PatWild(_) | ast::PatLit(_) |
        ast::PatRange(_, _) => ()
     }
-    return bcx;
+    return bcx.bl;
 }
diff --git a/src/librustc_trans/trans/adt.rs b/src/librustc_trans/trans/adt.rs
index 6d4c72c132a59..d15f746d627b3 100644
--- a/src/librustc_trans/trans/adt.rs
+++ b/src/librustc_trans/trans/adt.rs
@@ -134,8 +134,8 @@ pub struct Struct<'tcx> {
 /// Convenience for `represent_type`. There should probably be more or
 /// these, for places in trans where the `Ty` isn't directly
 /// available.
-pub fn represent_node<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                  node: ast::NodeId) -> Rc<Repr<'tcx>> {
+pub fn represent_node<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                      node: ast::NodeId) -> Rc<Repr<'tcx>> {
     represent_type(bcx.ccx(), node_id_type(bcx, node))
 }
 
@@ -780,9 +780,9 @@ fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, st: &Struct<'tcx>,
 /// destructuring; this may or may not involve the actual discriminant.
 ///
 /// This should ideally be less tightly tied to `_match`.
-pub fn trans_switch<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                r: &Repr<'tcx>, scrutinee: ValueRef)
-                                -> (_match::BranchKind, Option<ValueRef>) {
+pub fn trans_switch<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                    r: &Repr<'tcx>, scrutinee: ValueRef)
+                                    -> (_match::BranchKind, Option<ValueRef>) {
     match *r {
         CEnum(..) | General(..) |
         RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => {
@@ -806,8 +806,8 @@ pub fn is_discr_signed<'tcx>(r: &Repr<'tcx>) -> bool {
 }
 
 /// Obtain the actual discriminant of a value.
-pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
-                                   scrutinee: ValueRef, cast_to: Option<Type>)
+pub fn trans_get_discr<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, r: &Repr<'tcx>,
+                                       scrutinee: ValueRef, cast_to: Option<Type>)
     -> ValueRef {
     debug!("trans_get_discr r: {:?}", r);
     let val = match *r {
@@ -820,7 +820,8 @@ pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
        RawNullablePointer { nndiscr, nnty, .. } =>  {
            let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
            let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty);
-            ICmp(bcx, cmp, Load(bcx, scrutinee), C_null(llptrty), DebugLoc::None)
+            let op = Load(bcx, scrutinee);
+            ICmp(bcx, cmp, op, C_null(llptrty), DebugLoc::None)
        }
        StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
            struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee)
@@ -832,7 +833,7 @@
     }
 }
 
-fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: Disr, discrfield: &DiscrField,
+fn struct_wrapped_nullable_bitdiscr(bcx: &mut BlockContext, nndiscr: Disr, discrfield: &DiscrField,
                                     scrutinee: ValueRef) -> ValueRef {
     let llptrptr = GEPi(bcx, scrutinee, &discrfield[..]);
     let llptr = Load(bcx, llptrptr);
@@ -841,7 +842,7 @@
 }
 
 /// Helper for cases where the discriminant is simply loaded.
-fn load_discr(bcx: Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr)
+fn load_discr(bcx: &mut BlockContext, ity: IntType, ptr: ValueRef, min: Disr, max: Disr)
     -> ValueRef {
     let llty = ll_inttype(bcx.ccx(), ity);
     assert_eq!(val_ty(ptr), llty.ptr_to());
@@ -868,16 +869,16 @@
 /// discriminant-like value returned by `trans_switch`.
 ///
 /// This should ideally be less tightly tied to `_match`.
-pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr, discr: Disr)
-                              -> _match::OptResult<'blk, 'tcx> {
+pub fn trans_case<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, r: &Repr, discr: Disr)
+                                  -> _match::OptResult<'blk> {
     match *r {
        CEnum(ity, _, _) => {
-            _match::SingleResult(Result::new(bcx, C_integral(ll_inttype(bcx.ccx(), ity),
-                                                             discr as u64, true)))
+            _match::SingleResult(Result::new(bcx.bl, C_integral(ll_inttype(bcx.ccx(), ity),
+                                                                discr as u64, true)))
        }
        General(ity, _, _) => {
-            _match::SingleResult(Result::new(bcx, C_integral(ll_inttype(bcx.ccx(), ity),
-                                                             discr as u64, true)))
+            _match::SingleResult(Result::new(bcx.bl, C_integral(ll_inttype(bcx.ccx(), ity),
+                                                                discr as u64, true)))
        }
        Univariant(..) => {
            bcx.ccx().sess().bug("no cases for univariants or structs")
@@ -885,35 +886,39 @@
        RawNullablePointer { .. } |
        StructWrappedNullablePointer { .. } => {
            assert!(discr == 0 || discr == 1);
-            _match::SingleResult(Result::new(bcx, C_bool(bcx.ccx(), discr != 0)))
+            _match::SingleResult(Result::new(bcx.bl, C_bool(bcx.ccx(), discr != 0)))
        }
     }
 }
 
 /// Set the discriminant for a new value of the given case of the given
 /// representation.
-pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
-                                   val: ValueRef, discr: Disr) {
+pub fn trans_set_discr<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, r: &Repr<'tcx>,
+                                       val: ValueRef, discr: Disr) {
     match *r {
        CEnum(ity, min, max) => {
            assert_discr_in_range(ity, min, max, discr);
-            Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr as u64, true),
+            let ty = ll_inttype(bcx.ccx(), ity);
+            Store(bcx, C_integral(ty, discr as u64, true),
                  val);
        }
        General(ity, ref cases, dtor) => {
            if dtor_active(dtor) {
                let ptr = trans_field_ptr(bcx, r, val, discr,
                                          cases[discr as usize].fields.len() - 2);
-                Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED as usize), ptr);
+                let v = C_u8(bcx.ccx(), DTOR_NEEDED as usize);
+                Store(bcx, v, ptr);
            }
-            Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr as u64, true),
-                  GEPi(bcx, val, &[0, 0]));
+            let ty = ll_inttype(bcx.ccx(), ity);
+            let p = GEPi(bcx, val, &[0, 0]);
+            Store(bcx, C_integral(ty, discr as u64, true), p);
        }
        Univariant(ref st, dtor) => {
            assert_eq!(discr, 0);
            if dtor_active(dtor) {
-                Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED as usize),
-                      GEPi(bcx, val, &[0, st.fields.len() - 1]));
+                let v = C_u8(bcx.ccx(), DTOR_NEEDED as usize);
+                let p = GEPi(bcx, val, &[0, st.fields.len() - 1]);
+                Store(bcx, v, p);
            }
        }
        RawNullablePointer { nndiscr, nnty, ..} => {
@@ -962,8 +967,8 @@ pub fn num_args(r: &Repr, discr: Disr) -> usize {
 }
 
 /// Access a field, at a point when the value's case is known.
-pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
-                                   val: ValueRef, discr: Disr, ix: usize) -> ValueRef {
+pub fn trans_field_ptr<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, r: &Repr<'tcx>,
+                                       val: ValueRef, discr: Disr, ix: usize) -> ValueRef {
     // Note: if this ever needs to generate conditionals (e.g., if we
     // decide to do some kind of cdr-coding-like non-unique repr
     // someday), it will need to return a possibly-new bcx as well.
@@ -1001,8 +1006,9 @@
     }
 }
 
-pub fn struct_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, st: &Struct<'tcx>, val: ValueRef,
-                                    ix: usize, needs_cast: bool) -> ValueRef {
+pub fn struct_field_ptr<'r, 'blk, 'tcx>
+    (bcx: &mut BlockContext<'r, 'blk, 'tcx>, st: &Struct<'tcx>, val: ValueRef,
+     ix: usize, needs_cast: bool) -> ValueRef {
     let val = if needs_cast {
        let ccx = bcx.ccx();
        let fields = st.fields.iter().map(|&ty| type_of::type_of(ccx, ty)).collect::<Vec<_>>();
@@ -1015,29 +1021,29 @@
     GEPi(bcx, val, &[0, ix])
 }
 
-pub fn fold_variants<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
-                                    r: &Repr<'tcx>,
-                                    value: ValueRef,
-                                    mut f: F)
-                                    -> Block<'blk, 'tcx> where
-    F: FnMut(Block<'blk, 'tcx>, &Struct<'tcx>, ValueRef) -> Block<'blk, 'tcx>,
+pub fn fold_variants<'r, 'blk, 'tcx, F>
+    (bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+     r: &Repr<'tcx>,
+     value: ValueRef,
+     mut f: F)
+     -> &'blk Block where
+    F: for<'a> FnMut(&mut BlockContext<'a, 'blk, 'tcx>, &Struct<'tcx>, ValueRef) -> &'blk Block,
 {
-    let fcx = bcx.fcx;
     match *r {
        Univariant(ref st, _) => {
            f(bcx, st, value)
        }
        General(ity, ref cases, _) => {
            let ccx = bcx.ccx();
-            let unr_cx = fcx.new_temp_block("enum-variant-iter-unr");
-            Unreachable(unr_cx);
+            let unr_cx = bcx.fcx.new_temp_block("enum-variant-iter-unr");
+            Unreachable(&mut unr_cx.with_fcx(bcx.fcx));
 
            let discr_val = trans_get_discr(bcx, r, value, None);
            let llswitch = Switch(bcx, discr_val, unr_cx.llbb, cases.len());
-            let bcx_next = fcx.new_temp_block("enum-variant-iter-next");
+            let bcx_next = bcx.fcx.new_temp_block("enum-variant-iter-next");
 
            for (discr, case) in cases.iter().enumerate() {
-                let mut variant_cx = fcx.new_temp_block(
+                let variant_cx = bcx.fcx.new_temp_block(
                    &format!("enum-variant-iter-{}", &discr.to_string())
                );
                let rhs_val = C_integral(ll_inttype(ccx, ity), discr as u64, true);
@@ -1046,10 +1052,12 @@
                let fields = case.fields.iter().map(|&ty|
                    type_of::type_of(bcx.ccx(), ty)).collect::<Vec<_>>();
                let real_ty = Type::struct_(ccx, &fields[..], case.packed);
-                let variant_value = PointerCast(variant_cx, value, real_ty.ptr_to());
+                let variant_value = PointerCast(&mut variant_cx.with_fcx(bcx.fcx),
+                                                value, real_ty.ptr_to());
 
-                variant_cx = f(variant_cx, case, variant_value);
-                Br(variant_cx, bcx_next.llbb, DebugLoc::None);
+                let mut bcx = &mut variant_cx.with_fcx(bcx.fcx);
+                let variant_cx = f(bcx, case, variant_value);
+                Br(&mut variant_cx.with_fcx(bcx.fcx), bcx_next.llbb, DebugLoc::None);
            }
 
            bcx_next
@@ -1059,11 +1067,13 @@
 }
 
 /// Access the struct drop flag, if present.
-pub fn trans_drop_flag_ptr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
-                                       r: &Repr<'tcx>,
-                                       val: ValueRef)
-                                       -> datum::DatumBlock<'blk, 'tcx, datum::Expr>
+pub fn trans_drop_flag_ptr<'r, 'blk, 'tcx>
+    (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>,
+     r: &Repr<'tcx>,
+     val: ValueRef)
+     -> datum::DatumBlock<'blk, 'tcx, datum::Expr>
 {
+    let mut bcx = &mut bl.with_fcx(fcx);
     let tcx = bcx.tcx();
     let ptr_ty = ty::mk_imm_ptr(bcx.tcx(), tcx.dtor_type());
     match *r {
@@ -1072,20 +1082,19 @@
            datum::immediate_rvalue_bcx(bcx, flag_ptr, ptr_ty).to_expr_datumblock()
        }
        General(_, _, dtor) if dtor_active(dtor) => {
-            let fcx = bcx.fcx;
-            let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
+            let custom_cleanup_scope = bcx.fcx.push_custom_cleanup_scope();
            let scratch = unpack_datum!(bcx, datum::lvalue_scratch_datum(
                bcx, tcx.dtor_type(), "drop_flag",
-                cleanup::CustomScope(custom_cleanup_scope), (), |_, bcx, _| bcx
+                cleanup::CustomScope(custom_cleanup_scope), (), |_, bcx, _| bcx.bl
            ));
-            bcx = fold_variants(bcx, r, val, |variant_cx, st, value| {
+            let bl = fold_variants(bcx, r, val, |variant_cx, st, value| {
                let ptr = struct_field_ptr(variant_cx, st, value,
                                           (st.fields.len() - 1), false);
                datum::Datum::new(ptr, ptr_ty, datum::Lvalue)
                    .store_to(variant_cx, scratch.val)
            });
            let expr_datum = scratch.to_expr_datum();
-            fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
-            datum::DatumBlock::new(bcx, expr_datum)
+            bcx.fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
+            datum::DatumBlock::new(bl, expr_datum)
        }
        _ => bcx.ccx().sess().bug("tried to get drop flag of non-droppable type")
     }
 }
diff --git a/src/librustc_trans/trans/asm.rs b/src/librustc_trans/trans/asm.rs
index 27128827e2676..8f68de00f1883 100644
--- a/src/librustc_trans/trans/asm.rs
+++ b/src/librustc_trans/trans/asm.rs
@@ -25,20 +25,24 @@ use std::ffi::CString;
 use libc::{c_uint, c_char};
 
 // Take an inline assembly expression and splat it out via LLVM
-pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ia: &ast::InlineAsm)
-                                    -> Block<'blk, 'tcx> {
-    let fcx = bcx.fcx;
-    let mut bcx = bcx;
+pub fn trans_inline_asm<'r, 'blk, 'tcx>
+    (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>,
+     ia: &ast::InlineAsm)
+     -> &'blk Block
+{
+    let mut bcx = &mut bl.with_fcx(fcx);
     let mut constraints = Vec::new();
     let mut output_types = Vec::new();
 
-    let temp_scope = fcx.push_custom_cleanup_scope();
+    let temp_scope = bcx.fcx.push_custom_cleanup_scope();
 
     let mut ext_inputs = Vec::new();
     let mut ext_constraints = Vec::new();
 
     // Prepare the output operands
-    let outputs = ia.outputs.iter().enumerate().map(|(i, &(ref c, ref out, is_rw))| {
+    let mut outputs = Vec::new();
+
+    for (i, &(ref c, ref out, is_rw)) in ia.outputs.iter().enumerate() {
        constraints.push((*c).clone());
 
        let out_datum = unpack_datum!(bcx, expr::trans(bcx, &**out));
@@ -46,35 +50,38 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ia: &ast::InlineAsm)
        let val = out_datum.val;
        if is_rw {
            ext_inputs.push(unpack_result!(bcx, {
+                let expr_ty = expr_ty(bcx, &**out);
                callee::trans_arg_datum(bcx,
-                                        expr_ty(bcx, &**out),
-                                        out_datum,
-                                        cleanup::CustomScope(temp_scope),
-                                        callee::DontAutorefArg)
+                                        expr_ty,
+                                        out_datum,
+                                        cleanup::CustomScope(temp_scope),
+                                        callee::DontAutorefArg)
            }));
            ext_constraints.push(i.to_string());
        }
-        val
 
-    }).collect::<Vec<_>>();
+        outputs.push(val);
+    }
 
     // Now the input operands
-    let mut inputs = ia.inputs.iter().map(|&(ref c, ref input)| {
+    let mut inputs = Vec::new();
+    for &(ref c, ref input) in &ia.inputs {
        constraints.push((*c).clone());
 
        let in_datum = unpack_datum!(bcx, expr::trans(bcx, &**input));
-        unpack_result!(bcx, {
+        inputs.push(unpack_result!(bcx, {
+            let expr_ty = expr_ty(bcx, &**input);
            callee::trans_arg_datum(bcx,
-                                    expr_ty(bcx, &**input),
+                                    expr_ty,
                                    in_datum,
                                    cleanup::CustomScope(temp_scope),
                                    callee::DontAutorefArg)
-        })
-    }).collect::<Vec<_>>();
+        }));
+    }
     inputs.push_all(&ext_inputs[..]);
 
     // no failure occurred preparing operands, no need to cleanup
-    fcx.pop_custom_cleanup_scope(temp_scope);
+    bcx.fcx.pop_custom_cleanup_scope(temp_scope);
 
     let clobbers = ia.clobbers.iter()
                              .map(|s| format!("~{{{}}}", &s));
@@ -144,7 +151,6 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ia: &ast::InlineAsm)
                            llvm::LLVMMDNodeInContext(bcx.ccx().llcx(), &val, 1));
     }
 
-    return bcx;
-
+    return bcx.bl;
 }
diff --git a/src/librustc_trans/trans/base.rs b/src/librustc_trans/trans/base.rs
index e44aae76c19ad..b90ff71e2ffb1 100644
--- a/src/librustc_trans/trans/base.rs
+++ b/src/librustc_trans/trans/base.rs
@@ -52,7 +52,7 @@ use trans::callee;
 use trans::cleanup::CleanupMethods;
 use trans::cleanup;
 use trans::closure;
-use trans::common::{Block, C_bool, C_bytes_in_context, C_i32, C_int, C_integral};
+use trans::common::{BlockContext, Block, C_bool, C_bytes_in_context, C_i32, C_int, C_integral};
 use trans::common::{C_null, C_struct_in_context, C_u64, C_u8, C_undef};
 use trans::common::{CrateContext, FunctionContext};
 use trans::common::{Result, NodeIdAndSpan};
@@ -86,7 +86,7 @@ use util::nodemap::NodeMap;
 use arena::TypedArena;
 use libc::c_uint;
 use std::ffi::{CStr, CString};
-use std::cell::{Cell, RefCell};
+use std::cell::{RefCell};
 use std::collections::HashSet;
 use std::mem;
 use std::str;
@@ -244,7 +244,7 @@ pub fn get_extern_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, did: ast::DefId,
     return c;
 }
 
-fn require_alloc_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+fn require_alloc_fn<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
                                 info_ty: Ty<'tcx>, it: LangItem) -> ast::DefId {
     match bcx.tcx().lang_items.require(it) {
        Ok(id) => id,
@@ -259,23 +259,24 @@ fn require_alloc_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
 // The following malloc_raw_dyn* functions allocate a box to contain
 // a given type, but with a potentially dynamic size.
 
-pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+pub fn malloc_raw_dyn<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
                                  llty_ptr: Type,
                                  info_ty: Ty<'tcx>,
                                  size: ValueRef,
                                  align: ValueRef,
                                  debug_loc: DebugLoc)
-                                  -> Result<'blk, 'tcx> {
+                                  -> Result<'blk> {
     let _icx = push_ctxt("malloc_raw_exchange");
 
     // Allocate space:
+    let a = require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem);
     let r = callee::trans_lang_call(bcx,
-                                    require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem),
+                                    a,
                                    &[size, align],
                                    None,
                                    debug_loc);
 
-    Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr))
+    Result::new(r.bcx, PointerCast(&mut r.bcx.with_fcx(bcx.fcx), r.val, llty_ptr))
 }
 
@@ -311,7 +312,7 @@ pub fn bin_op_to_fcmp_predicate(ccx: &CrateContext, op: ast::BinOp_)
     }
 }
 
-pub fn compare_scalar_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+pub fn compare_scalar_types<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
                                        lhs: ValueRef,
                                        rhs: ValueRef,
                                        t: Ty<'tcx>,
@@ -330,23 +331,27 @@ pub fn compare_scalar_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
            }
        }
        ty::ty_bare_fn(..) | ty::ty_bool | ty::ty_uint(_) | ty::ty_char => {
-            ICmp(bcx, bin_op_to_icmp_predicate(bcx.ccx(), op, false), lhs, rhs, debug_loc)
+            let p = bin_op_to_icmp_predicate(bcx.ccx(), op, false);
+            ICmp(bcx, p, lhs, rhs, debug_loc)
        }
        ty::ty_ptr(mt) if common::type_is_sized(bcx.tcx(), mt.ty) => {
-            ICmp(bcx, bin_op_to_icmp_predicate(bcx.ccx(), op, false), lhs, rhs, debug_loc)
+            let p = bin_op_to_icmp_predicate(bcx.ccx(), op, false);
+            ICmp(bcx, p, lhs, rhs, debug_loc)
        }
        ty::ty_int(_) => {
-            ICmp(bcx, bin_op_to_icmp_predicate(bcx.ccx(), op, true), lhs, rhs, debug_loc)
+            let p = bin_op_to_icmp_predicate(bcx.ccx(), op, true);
+            ICmp(bcx, p, lhs, rhs, debug_loc)
        }
        ty::ty_float(_) => {
-            FCmp(bcx, bin_op_to_fcmp_predicate(bcx.ccx(), op), lhs, rhs, debug_loc)
+            let p = bin_op_to_fcmp_predicate(bcx.ccx(), op);
+            FCmp(bcx, p, lhs, rhs, debug_loc)
        }
        // Should never get here, because t is scalar.
        _ => bcx.sess().bug("non-scalar type passed to compare_scalar_types")
     }
 }
 
-pub fn compare_simd_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+pub fn compare_simd_types<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
                                      lhs: ValueRef,
                                      rhs: ValueRef,
                                      t: Ty<'tcx>,
@@ -372,37 +377,42 @@ pub fn compare_simd_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     // to get the correctly sized type. This will compile to a single instruction
     // once the IR is converted to assembly if the SIMD instruction is supported
     // by the target architecture.
-    SExt(bcx, ICmp(bcx, cmp, lhs, rhs, debug_loc), val_ty(lhs))
+    let c = ICmp(bcx, cmp, lhs, rhs, debug_loc);
+    SExt(bcx, c, val_ty(lhs))
 }
 
 // Iterates through the elements of a structural type.
-pub fn iter_structural_ty<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
-                                         av: ValueRef,
-                                         t: Ty<'tcx>,
-                                         mut f: F)
-                                         -> Block<'blk, 'tcx> where
-    F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
+pub fn iter_structural_ty<'r, 'blk, 'tcx, F>
+    (&mut BlockContext { mut bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>,
+     av: ValueRef,
+     t: Ty<'tcx>,
+     mut f: F)
+     -> &'blk Block where
+    F: for <'a> FnMut(&mut BlockContext<'a, 'blk, 'tcx>, ValueRef, Ty<'tcx>) -> &'blk Block,
 {
     let _icx = push_ctxt("iter_structural_ty");
-
-    fn iter_variant<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
-                                   repr: &adt::Repr<'tcx>,
-                                   av: ValueRef,
-                                   variant: &ty::VariantInfo<'tcx>,
-                                   substs: &Substs<'tcx>,
-                                   f: &mut F)
-                                   -> Block<'blk, 'tcx> where
-        F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
+    let mut cx = &mut bl.with_fcx(fcx);
+
+    fn iter_variant<'r, 'blk, 'tcx, F>
+        (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>,
+         repr: &adt::Repr<'tcx>,
+         av: ValueRef,
+         variant: &ty::VariantInfo<'tcx>,
+         substs: &Substs<'tcx>,
+         f: &mut F)
+         -> &'blk Block where
+        F: for <'a> FnMut(&mut BlockContext<'a, 'blk, 'tcx>, ValueRef, Ty<'tcx>) -> &'blk Block,
     {
        let _icx = push_ctxt("iter_variant");
+        let mut cx = &mut bl.with_fcx(fcx);
        let tcx = cx.tcx();
-        let mut cx = cx;
 
        for (i, &arg) in variant.args.iter().enumerate() {
            let arg = monomorphize::apply_param_substs(tcx, substs, &arg);
-            cx = f(cx, adt::trans_field_ptr(cx, repr, av, variant.disr_val, i), arg);
+            let p = adt::trans_field_ptr(cx, repr, av, variant.disr_val, i);
+            cx.bl = f(cx, p, arg);
        }
-        return cx;
+        return cx.bl;
     }
 
     let (data_ptr, info) = if common::type_is_sized(cx.tcx(), t) {
@@ -413,7 +423,6 @@ pub fn iter_structural_ty<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
        (Load(cx, data), Some(Load(cx, info)))
     };
 
-    let mut cx = cx;
     match t.sty {
        ty::ty_struct(..) => {
            let repr = adt::represent_type(cx.ccx(), t);
@@ -426,11 +435,13 @@ pub fn iter_structural_ty<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
                        llfld_a
                    } else {
                        let scratch = datum::rvalue_scratch_datum(cx, field_ty, "__fat_ptr_iter");
-                        Store(cx, llfld_a, GEPi(cx, scratch.val, &[0, abi::FAT_PTR_ADDR]));
-                        Store(cx, info.unwrap(), GEPi(cx, scratch.val, &[0, abi::FAT_PTR_EXTRA]));
+                        let v = GEPi(cx, scratch.val, &[0, abi::FAT_PTR_ADDR]);
+                        Store(cx, llfld_a, v);
+                        let v = GEPi(cx, scratch.val, &[0, abi::FAT_PTR_EXTRA]);
+                        Store(cx, info.unwrap(), v);
                        scratch.val
                    };
-                    cx = f(cx, val, field_ty);
+                    bl = f(cx, val, field_ty);
                }
            })
        }
@@ -440,28 +451,29 @@ pub fn iter_structural_ty<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
            let upvars = typer.closure_upvars(def_id, substs).unwrap();
            for (i, upvar) in upvars.iter().enumerate() {
                let llupvar = adt::trans_field_ptr(cx, &*repr, data_ptr, 0, i);
-                cx = f(cx, llupvar, upvar.ty);
+                bl = f(cx, llupvar, upvar.ty);
            }
        }
        ty::ty_vec(_, Some(n)) => {
            let (base, len) = tvec::get_fixed_base_and_len(cx, data_ptr, n);
            let unit_ty = ty::sequence_element_type(cx.tcx(), t);
-            cx = tvec::iter_vec_raw(cx, base, unit_ty, len, f);
+            bl = tvec::iter_vec_raw(cx, base, unit_ty, len, f);
        }
        ty::ty_vec(_, None) | ty::ty_str => {
            let unit_ty = ty::sequence_element_type(cx.tcx(), t);
-            cx = tvec::iter_vec_raw(cx, data_ptr, unit_ty, info.unwrap(), f);
+            bl = tvec::iter_vec_raw(cx, data_ptr, unit_ty, info.unwrap(), f);
        }
        ty::ty_tup(ref args) => {
            let repr = adt::represent_type(cx.ccx(), t);
            for (i, arg) in args.iter().enumerate() {
                let llfld_a = adt::trans_field_ptr(cx, &*repr, data_ptr, 0, i);
-                cx = f(cx, llfld_a, *arg);
+                let bl = f(cx, llfld_a, *arg);
+                cx.bl = bl;
            }
+            bl = cx.bl;
        }
        ty::ty_enum(tid, substs) => {
-            let fcx = cx.fcx;
-            let ccx = fcx.ccx;
+            let ccx = cx.fcx.ccx;
 
            let repr = adt::represent_type(ccx, t);
            let variants = ty::enum_variants(ccx.tcx(), tid);
@@ -474,21 +486,22 @@ pub fn iter_structural_ty<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
                (_match::Single, None) => {
                    if n_variants != 0 {
                        assert!(n_variants == 1);
-                        cx = iter_variant(cx, &*repr, av, &*(*variants)[0],
+                        bl = iter_variant(cx, &*repr, av, &*(*variants)[0],
                                          substs, &mut f);
                    }
                }
                (_match::Switch, Some(lldiscrim_a)) => {
-                    cx = f(cx, lldiscrim_a, cx.tcx().types.isize);
-                    let unr_cx = fcx.new_temp_block("enum-iter-unr");
-                    Unreachable(unr_cx);
+                    let s = cx.tcx().types.isize;
+                    cx.bl = f(cx, lldiscrim_a, s);
+                    let unr_cx = cx.fcx.new_temp_block("enum-iter-unr");
+                    Unreachable(&mut unr_cx.with_fcx(cx.fcx));
                    let llswitch = Switch(cx, lldiscrim_a, unr_cx.llbb,
                                          n_variants);
-                    let next_cx = fcx.new_temp_block("enum-iter-next");
+                    let next_cx = cx.fcx.new_temp_block("enum-iter-next");
 
                    for variant in &(*variants) {
                        let variant_cx =
-                            fcx.new_temp_block(
+                            cx.fcx.new_temp_block(
                                &format!("enum-iter-variant-{}",
                                         &variant.disr_val.to_string())
                            );
@@ -500,15 +513,15 @@ pub fn iter_structural_ty<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
                                   in iter_structural_ty")
                        }
                        let variant_cx =
-                            iter_variant(variant_cx,
+                            iter_variant(&mut variant_cx.with_fcx(cx.fcx),
                                         &*repr,
                                         data_ptr,
                                         &**variant,
                                         substs,
                                         &mut f);
-                        Br(variant_cx, next_cx.llbb, DebugLoc::None);
+                        Br(&mut variant_cx.with_fcx(cx.fcx), next_cx.llbb, DebugLoc::None);
                    }
-                    cx = next_cx;
+                    bl = next_cx;
                }
                _ => ccx.sess().unimpl("value from adt::trans_switch \
                                        in iter_structural_ty")
@@ -519,34 +532,35 @@ pub fn iter_structural_ty<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
                          ty_to_string(cx.tcx(), t)))
        }
     }
-    return cx;
+    return bl;
 }
 
-pub fn cast_shift_expr_rhs(cx: Block,
+pub fn cast_shift_expr_rhs(cx: &mut BlockContext,
                           op: ast::BinOp_,
                           lhs: ValueRef,
                           rhs: ValueRef)
                           -> ValueRef {
-    cast_shift_rhs(op, lhs, rhs,
-                   |a,b| Trunc(cx, a, b),
-                   |a,b| ZExt(cx, a, b))
+    cast_shift_rhs(cx, op, lhs, rhs,
+                   |cx,a,b| Trunc(cx, a, b),
+                   |cx,a,b| ZExt(cx, a, b))
 }
 
 pub fn cast_shift_const_rhs(op: ast::BinOp_,
                            lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-    cast_shift_rhs(op, lhs, rhs,
-                   |a, b| unsafe { llvm::LLVMConstTrunc(a, b.to_ref()) },
-                   |a, b| unsafe { llvm::LLVMConstZExt(a, b.to_ref()) })
-}
-
-fn cast_shift_rhs<F, G>(op: ast::BinOp_,
-                        lhs: ValueRef,
-                        rhs: ValueRef,
-                        trunc: F,
-                        zext: G)
-                        -> ValueRef where
-    F: FnOnce(ValueRef, Type) -> ValueRef,
-    G: FnOnce(ValueRef, Type) -> ValueRef,
+    cast_shift_rhs((), op, lhs, rhs,
+                   |_, a, b| unsafe { llvm::LLVMConstTrunc(a, b.to_ref()) },
+                   |_, a, b| unsafe { llvm::LLVMConstZExt(a, b.to_ref()) })
+}
+
+fn cast_shift_rhs<T, F, G>(t: T,
+                           op: ast::BinOp_,
+                           lhs: ValueRef,
+                           rhs: ValueRef,
+                           trunc: F,
+                           zext: G)
+                           -> ValueRef where
+    F: FnOnce(T, ValueRef, Type) -> ValueRef,
+    G: FnOnce(T, ValueRef, Type) -> ValueRef,
 {
     // Shifts may have any size int on the rhs
     if ast_util::is_shift_binop(op) {
@@ -557,11 +571,11 @@ fn cast_shift_rhs<F, G>(op: ast::BinOp_,
        let rhs_sz = rhs_llty.int_width();
        let lhs_sz = lhs_llty.int_width();
        if lhs_sz < rhs_sz {
-            trunc(rhs, lhs_llty)
+            trunc(t, rhs, lhs_llty)
        } else if lhs_sz > rhs_sz {
            // FIXME (#1877: If shifting by negative
            // values becomes not undefined then this is wrong.
-            zext(rhs, lhs_llty)
+            zext(t, rhs, lhs_llty)
        } else {
            rhs
        }
@@ -570,7 +584,7 @@ fn cast_shift_rhs<F, G>(op: ast::BinOp_,
     }
 }
 
-pub fn llty_and_min_for_signed_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+pub fn llty_and_min_for_signed_ty<'r, 'blk, 'tcx>(cx: &mut BlockContext<'r, 'blk, 'tcx>,
                                              val_t: Ty<'tcx>) -> (Type, u64) {
     match val_t.sty {
        ty::ty_int(t) => {
@@ -589,14 +603,14 @@
     }
 }
 
-pub fn fail_if_zero_or_overflows<'blk, 'tcx>(
-    cx: Block<'blk, 'tcx>,
+pub fn fail_if_zero_or_overflows<'r, 'blk, 'tcx>(
+    cx: &mut BlockContext<'r, 'blk, 'tcx>,
     call_info: NodeIdAndSpan,
     divrem: ast::BinOp,
     lhs: ValueRef,
     rhs: ValueRef,
     rhs_t: Ty<'tcx>)
-    -> Block<'blk, 'tcx> {
+    -> &'blk Block {
     let (zero_text, overflow_text) = if divrem.node == ast::BiDiv {
        ("attempted to divide by zero",
         "attempted to divide with overflow")
@@ -618,9 +632,10 @@
        ty::ty_struct(_, _) if type_is_simd(cx.tcx(), rhs_t) => {
            let mut res = C_bool(cx.ccx(), false);
            for i in 0 .. simd_size(cx.tcx(), rhs_t) {
-                res = Or(cx, res,
-                         IsNull(cx,
-                                ExtractElement(cx, rhs, C_int(cx.ccx(), i as i64))), debug_loc);
+                let ty = C_int(cx.ccx(), i as i64);
+                let ee = ExtractElement(cx, rhs, ty);
+                let is_null = IsNull(cx, ee);
+                res = Or(cx, res, is_null, debug_loc);
            }
            (res, false)
        }
@@ -644,9 +659,9 @@
     // integers, no action beyond checking for zero need be taken.
     if is_signed {
        let (llty, min) = llty_and_min_for_signed_ty(cx, rhs_t);
-        let minus_one = ICmp(bcx, llvm::IntEQ, rhs,
+        let minus_one = ICmp(&mut bcx.with_fcx(cx.fcx), llvm::IntEQ, rhs,
                             C_integral(llty, !0, false), debug_loc);
-        with_cond(bcx, minus_one, |bcx| {
+        with_cond(&mut bcx.with_fcx(cx.fcx), minus_one, |bcx| {
            let is_min = ICmp(bcx, llvm::IntEQ, lhs,
                              C_integral(llty, min, true), debug_loc);
            with_cond(bcx, is_min, |bcx| {
@@ -687,20 +702,20 @@ pub fn trans_external_path<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
     }
 }
 
-pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                          llfn: ValueRef,
-                          llargs: &[ValueRef],
-                          fn_ty: Ty<'tcx>,
-                          debug_loc: DebugLoc)
-                          -> (ValueRef, Block<'blk, 'tcx>) {
+pub fn invoke<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                              llfn: ValueRef,
+                              llargs: &[ValueRef],
+                              fn_ty: Ty<'tcx>,
+                              debug_loc: DebugLoc)
+                              -> (ValueRef, &'blk Block) {
     let _icx = push_ctxt("invoke_");
-    if bcx.unreachable.get() {
-        return (C_null(Type::i8(bcx.ccx())), bcx);
+    if bcx.bl.unreachable.get() {
+        return (C_null(Type::i8(bcx.ccx())), bcx.bl);
     }
 
     let attributes = attributes::from_fn_type(bcx.ccx(), fn_ty);
 
-    match bcx.opt_node_id {
+    match bcx.bl.opt_node_id {
        None => {
            debug!("invoke at ???");
        }
@@ -710,7 +725,7 @@ pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     }
 
     if need_invoke(bcx) {
-        debug!("invoking {} at {:?}", bcx.val_to_string(llfn), bcx.llbb);
+        debug!("invoking {} at {:?}", bcx.val_to_string(llfn), bcx.bl.llbb);
        for &llarg in llargs {
            debug!("arg: {}", bcx.val_to_string(llarg));
        }
@@ -726,7 +741,7 @@
                                          debug_loc);
        return (llresult, normal_bcx);
     } else {
-        debug!("calling {} at {:?}", bcx.val_to_string(llfn), bcx.llbb);
+        debug!("calling {} at {:?}", bcx.val_to_string(llfn), bcx.bl.llbb);
        for &llarg in llargs {
            debug!("arg: {}", bcx.val_to_string(llarg));
        }
@@ -736,24 +751,24 @@
                               &llargs[..],
                               Some(attributes),
                               debug_loc);
-        return (llresult, bcx);
+        return (llresult, bcx.bl);
     }
 }
 
-pub fn need_invoke(bcx: Block) -> bool {
+pub fn need_invoke(bcx: &mut BlockContext) -> bool {
     if bcx.sess().no_landing_pads() {
        return false;
     }
 
     // Avoid using invoke if we are already inside a landing pad.
-    if bcx.is_lpad {
+    if bcx.bl.is_lpad {
        return false;
     }
 
     bcx.fcx.needs_invoke()
 }
 
-pub fn load_if_immediate<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+pub fn load_if_immediate<'r, 'blk, 'tcx>(cx: &mut BlockContext<'r, 'blk, 'tcx>,
                                     v: ValueRef, t: Ty<'tcx>) -> ValueRef {
     let _icx = push_ctxt("load_if_immediate");
     if type_is_immediate(cx.ccx(), t) { return load_ty(cx, v, t); }
@@ -763,9 +778,9 @@
 /// Helper for loading values from memory. Does the necessary conversion if the in-memory type
 /// differs from the type used for SSA values. Also handles various special cases where the type
 /// gives us better information about what we are loading.
-pub fn load_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+pub fn load_ty<'r, 'blk, 'tcx>(cx: &mut BlockContext<'r, 'blk, 'tcx>,
                           ptr: ValueRef, t: Ty<'tcx>) -> ValueRef {
-    if cx.unreachable.get() || type_is_zero_size(cx.ccx(), t) {
+    if cx.bl.unreachable.get() || type_is_zero_size(cx.ccx(), t) {
        return C_undef(type_of::type_of(cx.ccx(), t));
     }
 
@@ -812,83 +827,90 @@
 /// Helper for storing values in memory. Does the necessary conversion if the in-memory type
 /// differs from the type used for SSA values.
-pub fn store_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) { - if cx.unreachable.get() { +pub fn store_ty<'r, 'blk, 'tcx>(cx: &mut BlockContext<'r, 'blk, 'tcx>, + v: ValueRef, dst: ValueRef, t: Ty<'tcx>) { + if cx.bl.unreachable.get() { return; } - let store = Store(cx, to_arg_ty(cx, v, t), to_arg_ty_ptr(cx, dst, t)); + let a1 = to_arg_ty(cx, v, t); + let a2 = to_arg_ty_ptr(cx, dst, t); + let store = Store(cx, a1, a2); unsafe { llvm::LLVMSetAlignment(store, type_of::align_of(cx.ccx(), t)); } } -pub fn to_arg_ty(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef { +pub fn to_arg_ty(bcx: &mut BlockContext, val: ValueRef, ty: Ty) -> ValueRef { if ty::type_is_bool(ty) { - ZExt(bcx, val, Type::i8(bcx.ccx())) + let ty = Type::i8(bcx.ccx()); + ZExt(bcx, val, ty) } else { val } } -pub fn from_arg_ty(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef { +pub fn from_arg_ty(bcx: &mut BlockContext, val: ValueRef, ty: Ty) -> ValueRef { if ty::type_is_bool(ty) { - Trunc(bcx, val, Type::i1(bcx.ccx())) + let ty = Type::i1(bcx.ccx()); + Trunc(bcx, val, ty) } else { val } } -pub fn to_arg_ty_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ptr: ValueRef, ty: Ty<'tcx>) -> ValueRef { +pub fn to_arg_ty_ptr<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + ptr: ValueRef, ty: Ty<'tcx>) -> ValueRef { if type_is_immediate(bcx.ccx(), ty) && type_of::type_of(bcx.ccx(), ty).is_aggregate() { // We want to pass small aggregates as immediate values, but using an aggregate LLVM type // for this leads to bad optimizations, so its arg type is an appropriately sized integer // and we have to convert it - BitCast(bcx, ptr, type_of::arg_type_of(bcx.ccx(), ty).ptr_to()) + let ty = type_of::arg_type_of(bcx.ccx(), ty); + BitCast(bcx, ptr, ty.ptr_to()) } else { ptr } } -pub fn init_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, local: &ast::Local) - -> Block<'blk, 'tcx> { +pub fn init_local<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, local: &ast::Local) + -> &'blk Block { debug!("init_local(bcx={}, local.id={})", bcx.to_str(), local.id); let _indenter = indenter(); let _icx = push_ctxt("init_local"); _match::store_local(bcx, local) } -pub fn raw_block<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>, +pub fn raw_block<'r, 'blk, 'tcx>(fcx: &mut FunctionContext<'blk, 'tcx>, is_lpad: bool, llbb: BasicBlockRef) - -> Block<'blk, 'tcx> { - common::BlockS::new(llbb, is_lpad, None, fcx) + -> &'blk mut Block { + common::Block::new(llbb, is_lpad, None).alloc(fcx) } -pub fn with_cond<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, - val: ValueRef, - f: F) - -> Block<'blk, 'tcx> where - F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx>, +pub fn with_cond<'r, 'blk, 'tcx, F> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + val: ValueRef, + f: F) + -> &'blk Block where + F: for<'a> FnOnce(&'a mut BlockContext<'a, 'blk, 'tcx>) -> &'blk Block { let _icx = push_ctxt("with_cond"); - if bcx.unreachable.get() || common::const_to_opt_uint(val) == Some(0) { - return bcx; + if bl.unreachable.get() || common::const_to_opt_uint(val) == Some(0) { + return bl; } - let fcx = bcx.fcx; let next_cx = fcx.new_temp_block("next"); let cond_cx = fcx.new_temp_block("cond"); - CondBr(bcx, val, cond_cx.llbb, next_cx.llbb, DebugLoc::None); - let after_cx = f(cond_cx); + CondBr(&mut bl.with_fcx(fcx), val, cond_cx.llbb, next_cx.llbb, DebugLoc::None); + let after_cx = f(&mut cond_cx.with_fcx(fcx)); if !after_cx.terminated.get() { - Br(after_cx, next_cx.llbb, DebugLoc::None); + Br(&mut after_cx.with_fcx(fcx), 
next_cx.llbb, DebugLoc::None); } next_cx } -pub fn call_lifetime_start(cx: Block, ptr: ValueRef) { +pub fn call_lifetime_start(cx: &mut BlockContext, ptr: ValueRef) { if cx.sess().opts.optimize == config::No { return; } @@ -902,7 +924,7 @@ pub fn call_lifetime_start(cx: Block, ptr: ValueRef) { Call(cx, lifetime_start, &[llsize, ptr], None, DebugLoc::None); } -pub fn call_lifetime_end(cx: Block, ptr: ValueRef) { +pub fn call_lifetime_end(cx: &mut BlockContext, ptr: ValueRef) { if cx.sess().opts.optimize == config::No { return; } @@ -916,7 +938,11 @@ pub fn call_lifetime_end(cx: Block, ptr: ValueRef) { Call(cx, lifetime_end, &[llsize, ptr], None, DebugLoc::None); } -pub fn call_memcpy(cx: Block, dst: ValueRef, src: ValueRef, n_bytes: ValueRef, align: u32) { +pub fn call_memcpy(cx: &mut BlockContext, + dst: ValueRef, + src: ValueRef, + n_bytes: ValueRef, + align: u32) { let _icx = push_ctxt("call_memcpy"); let ccx = cx.ccx(); let key = match &ccx.sess().target.target.target_pointer_width[..] { @@ -933,7 +959,7 @@ pub fn call_memcpy(cx: Block, dst: ValueRef, src: ValueRef, n_bytes: ValueRef, a Call(cx, memcpy, &[dst_ptr, src_ptr, size, align, volatile], None, DebugLoc::None); } -pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +pub fn memcpy_ty<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, dst: ValueRef, src: ValueRef, t: Ty<'tcx>) { let _icx = push_ctxt("memcpy_ty"); @@ -944,19 +970,22 @@ pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let llalign = type_of::align_of(ccx, t); call_memcpy(bcx, dst, src, llsz, llalign as u32); } else { - store_ty(bcx, load_ty(bcx, src, t), dst, t); + let ty = load_ty(bcx, src, t); + store_ty(bcx, ty, dst, t); } } -pub fn drop_done_fill_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) { - if cx.unreachable.get() { return; } +pub fn drop_done_fill_mem<'r, 'blk, 'tcx>(cx: &mut BlockContext<'r, 'blk, 'tcx>, + llptr: ValueRef, t: Ty<'tcx>) { + if cx.bl.unreachable.get() { return; } let _icx = push_ctxt("drop_done_fill_mem"); let bcx = cx; memfill(&B(bcx), llptr, t, adt::DTOR_DONE); } -pub fn init_zero_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) { - if cx.unreachable.get() { return; } +pub fn init_zero_mem<'r, 'blk, 'tcx>(cx: &mut BlockContext<'r, 'blk, 'tcx>, + llptr: ValueRef, t: Ty<'tcx>) { + if cx.bl.unreachable.get() { return; } let _icx = push_ctxt("init_zero_mem"); let bcx = cx; memfill(&B(bcx), llptr, t, 0); @@ -988,7 +1017,8 @@ fn memfill<'a, 'tcx>(b: &Builder<'a, 'tcx>, llptr: ValueRef, ty: Ty<'tcx>, byte: b.call(llintrinsicfn, &[llptr, llzeroval, size, align, volatile], None); } -pub fn alloc_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, name: &str) -> ValueRef { +pub fn alloc_ty<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + t: Ty<'tcx>, name: &str) -> ValueRef { let _icx = push_ctxt("alloc_ty"); let ccx = bcx.ccx(); let ty = type_of::type_of(ccx, t); @@ -997,15 +1027,15 @@ pub fn alloc_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, name: &str) -> return val; } -pub fn alloca(cx: Block, ty: Type, name: &str) -> ValueRef { +pub fn alloca(cx: &mut BlockContext, ty: Type, name: &str) -> ValueRef { let p = alloca_no_lifetime(cx, ty, name); call_lifetime_start(cx, p); p } -pub fn alloca_no_lifetime(cx: Block, ty: Type, name: &str) -> ValueRef { +pub fn alloca_no_lifetime(cx: &mut BlockContext, ty: Type, name: &str) -> ValueRef { let _icx = push_ctxt("alloca"); - if cx.unreachable.get() { + if cx.bl.unreachable.get() { unsafe { return llvm::LLVMGetUndef(ty.ptr_to().to_ref()); 
} @@ -1015,7 +1045,7 @@ pub fn alloca_no_lifetime(cx: Block, ty: Type, name: &str) -> ValueRef { } // Creates the alloca slot which holds the pointer to the slot for the final return value -pub fn make_return_slot_pointer<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, +pub fn make_return_slot_pointer<'a, 'tcx>(fcx: &mut FunctionContext<'a, 'tcx>, output_type: Ty<'tcx>) -> ValueRef { let lloutputtype = type_of::type_of(fcx.ccx, output_type); @@ -1031,7 +1061,7 @@ pub fn make_return_slot_pointer<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, let outptr = get_param(fcx.llfn, 0); let b = fcx.ccx.builder(); - b.position_before(fcx.alloca_insert_pt.get().unwrap()); + b.position_before(fcx.alloca_insert_pt.unwrap()); b.store(outptr, slot); } @@ -1168,7 +1198,7 @@ pub fn new_fn_ctxt<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, output_type: ty::FnOutput<'tcx>, param_substs: &'tcx Substs<'tcx>, sp: Option, - block_arena: &'a TypedArena>) + block_arena: &'a TypedArena) -> FunctionContext<'a, 'tcx> { common::validate_substs(param_substs); @@ -1199,22 +1229,22 @@ pub fn new_fn_ctxt<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, let mut fcx = FunctionContext { llfn: llfndecl, llenv: None, - llretslotptr: Cell::new(None), + llretslotptr: None, param_env: ty::empty_parameter_environment(ccx.tcx()), - alloca_insert_pt: Cell::new(None), - llreturn: Cell::new(None), + alloca_insert_pt: None, + llreturn: None, needs_ret_allocas: nested_returns, - personality: Cell::new(None), + personality: None, caller_expects_out_pointer: uses_outptr, - lllocals: RefCell::new(NodeMap()), - llupvars: RefCell::new(NodeMap()), + lllocals: NodeMap(), + llupvars: NodeMap(), id: id, param_substs: param_substs, span: sp, block_arena: block_arena, ccx: ccx, debug_context: debug_context, - scopes: RefCell::new(Vec::new()), + scopes: Vec::new(), cfg: cfg }; @@ -1227,18 +1257,19 @@ pub fn new_fn_ctxt<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, /// Performs setup on a newly created function, creating the entry scope block /// and allocating space for the return pointer. -pub fn init_function<'a, 'tcx>(fcx: &'a FunctionContext<'a, 'tcx>, - skip_retptr: bool, - output: ty::FnOutput<'tcx>) - -> Block<'a, 'tcx> { +pub fn init_function<'r, 'a, 'tcx>(fcx: &'r mut FunctionContext<'a, 'tcx>, + skip_retptr: bool, + output: ty::FnOutput<'tcx>) + -> &'a Block { let entry_bcx = fcx.new_temp_block("entry-block"); // Use a dummy instruction as the insertion point for all allocas. // This is later removed in FunctionContext::cleanup. - fcx.alloca_insert_pt.set(Some(unsafe { - Load(entry_bcx, C_null(Type::i8p(fcx.ccx))); + let pt = unsafe { + Load(&mut entry_bcx.with_fcx(fcx), C_null(Type::i8p(fcx.ccx))); llvm::LLVMGetFirstInstruction(entry_bcx.llbb) - })); + }; + fcx.alloca_insert_pt = Some(pt); if let ty::FnConverging(output_type) = output { // This shouldn't need to recompute the return type, @@ -1251,7 +1282,8 @@ pub fn init_function<'a, 'tcx>(fcx: &'a FunctionContext<'a, 'tcx>, // Otherwise, we normally allocate the llretslotptr, unless we // have been instructed to skip it for immediate return // values. 
- fcx.llretslotptr.set(Some(make_return_slot_pointer(fcx, substd_output_type))); + let p = make_return_slot_pointer(fcx, substd_output_type); + fcx.llretslotptr = Some(p); } } } @@ -1266,7 +1298,7 @@ pub fn init_function<'a, 'tcx>(fcx: &'a FunctionContext<'a, 'tcx>, // - new_fn_ctxt // - trans_args -pub fn arg_kind<'a, 'tcx>(cx: &FunctionContext<'a, 'tcx>, t: Ty<'tcx>) +pub fn arg_kind<'a, 'tcx>(cx: &mut FunctionContext<'a, 'tcx>, t: Ty<'tcx>) -> datum::Rvalue { use trans::datum::{ByRef, ByValue}; @@ -1281,7 +1313,7 @@ pub type RvalueDatum<'tcx> = datum::Datum<'tcx, datum::Rvalue>; // create_datums_for_fn_args: creates rvalue datums for each of the // incoming function arguments. These will later be stored into // appropriate lvalue datums. -pub fn create_datums_for_fn_args<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, +pub fn create_datums_for_fn_args<'a, 'tcx>(fcx: &mut FunctionContext<'a, 'tcx>, arg_tys: &[Ty<'tcx>]) -> Vec> { let _icx = push_ctxt("create_datums_for_fn_args"); @@ -1299,8 +1331,8 @@ pub fn create_datums_for_fn_args<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, /// datums. /// /// FIXME(pcwalton): Reduce the amount of code bloat this is responsible for. -fn create_datums_for_fn_args_under_call_abi<'blk, 'tcx>( - mut bcx: Block<'blk, 'tcx>, +fn create_datums_for_fn_args_under_call_abi<'r, 'blk, 'tcx>( + &mut BlockContext {bl, ref mut fcx}: &mut BlockContext<'r, 'blk, 'tcx>, arg_scope: cleanup::CustomScopeIndex, arg_tys: &[Ty<'tcx>]) -> Vec> { @@ -1308,8 +1340,8 @@ fn create_datums_for_fn_args_under_call_abi<'blk, 'tcx>( for (i, &arg_ty) in arg_tys.iter().enumerate() { if i < arg_tys.len() - 1 { // Regular argument. - let llarg = get_param(bcx.fcx.llfn, bcx.fcx.arg_pos(i) as c_uint); - result.push(datum::Datum::new(llarg, arg_ty, arg_kind(bcx.fcx, + let llarg = get_param(fcx.llfn, fcx.arg_pos(i) as c_uint); + result.push(datum::Datum::new(llarg, arg_ty, arg_kind(fcx, arg_ty))); continue } @@ -1318,6 +1350,7 @@ fn create_datums_for_fn_args_under_call_abi<'blk, 'tcx>( match arg_ty.sty { ty::ty_tup(ref tupled_arg_tys) => { let tuple_args_scope_id = cleanup::CustomScope(arg_scope); + let mut bcx = &mut bl.with_fcx(fcx); let tuple = unpack_datum!(bcx, datum::lvalue_scratch_datum(bcx, @@ -1328,6 +1361,7 @@ fn create_datums_for_fn_args_under_call_abi<'blk, 'tcx>( |(), mut bcx, llval| { + let mut bl = bcx.bl; for (j, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() { let llarg = @@ -1338,9 +1372,9 @@ fn create_datums_for_fn_args_under_call_abi<'blk, 'tcx>( llarg, tupled_arg_ty, arg_kind(bcx.fcx, tupled_arg_ty)); - bcx = datum.store_to(bcx, lldest); + bl = datum.store_to(&mut bl.with_fcx(bcx.fcx), lldest); } - bcx + bl })); let tuple = unpack_datum!(bcx, tuple.to_expr_datum() @@ -1349,7 +1383,7 @@ fn create_datums_for_fn_args_under_call_abi<'blk, 'tcx>( result.push(tuple); } _ => { - bcx.tcx().sess.bug("last argument of a function with \ + fcx.tcx().sess.bug("last argument of a function with \ `rust-call` ABI isn't a tuple?!") } }; @@ -1359,15 +1393,16 @@ fn create_datums_for_fn_args_under_call_abi<'blk, 'tcx>( result } -fn copy_args_to_allocas<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - arg_scope: cleanup::CustomScopeIndex, - args: &[ast::Arg], - arg_datums: Vec>) - -> Block<'blk, 'tcx> { +fn copy_args_to_allocas<'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + arg_scope: cleanup::CustomScopeIndex, + args: &[ast::Arg], + arg_datums: Vec>) + -> &'blk Block { debug!("copy_args_to_allocas"); let _icx = push_ctxt("copy_args_to_allocas"); - let mut 
bcx = bcx; + let mut bcx = &mut bl.with_fcx(fcx); let arg_scope_id = cleanup::CustomScope(arg_scope); @@ -1380,27 +1415,27 @@ fn copy_args_to_allocas<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // This alloca should be optimized away by LLVM's mem-to-reg pass in // the event it's not truly needed. - bcx = _match::store_arg(bcx, &*args[i].pat, arg_datum, arg_scope_id); + bcx.bl = _match::store_arg(bcx, &*args[i].pat, arg_datum, arg_scope_id); debuginfo::create_argument_metadata(bcx, &args[i]); } - bcx + bcx.bl } // Ties up the llstaticallocas -> llloadenv -> lltop edges, // and builds the return block. -pub fn finish_fn<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>, - last_bcx: Block<'blk, 'tcx>, - retty: ty::FnOutput<'tcx>, - ret_debug_loc: DebugLoc) { +pub fn finish_fn<'r, 'blk, 'tcx>(fcx: &mut FunctionContext<'blk, 'tcx>, + last_bcx: &'blk Block, + retty: ty::FnOutput<'tcx>, + ret_debug_loc: DebugLoc) { let _icx = push_ctxt("finish_fn"); - let ret_cx = match fcx.llreturn.get() { + let ret_cx = match fcx.llreturn { Some(llreturn) => { if !last_bcx.terminated.get() { - Br(last_bcx, llreturn, DebugLoc::None); + Br(&mut last_bcx.with_fcx(fcx), llreturn, DebugLoc::None); } - raw_block(fcx, false, llreturn) + &*raw_block(fcx, false, llreturn) } None => last_bcx }; @@ -1415,22 +1450,23 @@ pub fn finish_fn<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>, } // Builds the return block for a function. -pub fn build_return_block<'blk, 'tcx>(fcx: &FunctionContext<'blk, 'tcx>, - ret_cx: Block<'blk, 'tcx>, - retty: ty::FnOutput<'tcx>, - ret_debug_location: DebugLoc) { - if fcx.llretslotptr.get().is_none() || +pub fn build_return_block<'r, 'blk, 'tcx>(fcx: &'r mut FunctionContext<'blk, 'tcx>, + ret_cx: &'blk Block, + retty: ty::FnOutput<'tcx>, + ret_debug_location: DebugLoc) { + if fcx.llretslotptr.is_none() || (!fcx.needs_ret_allocas && fcx.caller_expects_out_pointer) { - return RetVoid(ret_cx, ret_debug_location); + return RetVoid(&mut ret_cx.with_fcx(fcx), ret_debug_location); } let retslot = if fcx.needs_ret_allocas { - Load(ret_cx, fcx.llretslotptr.get().unwrap()) + let p = fcx.llretslotptr.unwrap(); + Load(&mut ret_cx.with_fcx(fcx), p) } else { - fcx.llretslotptr.get().unwrap() + fcx.llretslotptr.unwrap() }; let retptr = Value(retslot); - match retptr.get_dominating_store(ret_cx) { + match retptr.get_dominating_store(&mut ret_cx.with_fcx(fcx)) { // If there's only a single store to the ret slot, we can directly return // the value that was stored and omit the store and the alloca Some(s) => { @@ -1442,35 +1478,36 @@ pub fn build_return_block<'blk, 'tcx>(fcx: &FunctionContext<'blk, 'tcx>, } let retval = if retty == ty::FnConverging(fcx.ccx.tcx().types.bool) { - Trunc(ret_cx, retval, Type::i1(fcx.ccx)) + Trunc(&mut ret_cx.with_fcx(fcx), retval, Type::i1(fcx.ccx)) } else { retval }; if fcx.caller_expects_out_pointer { if let ty::FnConverging(retty) = retty { - store_ty(ret_cx, retval, get_param(fcx.llfn, 0), retty); + store_ty(&mut ret_cx.with_fcx(fcx), retval, get_param(fcx.llfn, 0), retty); } - RetVoid(ret_cx, ret_debug_location) + RetVoid(&mut ret_cx.with_fcx(fcx), ret_debug_location) } else { - Ret(ret_cx, retval, ret_debug_location) + Ret(&mut ret_cx.with_fcx(fcx), retval, ret_debug_location) } } // Otherwise, copy the return value to the ret slot None => match retty { ty::FnConverging(retty) => { if fcx.caller_expects_out_pointer { - memcpy_ty(ret_cx, get_param(fcx.llfn, 0), retslot, retty); - RetVoid(ret_cx, ret_debug_location) + memcpy_ty(&mut ret_cx.with_fcx(fcx), get_param(fcx.llfn, 0), 
retslot, retty); + RetVoid(&mut ret_cx.with_fcx(fcx), ret_debug_location) } else { - Ret(ret_cx, load_ty(ret_cx, retslot, retty), ret_debug_location) + let ty = load_ty(&mut ret_cx.with_fcx(fcx), retslot, retty); + Ret(&mut ret_cx.with_fcx(fcx), ty, ret_debug_location) } } ty::FnDiverging => { if fcx.caller_expects_out_pointer { - RetVoid(ret_cx, ret_debug_location) + RetVoid(&mut ret_cx.with_fcx(fcx), ret_debug_location) } else { - Ret(ret_cx, C_undef(Type::nil(fcx.ccx)), ret_debug_location) + Ret(&mut ret_cx.with_fcx(fcx), C_undef(Type::nil(fcx.ccx)), ret_debug_location) } } } @@ -1503,7 +1540,7 @@ pub fn trans_closure<'a, 'b, 'tcx>(ccx: &CrateContext<'a, 'tcx>, closure::ClosureEnv::NotClosure => false, }; - let (arena, fcx): (TypedArena<_>, FunctionContext); + let (arena, mut fcx): (TypedArena<_>, FunctionContext); arena = TypedArena::new(); fcx = new_fn_ctxt(ccx, llfndecl, @@ -1513,19 +1550,20 @@ pub fn trans_closure<'a, 'b, 'tcx>(ccx: &CrateContext<'a, 'tcx>, param_substs, Some(body.span), &arena); - let mut bcx = init_function(&fcx, false, output_type); + let mut fcx = &mut fcx; + let mut bcx = init_function(fcx, false, output_type); // cleanup scope for the incoming arguments let fn_cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(ccx, fn_ast_id, body.span, true); let arg_scope = fcx.push_custom_cleanup_scope_with_debug_loc(fn_cleanup_debug_loc); - let block_ty = node_id_type(bcx, body.id); + let block_ty = node_id_type(&mut bcx.with_fcx(fcx), body.id); // Set up arguments to the function. let monomorphized_arg_types = decl.inputs.iter() - .map(|arg| node_id_type(bcx, arg.id)) + .map(|arg| node_id_type(&mut bcx.with_fcx(fcx), arg.id)) .collect::>(); let monomorphized_arg_types = match closure_env { closure::ClosureEnv::NotClosure => { @@ -1542,31 +1580,32 @@ pub fn trans_closure<'a, 'b, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty_to_string(ccx.tcx(), *monomorphized_arg_type)); } debug!("trans_closure: function lltype: {}", - bcx.fcx.ccx.tn().val_to_string(bcx.fcx.llfn)); + fcx.ccx.tn().val_to_string(fcx.llfn)); let arg_datums = match closure_env { closure::ClosureEnv::NotClosure if abi == RustCall => { - create_datums_for_fn_args_under_call_abi(bcx, arg_scope, &monomorphized_arg_types[..]) + create_datums_for_fn_args_under_call_abi(&mut bcx.with_fcx(fcx), arg_scope, + &monomorphized_arg_types[..]) } _ => { let arg_tys = untuple_arguments_if_necessary(ccx, &monomorphized_arg_types, abi); - create_datums_for_fn_args(&fcx, &arg_tys) + create_datums_for_fn_args(&mut fcx, &arg_tys) } }; - bcx = copy_args_to_allocas(bcx, arg_scope, &decl.inputs, arg_datums); + bcx = copy_args_to_allocas(&mut bcx.with_fcx(fcx), arg_scope, &decl.inputs, arg_datums); - bcx = closure_env.load(bcx, cleanup::CustomScope(arg_scope)); + bcx = closure_env.load(&mut bcx.with_fcx(fcx), cleanup::CustomScope(arg_scope)); // Up until here, IR instructions for this function have explicitly not been annotated with // source code location, so we don't step into call setup code. From here on, source location // emitting should be enabled. 
- debuginfo::start_emitting_source_locations(&fcx); + debuginfo::start_emitting_source_locations(&mut fcx); - let dest = match fcx.llretslotptr.get() { + let dest = match fcx.llretslotptr { Some(_) => expr::SaveIn(fcx.get_ret_slot(bcx, ty::FnConverging(block_ty), "iret_slot")), None => { - assert!(type_is_zero_size(bcx.ccx(), block_ty)); + assert!(type_is_zero_size(&mut bcx.with_fcx(fcx).ccx(), block_ty)); expr::Ignore } }; @@ -1575,18 +1614,20 @@ pub fn trans_closure<'a, 'b, 'tcx>(ccx: &CrateContext<'a, 'tcx>, // translation calls that don't have a return value (trans_crate, // trans_mod, trans_item, et cetera) and those that do // (trans_block, trans_expr, et cetera). - bcx = controlflow::trans_block(bcx, body, dest); + bcx = controlflow::trans_block(&mut bcx.with_fcx(fcx), body, dest); match dest { expr::SaveIn(slot) if fcx.needs_ret_allocas => { - Store(bcx, slot, fcx.llretslotptr.get().unwrap()); + let p = fcx.llretslotptr.unwrap(); + Store(&mut bcx.with_fcx(fcx), slot, p); } _ => {} } - match fcx.llreturn.get() { + match fcx.llreturn { Some(_) => { - Br(bcx, fcx.return_exit_block(), DebugLoc::None); + let b = fcx.return_exit_block(); + Br(&mut bcx.with_fcx(fcx), b, DebugLoc::None); fcx.pop_custom_cleanup_scope(arg_scope); } None => { @@ -1599,7 +1640,7 @@ pub fn trans_closure<'a, 'b, 'tcx>(ccx: &CrateContext<'a, 'tcx>, // Put return block after all other blocks. // This somewhat improves single-stepping experience in debugger. unsafe { - let llreturn = fcx.llreturn.get(); + let llreturn = fcx.llreturn; if let Some(llreturn) = llreturn { llvm::LLVMMoveBasicBlockAfter(llreturn, bcx.llbb); } @@ -1609,7 +1650,7 @@ pub fn trans_closure<'a, 'b, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn_cleanup_debug_loc.span); // Insert the mandatory first few basic blocks before lltop. - finish_fn(&fcx, bcx, output_type, ret_debug_loc); + finish_fn(&mut fcx, bcx, output_type, ret_debug_loc); } /// Creates an LLVM function corresponding to a source language function. 
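The `new_fn_ctxt`/`init_function` hunks above strip the `Cell` wrappers from `FunctionContext` fields (`llretslotptr`, `alloca_insert_pt`, `llreturn`, `personality`, plus the `RefCell`s around `lllocals`, `llupvars`, and `scopes`), now that the context is threaded as `&mut FunctionContext`. A minimal, self-contained sketch of that pattern, using made-up types rather than the compiler's own:

use std::cell::Cell;

// Before: with only `&Fcx` available, mutable state hides behind Cell.
struct FcxShared {
    llretslotptr: Cell<Option<u32>>,
}

fn set_ret_slot_shared(fcx: &FcxShared, slot: u32) {
    fcx.llretslotptr.set(Some(slot)); // interior mutability, checked at runtime
}

// After: threading `&mut Fcx` makes the field plain data.
struct FcxMut {
    llretslotptr: Option<u32>,
}

fn set_ret_slot_mut(fcx: &mut FcxMut, slot: u32) {
    fcx.llretslotptr = Some(slot); // ordinary, statically checked assignment
}

fn main() {
    let shared = FcxShared { llretslotptr: Cell::new(None) };
    set_ret_slot_shared(&shared, 7);
    assert_eq!(shared.llretslotptr.get(), Some(7));

    let mut owned = FcxMut { llretslotptr: None };
    set_ret_slot_mut(&mut owned, 7);
    assert_eq!(owned.llretslotptr, Some(7));
}

The payoff is visible in hunks such as `build_return_block`, where `fcx.llretslotptr.get().unwrap()` becomes `fcx.llretslotptr.unwrap()` and the mutation is enforced by the borrow checker rather than hidden behind interior mutability.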
@@ -1647,14 +1688,15 @@ pub fn trans_enum_variant<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, llfndecl); } -pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - ctor_ty: Ty<'tcx>, - disr: ty::Disr, - args: callee::CallArgs, - dest: expr::Dest, - debug_loc: DebugLoc) - -> Result<'blk, 'tcx> { - +pub fn trans_named_tuple_constructor<'r, 'blk, 'tcx>( + &mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + ctor_ty: Ty<'tcx>, + disr: ty::Disr, + args: callee::CallArgs, + dest: expr::Dest, + debug_loc: DebugLoc) + -> Result<'blk> { + let mut bcx = &mut bl.with_fcx(fcx); let ccx = bcx.fcx.ccx; let tcx = ccx.tcx(); @@ -1685,13 +1727,13 @@ pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, match args { callee::ArgExprs(exprs) => { let fields = exprs.iter().map(|x| &**x).enumerate().collect::>(); - bcx = expr::trans_adt(bcx, - result_ty, - disr, - &fields[..], - None, - expr::SaveIn(llresult), - debug_loc); + bcx.bl = expr::trans_adt(bcx, + result_ty, + disr, + &fields[..], + None, + expr::SaveIn(llresult), + debug_loc); } _ => ccx.sess().bug("expected expr as arguments for variant/struct tuple constructor") } @@ -1700,13 +1742,13 @@ pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, // If the caller doesn't care about the result // drop the temporary we made let bcx = match dest { - expr::SaveIn(_) => bcx, + expr::SaveIn(_) => bcx.bl, expr::Ignore => { - let bcx = glue::drop_ty(bcx, llresult, result_ty, debug_loc); + let bcx_ = glue::drop_ty(bcx, llresult, result_ty, debug_loc); if !type_is_zero_size(ccx, result_ty) { - call_lifetime_end(bcx, llresult); + call_lifetime_end(&mut bcx_.with_fcx(bcx.fcx), llresult); } - bcx + bcx_ } }; @@ -1746,11 +1788,12 @@ fn trans_enum_variant_or_tuple_like_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx ty_to_string(ccx.tcx(), ctor_ty))) }; - let (arena, fcx): (TypedArena<_>, FunctionContext); + let (arena, mut fcx): (TypedArena<_>, FunctionContext); arena = TypedArena::new(); fcx = new_fn_ctxt(ccx, llfndecl, ctor_id, false, result_ty, param_substs, None, &arena); - let bcx = init_function(&fcx, false, result_ty); + let mut fcx = &mut fcx; + let bcx = init_function(fcx, false, result_ty); assert!(!fcx.needs_ret_allocas); @@ -1758,23 +1801,23 @@ fn trans_enum_variant_or_tuple_like_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx ty::erase_late_bound_regions( ccx.tcx(), &ty::ty_fn_args(ctor_ty)); - let arg_datums = create_datums_for_fn_args(&fcx, &arg_tys[..]); + let arg_datums = create_datums_for_fn_args(&mut fcx, &arg_tys[..]); if !type_is_zero_size(fcx.ccx, result_ty.unwrap()) { let dest = fcx.get_ret_slot(bcx, result_ty, "eret_slot"); let repr = adt::represent_type(ccx, result_ty.unwrap()); for (i, arg_datum) in arg_datums.into_iter().enumerate() { - let lldestptr = adt::trans_field_ptr(bcx, + let lldestptr = adt::trans_field_ptr(&mut bcx.with_fcx(fcx), &*repr, dest, disr, i); - arg_datum.store_to(bcx, lldestptr); + arg_datum.store_to(&mut bcx.with_fcx(fcx), lldestptr); } - adt::trans_set_discr(bcx, &*repr, dest, disr); + adt::trans_set_discr(&mut bcx.with_fcx(fcx), &*repr, dest, disr); } - finish_fn(&fcx, bcx, result_ty, DebugLoc::None); + finish_fn(fcx, bcx, result_ty, DebugLoc::None); } fn enum_variant_size_lint(ccx: &CrateContext, enum_def: &ast::EnumDef, sp: Span, id: ast::NodeId) { @@ -2088,7 +2131,7 @@ pub fn register_fn_llvmty(ccx: &CrateContext, sp: Span, sym: String, node_id: ast::NodeId, - cc: llvm::CallConv, + cc: llvm::CallConv, llfty: Type) -> ValueRef { 
debug!("register_fn_llvmty id={} sym={}", node_id, sym); diff --git a/src/librustc_trans/trans/build.rs b/src/librustc_trans/trans/build.rs index d6ac412a4faea..39fb7a7844bfc 100644 --- a/src/librustc_trans/trans/build.rs +++ b/src/librustc_trans/trans/build.rs @@ -24,20 +24,20 @@ use trans::debuginfo::DebugLoc; use libc::{c_uint, c_char}; -pub fn terminate(cx: Block, _: &str) { +pub fn terminate(cx: &mut BlockContext, _: &str) { debug!("terminate({})", cx.to_str()); - cx.terminated.set(true); + cx.bl.terminated.set(true); } -pub fn check_not_terminated(cx: Block) { - if cx.terminated.get() { +pub fn check_not_terminated(cx: &mut BlockContext) { + if cx.bl.terminated.get() { panic!("already terminated!"); } } -pub fn B<'blk, 'tcx>(cx: Block<'blk, 'tcx>) -> Builder<'blk, 'tcx> { +pub fn B<'r, 'blk, 'tcx>(cx: &mut BlockContext<'r, 'blk, 'tcx>) -> Builder<'blk, 'tcx> { let b = cx.fcx.ccx.builder(); - b.position_at_end(cx.llbb); + b.position_at_end(cx.bl.llbb); b } @@ -49,8 +49,8 @@ pub fn B<'blk, 'tcx>(cx: Block<'blk, 'tcx>) -> Builder<'blk, 'tcx> { // for (panic/break/return statements, call to diverging functions, etc), and // further instructions to the block should simply be ignored. -pub fn RetVoid(cx: Block, debug_loc: DebugLoc) { - if cx.unreachable.get() { +pub fn RetVoid(cx: &mut BlockContext, debug_loc: DebugLoc) { + if cx.bl.unreachable.get() { return; } check_not_terminated(cx); @@ -59,8 +59,8 @@ pub fn RetVoid(cx: Block, debug_loc: DebugLoc) { B(cx).ret_void(); } -pub fn Ret(cx: Block, v: ValueRef, debug_loc: DebugLoc) { - if cx.unreachable.get() { +pub fn Ret(cx: &mut BlockContext, v: ValueRef, debug_loc: DebugLoc) { + if cx.bl.unreachable.get() { return; } check_not_terminated(cx); @@ -69,10 +69,10 @@ pub fn Ret(cx: Block, v: ValueRef, debug_loc: DebugLoc) { B(cx).ret(v); } -pub fn AggregateRet(cx: Block, +pub fn AggregateRet(cx: &mut BlockContext, ret_vals: &[ValueRef], debug_loc: DebugLoc) { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return; } check_not_terminated(cx); @@ -81,8 +81,8 @@ pub fn AggregateRet(cx: Block, B(cx).aggregate_ret(ret_vals); } -pub fn Br(cx: Block, dest: BasicBlockRef, debug_loc: DebugLoc) { - if cx.unreachable.get() { +pub fn Br(cx: &mut BlockContext, dest: BasicBlockRef, debug_loc: DebugLoc) { + if cx.bl.unreachable.get() { return; } check_not_terminated(cx); @@ -91,12 +91,12 @@ pub fn Br(cx: Block, dest: BasicBlockRef, debug_loc: DebugLoc) { B(cx).br(dest); } -pub fn CondBr(cx: Block, +pub fn CondBr(cx: &mut BlockContext, if_: ValueRef, then: BasicBlockRef, else_: BasicBlockRef, debug_loc: DebugLoc) { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return; } check_not_terminated(cx); @@ -105,9 +105,9 @@ pub fn CondBr(cx: Block, B(cx).cond_br(if_, then, else_); } -pub fn Switch(cx: Block, v: ValueRef, else_: BasicBlockRef, num_cases: usize) +pub fn Switch(cx: &mut BlockContext, v: ValueRef, else_: BasicBlockRef, num_cases: usize) -> ValueRef { - if cx.unreachable.get() { return _Undef(v); } + if cx.bl.unreachable.get() { return _Undef(v); } check_not_terminated(cx); terminate(cx, "Switch"); B(cx).switch(v, else_, num_cases) @@ -120,11 +120,11 @@ pub fn AddCase(s: ValueRef, on_val: ValueRef, dest: BasicBlockRef) { } } -pub fn IndirectBr(cx: Block, +pub fn IndirectBr(cx: &mut BlockContext, addr: ValueRef, num_dests: usize, debug_loc: DebugLoc) { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return; } check_not_terminated(cx); @@ -133,7 +133,7 @@ pub fn IndirectBr(cx: Block, B(cx).indirect_br(addr, num_dests); } 
-pub fn Invoke(cx: Block, +pub fn Invoke(cx: &mut BlockContext, fn_: ValueRef, args: &[ValueRef], then: BasicBlockRef, @@ -141,7 +141,7 @@ pub fn Invoke(cx: Block, attributes: Option, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return C_null(Type::i8(cx.ccx())); } check_not_terminated(cx); @@ -153,12 +153,12 @@ pub fn Invoke(cx: Block, B(cx).invoke(fn_, args, then, catch, attributes) } -pub fn Unreachable(cx: Block) { - if cx.unreachable.get() { +pub fn Unreachable(cx: &mut BlockContext) { + if cx.bl.unreachable.get() { return } - cx.unreachable.set(true); - if !cx.terminated.get() { + cx.bl.unreachable.set(true); + if !cx.bl.terminated.get() { B(cx).unreachable(); } } @@ -170,352 +170,352 @@ pub fn _Undef(val: ValueRef) -> ValueRef { } /* Arithmetic */ -pub fn Add(cx: Block, +pub fn Add(cx: &mut BlockContext, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return _Undef(lhs); } debug_loc.apply(cx.fcx); B(cx).add(lhs, rhs) } -pub fn NSWAdd(cx: Block, +pub fn NSWAdd(cx: &mut BlockContext, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return _Undef(lhs); } debug_loc.apply(cx.fcx); B(cx).nswadd(lhs, rhs) } -pub fn NUWAdd(cx: Block, +pub fn NUWAdd(cx: &mut BlockContext, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return _Undef(lhs); } debug_loc.apply(cx.fcx); B(cx).nuwadd(lhs, rhs) } -pub fn FAdd(cx: Block, +pub fn FAdd(cx: &mut BlockContext, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return _Undef(lhs); } debug_loc.apply(cx.fcx); B(cx).fadd(lhs, rhs) } -pub fn Sub(cx: Block, +pub fn Sub(cx: &mut BlockContext, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return _Undef(lhs); } debug_loc.apply(cx.fcx); B(cx).sub(lhs, rhs) } -pub fn NSWSub(cx: Block, +pub fn NSWSub(cx: &mut BlockContext, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return _Undef(lhs); } debug_loc.apply(cx.fcx); B(cx).nswsub(lhs, rhs) } -pub fn NUWSub(cx: Block, +pub fn NUWSub(cx: &mut BlockContext, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return _Undef(lhs); } debug_loc.apply(cx.fcx); B(cx).nuwsub(lhs, rhs) } -pub fn FSub(cx: Block, +pub fn FSub(cx: &mut BlockContext, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return _Undef(lhs); } debug_loc.apply(cx.fcx); B(cx).fsub(lhs, rhs) } -pub fn Mul(cx: Block, +pub fn Mul(cx: &mut BlockContext, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return _Undef(lhs); } debug_loc.apply(cx.fcx); B(cx).mul(lhs, rhs) } -pub fn NSWMul(cx: Block, +pub fn NSWMul(cx: &mut BlockContext, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return _Undef(lhs); } debug_loc.apply(cx.fcx); B(cx).nswmul(lhs, rhs) } -pub fn NUWMul(cx: Block, +pub fn NUWMul(cx: &mut BlockContext, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if 
cx.bl.unreachable.get() { return _Undef(lhs); } debug_loc.apply(cx.fcx); B(cx).nuwmul(lhs, rhs) } -pub fn FMul(cx: Block, +pub fn FMul(cx: &mut BlockContext, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return _Undef(lhs); } debug_loc.apply(cx.fcx); B(cx).fmul(lhs, rhs) } -pub fn UDiv(cx: Block, +pub fn UDiv(cx: &mut BlockContext, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return _Undef(lhs); } debug_loc.apply(cx.fcx); B(cx).udiv(lhs, rhs) } -pub fn SDiv(cx: Block, +pub fn SDiv(cx: &mut BlockContext, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return _Undef(lhs); } debug_loc.apply(cx.fcx); B(cx).sdiv(lhs, rhs) } -pub fn ExactSDiv(cx: Block, +pub fn ExactSDiv(cx: &mut BlockContext, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return _Undef(lhs); } debug_loc.apply(cx.fcx); B(cx).exactsdiv(lhs, rhs) } -pub fn FDiv(cx: Block, +pub fn FDiv(cx: &mut BlockContext, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return _Undef(lhs); } debug_loc.apply(cx.fcx); B(cx).fdiv(lhs, rhs) } -pub fn URem(cx: Block, +pub fn URem(cx: &mut BlockContext, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return _Undef(lhs); } debug_loc.apply(cx.fcx); B(cx).urem(lhs, rhs) } -pub fn SRem(cx: Block, +pub fn SRem(cx: &mut BlockContext, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return _Undef(lhs); } debug_loc.apply(cx.fcx); B(cx).srem(lhs, rhs) } -pub fn FRem(cx: Block, +pub fn FRem(cx: &mut BlockContext, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return _Undef(lhs); } debug_loc.apply(cx.fcx); B(cx).frem(lhs, rhs) } -pub fn Shl(cx: Block, +pub fn Shl(cx: &mut BlockContext, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return _Undef(lhs); } debug_loc.apply(cx.fcx); B(cx).shl(lhs, rhs) } -pub fn LShr(cx: Block, +pub fn LShr(cx: &mut BlockContext, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return _Undef(lhs); } debug_loc.apply(cx.fcx); B(cx).lshr(lhs, rhs) } -pub fn AShr(cx: Block, +pub fn AShr(cx: &mut BlockContext, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return _Undef(lhs); } debug_loc.apply(cx.fcx); B(cx).ashr(lhs, rhs) } -pub fn And(cx: Block, +pub fn And(cx: &mut BlockContext, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return _Undef(lhs); } debug_loc.apply(cx.fcx); B(cx).and(lhs, rhs) } -pub fn Or(cx: Block, +pub fn Or(cx: &mut BlockContext, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return _Undef(lhs); } debug_loc.apply(cx.fcx); B(cx).or(lhs, rhs) } -pub fn Xor(cx: Block, +pub fn Xor(cx: &mut BlockContext, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { 
+ if cx.bl.unreachable.get() { return _Undef(lhs); } debug_loc.apply(cx.fcx); B(cx).xor(lhs, rhs) } -pub fn BinOp(cx: Block, +pub fn BinOp(cx: &mut BlockContext, op: Opcode, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return _Undef(lhs); } debug_loc.apply(cx.fcx); B(cx).binop(op, lhs, rhs) } -pub fn Neg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { +pub fn Neg(cx: &mut BlockContext, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { + if cx.bl.unreachable.get() { return _Undef(v); } debug_loc.apply(cx.fcx); B(cx).neg(v) } -pub fn NSWNeg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { +pub fn NSWNeg(cx: &mut BlockContext, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { + if cx.bl.unreachable.get() { return _Undef(v); } debug_loc.apply(cx.fcx); B(cx).nswneg(v) } -pub fn NUWNeg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { +pub fn NUWNeg(cx: &mut BlockContext, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { + if cx.bl.unreachable.get() { return _Undef(v); } debug_loc.apply(cx.fcx); B(cx).nuwneg(v) } -pub fn FNeg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { +pub fn FNeg(cx: &mut BlockContext, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { + if cx.bl.unreachable.get() { return _Undef(v); } debug_loc.apply(cx.fcx); B(cx).fneg(v) } -pub fn Not(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { +pub fn Not(cx: &mut BlockContext, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { + if cx.bl.unreachable.get() { return _Undef(v); } debug_loc.apply(cx.fcx); @@ -523,9 +523,9 @@ pub fn Not(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { } /* Memory */ -pub fn Malloc(cx: Block, ty: Type, debug_loc: DebugLoc) -> ValueRef { +pub fn Malloc(cx: &mut BlockContext, ty: Type, debug_loc: DebugLoc) -> ValueRef { unsafe { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(Type::i8p(cx.ccx()).to_ref()); } debug_loc.apply(cx.fcx); @@ -533,12 +533,12 @@ pub fn Malloc(cx: Block, ty: Type, debug_loc: DebugLoc) -> ValueRef { } } -pub fn ArrayMalloc(cx: Block, +pub fn ArrayMalloc(cx: &mut BlockContext, ty: Type, val: ValueRef, debug_loc: DebugLoc) -> ValueRef { unsafe { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(Type::i8p(cx.ccx()).to_ref()); } debug_loc.apply(cx.fcx); @@ -546,39 +546,39 @@ pub fn ArrayMalloc(cx: Block, } } -pub fn Alloca(cx: Block, ty: Type, name: &str) -> ValueRef { +pub fn Alloca(cx: &mut BlockContext, ty: Type, name: &str) -> ValueRef { unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.ptr_to().to_ref()); } + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(ty.ptr_to().to_ref()); } AllocaFcx(cx.fcx, ty, name) } } -pub fn AllocaFcx(fcx: &FunctionContext, ty: Type, name: &str) -> ValueRef { +pub fn AllocaFcx(fcx: &mut FunctionContext, ty: Type, name: &str) -> ValueRef { let b = fcx.ccx.builder(); - b.position_before(fcx.alloca_insert_pt.get().unwrap()); + b.position_before(fcx.alloca_insert_pt.unwrap()); DebugLoc::None.apply(fcx); b.alloca(ty, name) } -pub fn ArrayAlloca(cx: Block, ty: Type, val: ValueRef) -> ValueRef { +pub fn ArrayAlloca(cx: &mut BlockContext, ty: Type, val: ValueRef) -> ValueRef { unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.ptr_to().to_ref()); } + if cx.bl.unreachable.get() { return 
llvm::LLVMGetUndef(ty.ptr_to().to_ref()); } let b = cx.fcx.ccx.builder(); - b.position_before(cx.fcx.alloca_insert_pt.get().unwrap()); + b.position_before(cx.fcx.alloca_insert_pt.unwrap()); DebugLoc::None.apply(cx.fcx); b.array_alloca(ty, val) } } -pub fn Free(cx: Block, pointer_val: ValueRef) { - if cx.unreachable.get() { return; } +pub fn Free(cx: &mut BlockContext, pointer_val: ValueRef) { + if cx.bl.unreachable.get() { return; } B(cx).free(pointer_val) } -pub fn Load(cx: Block, pointer_val: ValueRef) -> ValueRef { +pub fn Load(cx: &mut BlockContext, pointer_val: ValueRef) -> ValueRef { unsafe { let ccx = cx.fcx.ccx; - if cx.unreachable.get() { + if cx.bl.unreachable.get() { let ty = val_ty(pointer_val); let eltty = if ty.kind() == llvm::Array { ty.element_type() @@ -591,19 +591,19 @@ pub fn Load(cx: Block, pointer_val: ValueRef) -> ValueRef { } } -pub fn VolatileLoad(cx: Block, pointer_val: ValueRef) -> ValueRef { +pub fn VolatileLoad(cx: &mut BlockContext, pointer_val: ValueRef) -> ValueRef { unsafe { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); } B(cx).volatile_load(pointer_val) } } -pub fn AtomicLoad(cx: Block, pointer_val: ValueRef, order: AtomicOrdering) -> ValueRef { +pub fn AtomicLoad(cx: &mut BlockContext, pointer_val: ValueRef, order: AtomicOrdering) -> ValueRef { unsafe { let ccx = cx.fcx.ccx; - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(ccx.int_type().to_ref()); } B(cx).atomic_load(pointer_val, order) @@ -611,9 +611,9 @@ pub fn AtomicLoad(cx: Block, pointer_val: ValueRef, order: AtomicOrdering) -> Va } -pub fn LoadRangeAssert(cx: Block, pointer_val: ValueRef, lo: u64, +pub fn LoadRangeAssert(cx: &mut BlockContext, pointer_val: ValueRef, lo: u64, hi: u64, signed: llvm::Bool) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { let ccx = cx.fcx.ccx; let ty = val_ty(pointer_val); let eltty = if ty.kind() == llvm::Array { @@ -629,8 +629,8 @@ pub fn LoadRangeAssert(cx: Block, pointer_val: ValueRef, lo: u64, } } -pub fn LoadNonNull(cx: Block, ptr: ValueRef) -> ValueRef { - if cx.unreachable.get() { +pub fn LoadNonNull(cx: &mut BlockContext, ptr: ValueRef) -> ValueRef { + if cx.bl.unreachable.get() { let ccx = cx.fcx.ccx; let ty = val_ty(ptr); let eltty = if ty.kind() == llvm::Array { @@ -646,24 +646,24 @@ pub fn LoadNonNull(cx: Block, ptr: ValueRef) -> ValueRef { } } -pub fn Store(cx: Block, val: ValueRef, ptr: ValueRef) -> ValueRef { - if cx.unreachable.get() { return C_nil(cx.ccx()); } +pub fn Store(cx: &mut BlockContext, val: ValueRef, ptr: ValueRef) -> ValueRef { + if cx.bl.unreachable.get() { return C_nil(cx.ccx()); } B(cx).store(val, ptr) } -pub fn VolatileStore(cx: Block, val: ValueRef, ptr: ValueRef) -> ValueRef { - if cx.unreachable.get() { return C_nil(cx.ccx()); } +pub fn VolatileStore(cx: &mut BlockContext, val: ValueRef, ptr: ValueRef) -> ValueRef { + if cx.bl.unreachable.get() { return C_nil(cx.ccx()); } B(cx).volatile_store(val, ptr) } -pub fn AtomicStore(cx: Block, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) { - if cx.unreachable.get() { return; } +pub fn AtomicStore(cx: &mut BlockContext, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) { + if cx.bl.unreachable.get() { return; } B(cx).atomic_store(val, ptr, order) } -pub fn GEP(cx: Block, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef { +pub fn GEP(cx: &mut BlockContext, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef { unsafe { - if cx.unreachable.get() { + if 
cx.bl.unreachable.get() { return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref()); } B(cx).gep(pointer, indices) @@ -673,45 +673,45 @@ pub fn GEP(cx: Block, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef { // Simple wrapper around GEP that takes an array of ints and wraps them // in C_i32() #[inline] -pub fn GEPi(cx: Block, base: ValueRef, ixs: &[usize]) -> ValueRef { +pub fn GEPi(cx: &mut BlockContext, base: ValueRef, ixs: &[usize]) -> ValueRef { unsafe { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref()); } B(cx).gepi(base, ixs) } } -pub fn InBoundsGEP(cx: Block, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef { +pub fn InBoundsGEP(cx: &mut BlockContext, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef { unsafe { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref()); } B(cx).inbounds_gep(pointer, indices) } } -pub fn StructGEP(cx: Block, pointer: ValueRef, idx: usize) -> ValueRef { +pub fn StructGEP(cx: &mut BlockContext, pointer: ValueRef, idx: usize) -> ValueRef { unsafe { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref()); } B(cx).struct_gep(pointer, idx) } } -pub fn GlobalString(cx: Block, _str: *const c_char) -> ValueRef { +pub fn GlobalString(cx: &mut BlockContext, _str: *const c_char) -> ValueRef { unsafe { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(Type::i8p(cx.ccx()).to_ref()); } B(cx).global_string(_str) } } -pub fn GlobalStringPtr(cx: Block, _str: *const c_char) -> ValueRef { +pub fn GlobalStringPtr(cx: &mut BlockContext, _str: *const c_char) -> ValueRef { unsafe { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(Type::i8p(cx.ccx()).to_ref()); } B(cx).global_string_ptr(_str) @@ -719,151 +719,151 @@ pub fn GlobalStringPtr(cx: Block, _str: *const c_char) -> ValueRef { } /* Casts */ -pub fn Trunc(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { +pub fn Trunc(cx: &mut BlockContext, val: ValueRef, dest_ty: Type) -> ValueRef { unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } B(cx).trunc(val, dest_ty) } } -pub fn ZExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { +pub fn ZExt(cx: &mut BlockContext, val: ValueRef, dest_ty: Type) -> ValueRef { unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } B(cx).zext(val, dest_ty) } } -pub fn SExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { +pub fn SExt(cx: &mut BlockContext, val: ValueRef, dest_ty: Type) -> ValueRef { unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } B(cx).sext(val, dest_ty) } } -pub fn FPToUI(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { +pub fn FPToUI(cx: &mut BlockContext, val: ValueRef, dest_ty: Type) -> ValueRef { unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } B(cx).fptoui(val, dest_ty) } } -pub fn FPToSI(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { +pub fn FPToSI(cx: &mut BlockContext, val: ValueRef, dest_ty: Type) -> ValueRef 
{ unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } B(cx).fptosi(val, dest_ty) } } -pub fn UIToFP(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { +pub fn UIToFP(cx: &mut BlockContext, val: ValueRef, dest_ty: Type) -> ValueRef { unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } B(cx).uitofp(val, dest_ty) } } -pub fn SIToFP(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { +pub fn SIToFP(cx: &mut BlockContext, val: ValueRef, dest_ty: Type) -> ValueRef { unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } B(cx).sitofp(val, dest_ty) } } -pub fn FPTrunc(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { +pub fn FPTrunc(cx: &mut BlockContext, val: ValueRef, dest_ty: Type) -> ValueRef { unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } B(cx).fptrunc(val, dest_ty) } } -pub fn FPExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { +pub fn FPExt(cx: &mut BlockContext, val: ValueRef, dest_ty: Type) -> ValueRef { unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } B(cx).fpext(val, dest_ty) } } -pub fn PtrToInt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { +pub fn PtrToInt(cx: &mut BlockContext, val: ValueRef, dest_ty: Type) -> ValueRef { unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } B(cx).ptrtoint(val, dest_ty) } } -pub fn IntToPtr(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { +pub fn IntToPtr(cx: &mut BlockContext, val: ValueRef, dest_ty: Type) -> ValueRef { unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } B(cx).inttoptr(val, dest_ty) } } -pub fn BitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { +pub fn BitCast(cx: &mut BlockContext, val: ValueRef, dest_ty: Type) -> ValueRef { unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } B(cx).bitcast(val, dest_ty) } } -pub fn ZExtOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { +pub fn ZExtOrBitCast(cx: &mut BlockContext, val: ValueRef, dest_ty: Type) -> ValueRef { unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } B(cx).zext_or_bitcast(val, dest_ty) } } -pub fn SExtOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { +pub fn SExtOrBitCast(cx: &mut BlockContext, val: ValueRef, dest_ty: Type) -> ValueRef { unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } B(cx).sext_or_bitcast(val, dest_ty) } } -pub fn TruncOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { +pub fn TruncOrBitCast(cx: &mut BlockContext, val: ValueRef, dest_ty: Type) -> ValueRef { unsafe { - if 
cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } B(cx).trunc_or_bitcast(val, dest_ty) } } -pub fn Cast(cx: Block, op: Opcode, val: ValueRef, dest_ty: Type, +pub fn Cast(cx: &mut BlockContext, op: Opcode, val: ValueRef, dest_ty: Type, _: *const u8) -> ValueRef { unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } B(cx).cast(op, val, dest_ty) } } -pub fn PointerCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { +pub fn PointerCast(cx: &mut BlockContext, val: ValueRef, dest_ty: Type) -> ValueRef { unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } B(cx).pointercast(val, dest_ty) } } -pub fn IntCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { +pub fn IntCast(cx: &mut BlockContext, val: ValueRef, dest_ty: Type) -> ValueRef { unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } B(cx).intcast(val, dest_ty) } } -pub fn FPCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { +pub fn FPCast(cx: &mut BlockContext, val: ValueRef, dest_ty: Type) -> ValueRef { unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } B(cx).fpcast(val, dest_ty) } } /* Comparisons */ -pub fn ICmp(cx: Block, +pub fn ICmp(cx: &mut BlockContext, op: IntPredicate, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { unsafe { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref()); } debug_loc.apply(cx.fcx); @@ -871,14 +871,14 @@ pub fn ICmp(cx: Block, } } -pub fn FCmp(cx: Block, +pub fn FCmp(cx: &mut BlockContext, op: RealPredicate, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { unsafe { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref()); } debug_loc.apply(cx.fcx); @@ -887,17 +887,17 @@ pub fn FCmp(cx: Block, } /* Miscellaneous instructions */ -pub fn EmptyPhi(cx: Block, ty: Type) -> ValueRef { +pub fn EmptyPhi(cx: &mut BlockContext, ty: Type) -> ValueRef { unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); } + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); } B(cx).empty_phi(ty) } } -pub fn Phi(cx: Block, ty: Type, vals: &[ValueRef], +pub fn Phi(cx: &mut BlockContext, ty: Type, vals: &[ValueRef], bbs: &[BasicBlockRef]) -> ValueRef { unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); } + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); } B(cx).phi(ty, vals, bbs) } } @@ -909,7 +909,7 @@ pub fn AddIncomingToPhi(phi: ValueRef, val: ValueRef, bb: BasicBlockRef) { } } -pub fn _UndefReturn(cx: Block, fn_: ValueRef) -> ValueRef { +pub fn _UndefReturn(cx: &mut BlockContext, fn_: ValueRef) -> ValueRef { unsafe { let ccx = cx.fcx.ccx; let ty = val_ty(fn_); @@ -923,177 +923,180 @@ pub fn _UndefReturn(cx: Block, fn_: ValueRef) -> ValueRef { } } -pub fn add_span_comment(cx: Block, sp: Span, text: &str) { +pub fn add_span_comment(cx: &mut BlockContext, sp: Span, text: &str) { B(cx).add_span_comment(sp, text) } -pub fn add_comment(cx: Block, text: &str) 
{ +pub fn add_comment(cx: &mut BlockContext, text: &str) { B(cx).add_comment(text) } -pub fn InlineAsmCall(cx: Block, asm: *const c_char, cons: *const c_char, +pub fn InlineAsmCall(cx: &mut BlockContext, asm: *const c_char, cons: *const c_char, inputs: &[ValueRef], output: Type, volatile: bool, alignstack: bool, dia: AsmDialect) -> ValueRef { B(cx).inline_asm_call(asm, cons, inputs, output, volatile, alignstack, dia) } -pub fn Call(cx: Block, +pub fn Call(cx: &mut BlockContext, fn_: ValueRef, args: &[ValueRef], attributes: Option, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return _UndefReturn(cx, fn_); } debug_loc.apply(cx.fcx); B(cx).call(fn_, args, attributes) } -pub fn CallWithConv(cx: Block, +pub fn CallWithConv(cx: &mut BlockContext, fn_: ValueRef, args: &[ValueRef], conv: CallConv, attributes: Option, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return _UndefReturn(cx, fn_); } debug_loc.apply(cx.fcx); B(cx).call_with_conv(fn_, args, conv, attributes) } -pub fn AtomicFence(cx: Block, order: AtomicOrdering, scope: SynchronizationScope) { - if cx.unreachable.get() { return; } +pub fn AtomicFence(cx: &mut BlockContext, order: AtomicOrdering, scope: SynchronizationScope) { + if cx.bl.unreachable.get() { return; } B(cx).atomic_fence(order, scope) } -pub fn Select(cx: Block, if_: ValueRef, then: ValueRef, else_: ValueRef) -> ValueRef { - if cx.unreachable.get() { return _Undef(then); } +pub fn Select(cx: &mut BlockContext, if_: ValueRef, then: ValueRef, else_: ValueRef) -> ValueRef { + if cx.bl.unreachable.get() { return _Undef(then); } B(cx).select(if_, then, else_) } -pub fn VAArg(cx: Block, list: ValueRef, ty: Type) -> ValueRef { +pub fn VAArg(cx: &mut BlockContext, list: ValueRef, ty: Type) -> ValueRef { unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); } + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); } B(cx).va_arg(list, ty) } } -pub fn ExtractElement(cx: Block, vec_val: ValueRef, index: ValueRef) -> ValueRef { +pub fn ExtractElement(cx: &mut BlockContext, vec_val: ValueRef, index: ValueRef) -> ValueRef { unsafe { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); } B(cx).extract_element(vec_val, index) } } -pub fn InsertElement(cx: Block, vec_val: ValueRef, elt_val: ValueRef, +pub fn InsertElement(cx: &mut BlockContext, vec_val: ValueRef, elt_val: ValueRef, index: ValueRef) -> ValueRef { unsafe { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); } B(cx).insert_element(vec_val, elt_val, index) } } -pub fn ShuffleVector(cx: Block, v1: ValueRef, v2: ValueRef, +pub fn ShuffleVector(cx: &mut BlockContext, v1: ValueRef, v2: ValueRef, mask: ValueRef) -> ValueRef { unsafe { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); } B(cx).shuffle_vector(v1, v2, mask) } } -pub fn VectorSplat(cx: Block, num_elts: usize, elt_val: ValueRef) -> ValueRef { +pub fn VectorSplat(cx: &mut BlockContext, num_elts: usize, elt_val: ValueRef) -> ValueRef { unsafe { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); } B(cx).vector_splat(num_elts, elt_val) } } -pub fn ExtractValue(cx: Block, agg_val: ValueRef, index: usize) -> ValueRef { +pub fn ExtractValue(cx: &mut BlockContext, agg_val: ValueRef, index: 
usize) -> ValueRef { unsafe { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); } B(cx).extract_value(agg_val, index) } } -pub fn InsertValue(cx: Block, agg_val: ValueRef, elt_val: ValueRef, index: usize) -> ValueRef { +pub fn InsertValue(cx: &mut BlockContext, + agg_val: ValueRef, + elt_val: ValueRef, + index: usize) -> ValueRef { unsafe { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); } B(cx).insert_value(agg_val, elt_val, index) } } -pub fn IsNull(cx: Block, val: ValueRef) -> ValueRef { +pub fn IsNull(cx: &mut BlockContext, val: ValueRef) -> ValueRef { unsafe { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref()); } B(cx).is_null(val) } } -pub fn IsNotNull(cx: Block, val: ValueRef) -> ValueRef { +pub fn IsNotNull(cx: &mut BlockContext, val: ValueRef) -> ValueRef { unsafe { - if cx.unreachable.get() { + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref()); } B(cx).is_not_null(val) } } -pub fn PtrDiff(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef { +pub fn PtrDiff(cx: &mut BlockContext, lhs: ValueRef, rhs: ValueRef) -> ValueRef { unsafe { let ccx = cx.fcx.ccx; - if cx.unreachable.get() { return llvm::LLVMGetUndef(ccx.int_type().to_ref()); } + if cx.bl.unreachable.get() { return llvm::LLVMGetUndef(ccx.int_type().to_ref()); } B(cx).ptrdiff(lhs, rhs) } } -pub fn Trap(cx: Block) { - if cx.unreachable.get() { return; } +pub fn Trap(cx: &mut BlockContext) { + if cx.bl.unreachable.get() { return; } B(cx).trap(); } -pub fn LandingPad(cx: Block, ty: Type, pers_fn: ValueRef, +pub fn LandingPad(cx: &mut BlockContext, ty: Type, pers_fn: ValueRef, num_clauses: usize) -> ValueRef { check_not_terminated(cx); - assert!(!cx.unreachable.get()); + assert!(!cx.bl.unreachable.get()); B(cx).landing_pad(ty, pers_fn, num_clauses) } -pub fn SetCleanup(cx: Block, landing_pad: ValueRef) { +pub fn SetCleanup(cx: &mut BlockContext, landing_pad: ValueRef) { B(cx).set_cleanup(landing_pad) } -pub fn Resume(cx: Block, exn: ValueRef) -> ValueRef { +pub fn Resume(cx: &mut BlockContext, exn: ValueRef) -> ValueRef { check_not_terminated(cx); terminate(cx, "Resume"); B(cx).resume(exn) } // Atomic Operations -pub fn AtomicCmpXchg(cx: Block, dst: ValueRef, +pub fn AtomicCmpXchg(cx: &mut BlockContext, dst: ValueRef, cmp: ValueRef, src: ValueRef, order: AtomicOrdering, failure_order: AtomicOrdering) -> ValueRef { B(cx).atomic_cmpxchg(dst, cmp, src, order, failure_order) } -pub fn AtomicRMW(cx: Block, op: AtomicBinOp, +pub fn AtomicRMW(cx: &mut BlockContext, op: AtomicBinOp, dst: ValueRef, src: ValueRef, order: AtomicOrdering) -> ValueRef { B(cx).atomic_rmw(op, dst, src, order) diff --git a/src/librustc_trans/trans/callee.rs b/src/librustc_trans/trans/callee.rs index e87c058faf9c1..de414a8b512bc 100644 --- a/src/librustc_trans/trans/callee.rs +++ b/src/librustc_trans/trans/callee.rs @@ -36,7 +36,7 @@ use trans::callee; use trans::cleanup; use trans::cleanup::CleanupMethods; use trans::closure; -use trans::common::{self, Block, Result, NodeIdAndSpan, ExprId, CrateContext, +use trans::common::{self, BlockContext, Block, Result, NodeIdAndSpan, ExprId, CrateContext, ExprOrMethodCall, FunctionContext, MethodCallKey}; use trans::consts; use trans::datum::*; @@ -83,19 +83,20 @@ pub enum CalleeData<'tcx> { } pub struct Callee<'blk, 'tcx: 'blk> { - pub bcx: Block<'blk, 'tcx>, + pub bcx: &'blk Block, pub data: 
CalleeData<'tcx>, } -fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, expr: &ast::Expr) - -> Callee<'blk, 'tcx> { +fn trans<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, expr: &ast::Expr) + -> Callee<'blk, 'tcx> { let _icx = push_ctxt("trans_callee"); debug!("callee::trans(expr={})", expr.repr(bcx.tcx())); // pick out special kinds of expressions that can be called: match expr.node { ast::ExprPath(..) => { - return trans_def(bcx, bcx.def(expr.id), expr); + let d = bcx.def(expr.id); + return trans_def(bcx, d, expr); } _ => {} } @@ -103,14 +104,14 @@ fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, expr: &ast::Expr) // any other expressions are closures: return datum_callee(bcx, expr); - fn datum_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, expr: &ast::Expr) - -> Callee<'blk, 'tcx> { - let DatumBlock { bcx, datum, .. } = expr::trans(bcx, expr); + fn datum_callee<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, expr: &ast::Expr) + -> Callee<'blk, 'tcx> { + let DatumBlock { bcx: bl, datum, .. } = expr::trans(bcx, expr); match datum.ty.sty { ty::ty_bare_fn(..) => { - let llval = datum.to_llscalarish(bcx); + let llval = datum.to_llscalarish(&mut bl.with_fcx(bcx.fcx)); return Callee { - bcx: bcx, + bcx: bl, data: Fn(llval), }; } @@ -119,20 +120,20 @@ fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, expr: &ast::Expr) expr.span, &format!("type of callee is neither bare-fn nor closure: \ {}", - bcx.ty_to_string(datum.ty))); + bl.with_fcx(bcx.fcx).ty_to_string(datum.ty))); } } } - fn fn_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, llfn: ValueRef) + fn fn_callee<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, llfn: ValueRef) -> Callee<'blk, 'tcx> { return Callee { - bcx: bcx, + bcx: bcx.bl, data: Fn(llfn), }; } - fn trans_def<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + fn trans_def<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, def: def::Def, ref_expr: &ast::Expr) -> Callee<'blk, 'tcx> { @@ -152,7 +153,7 @@ fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, expr: &ast::Expr) ExprId(ref_expr.id), bcx.fcx.param_substs); Callee { - bcx: bcx, + bcx: bcx.bl, data: NamedTupleConstructor(substs, 0) } } @@ -164,18 +165,20 @@ fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, expr: &ast::Expr) ExprId(ref_expr.id), bcx.fcx.param_substs); let def_id = inline::maybe_instantiate_inline(bcx.ccx(), did); - Callee { bcx: bcx, data: Intrinsic(def_id.node, substs) } + Callee { bcx: bcx.bl, data: Intrinsic(def_id.node, substs) } } def::DefFn(did, _) | def::DefMethod(did, def::FromImpl(_)) => { - fn_callee(bcx, trans_fn_ref(bcx.ccx(), did, ExprId(ref_expr.id), - bcx.fcx.param_substs).val) + let v = trans_fn_ref(bcx.ccx(), did, ExprId(ref_expr.id), + bcx.fcx.param_substs).val; + fn_callee(bcx, v) } def::DefMethod(meth_did, def::FromTrait(trait_did)) => { - fn_callee(bcx, meth::trans_static_method_callee(bcx.ccx(), - meth_did, - trait_did, - ref_expr.id, - bcx.fcx.param_substs).val) + let v = meth::trans_static_method_callee(bcx.ccx(), + meth_did, + trait_did, + ref_expr.id, + bcx.fcx.param_substs).val; + fn_callee(bcx, v) } def::DefVariant(tid, vid, _) => { let vinfo = ty::enum_variant_with_id(bcx.tcx(), tid, vid); @@ -187,7 +190,7 @@ fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, expr: &ast::Expr) assert!(!vinfo.args.is_empty()); Callee { - bcx: bcx, + bcx: bcx.bl, data: NamedTupleConstructor(substs, vinfo.disr_val) } } @@ -196,7 +199,7 @@ fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, expr: &ast::Expr) ExprId(ref_expr.id), bcx.fcx.param_substs); Callee { - bcx: bcx, + bcx: bcx.bl, data: NamedTupleConstructor(substs, 0) } } 
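For readers tracking the signature churn: every change in these hunks follows one pattern. The old `Block<'blk, 'tcx>` was a copyable handle that bundled the basic-block state with interior-mutable access to the enclosing function's context; the patch splits it into a shared `&'blk Block` plus an explicit `&mut` borrow of the `FunctionContext`, re-paired on demand. A minimal sketch of the shape this implies — `bl`, `fcx`, and `with_fcx` are names taken from the diff, but the fields shown here are invented stand-ins, not the real rustc definitions:

```rust
use std::cell::Cell;

// Stand-in for the per-basic-block data (the real struct also carries
// llbb, terminated, and more).
pub struct Block {
    pub unreachable: Cell<bool>,
}

// Stand-in for the per-function state that now requires `&mut` access.
pub struct FunctionContext {
    pub scopes: Vec<u32>, // placeholder for the cleanup-scope stack
}

// What replaces the old copyable `Block<'blk, 'tcx>`: a shared borrow of
// the block plus a unique borrow of the function context.
pub struct BlockContext<'r, 'blk> {
    pub bl: &'blk Block,
    pub fcx: &'r mut FunctionContext,
}

impl Block {
    // Mirrors the `bl.with_fcx(fcx)` calls throughout the diff: re-pair a
    // bare block with the function context for the duration of one call.
    pub fn with_fcx<'r, 'blk>(&'blk self,
                              fcx: &'r mut FunctionContext)
                              -> BlockContext<'r, 'blk> {
        BlockContext { bl: self, fcx: fcx }
    }
}

fn main() {
    let bl = Block { unreachable: Cell::new(false) };
    let mut fcx = FunctionContext { scopes: vec![] };
    let bcx = bl.with_fcx(&mut fcx);
    // Wrappers now test `bcx.bl.unreachable` instead of `cx.unreachable`.
    assert!(!bcx.bl.unreachable.get());
}
```

This also explains why functions that used to return `Block<'blk, 'tcx>` now return `&'blk Block` (or store `bcx.bl` into `Result` and `Callee`): the `&mut FunctionContext` half cannot escape the call, so only the block half is handed back.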
@@ -237,13 +240,13 @@ pub fn trans_fn_ref<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, trans_fn_ref_with_substs(ccx, def_id, node, param_substs, substs) } -fn trans_fn_ref_with_substs_to_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - def_id: ast::DefId, - ref_id: ast::NodeId, - substs: subst::Substs<'tcx>) - -> Callee<'blk, 'tcx> { +fn trans_fn_ref_with_substs_to_callee<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + def_id: ast::DefId, + ref_id: ast::NodeId, + substs: subst::Substs<'tcx>) + -> Callee<'blk, 'tcx> { Callee { - bcx: bcx, + bcx: bcx.bl, data: Fn(trans_fn_ref_with_substs(bcx.ccx(), def_id, ExprId(ref_id), @@ -334,7 +337,7 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>( // let empty_substs = tcx.mk_substs(Substs::trans_empty()); - let (block_arena, fcx): (TypedArena<_>, FunctionContext); + let (block_arena, mut fcx): (TypedArena<_>, FunctionContext); block_arena = TypedArena::new(); fcx = new_fn_ctxt(ccx, llfn, @@ -344,11 +347,13 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>( empty_substs, None, &block_arena); - let mut bcx = init_function(&fcx, false, sig.output); + let mut fcx = &mut fcx; + let mut bcx = init_function(fcx, false, sig.output); // the first argument (`self`) will be ptr to the fn pointer let llfnpointer = if is_by_ref { - Load(bcx, get_param(fcx.llfn, fcx.arg_pos(0) as u32)) + let p = get_param(fcx.llfn, fcx.arg_pos(0) as u32); + Load(&mut bcx.with_fcx(fcx), p) } else { get_param(fcx.llfn, fcx.arg_pos(0) as u32) }; @@ -361,18 +366,18 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>( .collect(); assert!(!fcx.needs_ret_allocas); - let dest = fcx.llretslotptr.get().map(|_| + let dest = fcx.llretslotptr.map(|_| expr::SaveIn(fcx.get_ret_slot(bcx, sig.output, "ret_slot")) ); - bcx = trans_call_inner(bcx, + bcx = trans_call_inner(&mut bcx.with_fcx(fcx), DebugLoc::None, bare_fn_ty, - |bcx, _| Callee { bcx: bcx, data: Fn(llfnpointer) }, + |bcx, _| Callee { bcx: bcx.bl, data: Fn(llfnpointer) }, ArgVals(&llargs[..]), dest).bcx; - finish_fn(&fcx, bcx, sig.output, DebugLoc::None); + finish_fn(fcx, bcx, sig.output, DebugLoc::None); ccx.fn_pointer_shims().borrow_mut().insert(bare_fn_ty_maybe_ref, llfn); @@ -592,27 +597,28 @@ pub fn trans_fn_ref_with_substs<'a, 'tcx>( // ______________________________________________________________________ // Translating calls -pub fn trans_call<'a, 'blk, 'tcx>(in_cx: Block<'blk, 'tcx>, - call_expr: &ast::Expr, - f: &ast::Expr, - args: CallArgs<'a, 'tcx>, - dest: expr::Dest) - -> Block<'blk, 'tcx> { +pub fn trans_call<'a, 'r, 'blk, 'tcx>(in_cx: &mut BlockContext<'r, 'blk, 'tcx>, + call_expr: &ast::Expr, + f: &ast::Expr, + args: CallArgs<'a, 'tcx>, + dest: expr::Dest) + -> &'blk Block { let _icx = push_ctxt("trans_call"); + let ty = common::expr_ty_adjusted(in_cx, f); trans_call_inner(in_cx, call_expr.debug_loc(), - common::expr_ty_adjusted(in_cx, f), + ty, |cx, _| trans(cx, f), args, Some(dest)).bcx } -pub fn trans_method_call<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - call_expr: &ast::Expr, - rcvr: &ast::Expr, - args: CallArgs<'a, 'tcx>, - dest: expr::Dest) - -> Block<'blk, 'tcx> { +pub fn trans_method_call<'a, 'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + call_expr: &ast::Expr, + rcvr: &ast::Expr, + args: CallArgs<'a, 'tcx>, + dest: expr::Dest) + -> &'blk Block { let _icx = push_ctxt("trans_method_call"); debug!("trans_method_call(call_expr={})", call_expr.repr(bcx.tcx())); let method_call = MethodCall::expr(call_expr.id); @@ -628,10 +634,11 @@ pub fn trans_method_call<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, }, None => panic!("method not
found in trans_method_call") }; + let ty = common::monomorphize_type(bcx, method_ty); trans_call_inner( bcx, call_expr.debug_loc(), - common::monomorphize_type(bcx, method_ty), + ty, |cx, arg_cleanup_scope| { meth::trans_method_callee(cx, method_call, Some(rcvr), arg_cleanup_scope) }, @@ -639,12 +646,12 @@ pub fn trans_method_call<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, Some(dest)).bcx } -pub fn trans_lang_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - did: ast::DefId, - args: &[ValueRef], - dest: Option, - debug_loc: DebugLoc) - -> Result<'blk, 'tcx> { +pub fn trans_lang_call<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + did: ast::DefId, + args: &[ValueRef], + dest: Option, + debug_loc: DebugLoc) + -> Result<'blk> { let fty = if did.krate == ast::LOCAL_CRATE { ty::node_id_to_type(bcx.tcx(), did.node) } else { @@ -673,14 +680,15 @@ pub fn trans_lang_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, /// /// For non-lang items, `dest` is always Some, and hence the result is written into memory /// somewhere. Nonetheless we return the actual return value of the function. -pub fn trans_call_inner<'a, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, - debug_loc: DebugLoc, - callee_ty: Ty<'tcx>, - get_callee: F, - args: CallArgs<'a, 'tcx>, - dest: Option) - -> Result<'blk, 'tcx> where - F: FnOnce(Block<'blk, 'tcx>, cleanup::ScopeId) -> Callee<'blk, 'tcx>, +pub fn trans_call_inner<'a, 'r, 'blk, 'tcx, F> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + debug_loc: DebugLoc, + callee_ty: Ty<'tcx>, + get_callee: F, + args: CallArgs<'a, 'tcx>, + dest: Option) + -> Result<'blk> where + F: for<'b> FnOnce(&mut BlockContext<'b, 'blk, 'tcx>, cleanup::ScopeId) -> Callee<'blk, 'tcx>, { // Introduce a temporary cleanup scope that will contain cleanups // for the arguments while they are being evaluated. The purpose @@ -689,16 +697,15 @@ pub fn trans_call_inner<'a, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, // cleaned up. If no panic occurs, the values are handed off to // the callee, and hence none of the cleanups in this temporary // scope will ever execute. 
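A side note on the new `trans_call_inner` signature just above: the first parameter is destructured in the argument list itself, `&mut BlockContext { bl, ref mut fcx }`, which splits the incoming `&mut` borrow into a copy of the shared `bl` reference and a nested `&mut` for `fcx`, so the two halves can then be used independently. A toy demonstration of the same parameter pattern (the `Ctx` type is invented for illustration):

```rust
struct Ctx<'a> {
    tag: &'a str,
    log: Vec<String>,
}

// `tag` is bound by copy (shared references are `Copy`); `ref mut log`
// binds a nested `&mut Vec<String>` — the same way the diff pulls `bl`
// and `fcx` apart at the function boundary.
fn record(&mut Ctx { tag, ref mut log }: &mut Ctx, msg: &str) {
    log.push(format!("{}: {}", tag, msg));
}

fn main() {
    let mut ctx = Ctx { tag: "call", log: Vec::new() };
    record(&mut ctx, "enter");
    record(&mut ctx, "exit");
    assert_eq!(ctx.log.len(), 2);
}
```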
- let fcx = bcx.fcx; let ccx = fcx.ccx; let arg_cleanup_scope = fcx.push_custom_cleanup_scope(); - let callee = get_callee(bcx, cleanup::CustomScope(arg_cleanup_scope)); + let callee = get_callee(&mut bl.with_fcx(fcx), cleanup::CustomScope(arg_cleanup_scope)); let mut bcx = callee.bcx; let (abi, ret_ty) = match callee_ty.sty { ty::ty_bare_fn(_, ref f) => { - let output = ty::erase_late_bound_regions(bcx.tcx(), &f.sig.output()); + let output = ty::erase_late_bound_regions(bcx.with_fcx(fcx).tcx(), &f.sig.output()); (f.abi, output) } _ => panic!("expected bare rust fn or closure in trans_call_inner") @@ -718,11 +725,11 @@ pub fn trans_call_inner<'a, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, let call_info = match debug_loc { DebugLoc::At(id, span) => NodeIdAndSpan { id: id, span: span }, DebugLoc::None => { - bcx.sess().bug("No call info for intrinsic call?") + bcx.with_fcx(fcx).sess().bug("No call info for intrinsic call?") } }; - return intrinsic::trans_intrinsic_call(bcx, node, callee_ty, + return intrinsic::trans_intrinsic_call(&mut bcx.with_fcx(fcx), node, callee_ty, arg_cleanup_scope, args, dest.unwrap(), substs, call_info); @@ -731,8 +738,8 @@ pub fn trans_call_inner<'a, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, assert!(dest.is_some()); fcx.pop_custom_cleanup_scope(arg_cleanup_scope); - let ctor_ty = callee_ty.subst(bcx.tcx(), &substs); - return base::trans_named_tuple_constructor(bcx, + let ctor_ty = callee_ty.subst(bcx.with_fcx(fcx).tcx(), &substs); + return base::trans_named_tuple_constructor(&mut bcx.with_fcx(fcx), ctor_ty, disr, args, @@ -758,14 +765,14 @@ pub fn trans_call_inner<'a, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, }; if !is_rust_fn || type_of::return_uses_outptr(ccx, ret_ty) || - bcx.fcx.type_needs_drop(ret_ty) { + fcx.type_needs_drop(ret_ty) { // Push the out-pointer if we use an out-pointer for this // return type, otherwise push "undef". if common::type_is_zero_size(ccx, ret_ty) { let llty = type_of::type_of(ccx, ret_ty); Some(common::C_undef(llty.ptr_to())) } else { - Some(alloc_ty(bcx, ret_ty, "__llret")) + Some(alloc_ty(&mut bcx.with_fcx(fcx), ret_ty, "__llret")) } } else { None @@ -793,8 +800,9 @@ pub fn trans_call_inner<'a, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, if llformal_ret_ty != llret_ty { // this could happen due to e.g. subtyping debug!("casting actual return type ({}) to match formal ({})", - bcx.llty_str(llret_ty), bcx.llty_str(llformal_ret_ty)); - llretslot = PointerCast(bcx, llretslot, llformal_ret_ty); + bcx.with_fcx(fcx).llty_str(llret_ty), + bcx.with_fcx(fcx).llty_str(llformal_ret_ty)); + llretslot = PointerCast(&mut bcx.with_fcx(fcx), llretslot, llformal_ret_ty); } llargs.push(llretslot); } @@ -808,7 +816,7 @@ pub fn trans_call_inner<'a, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, } // Push the arguments. - bcx = trans_args(bcx, + bcx = trans_args(&mut bcx.with_fcx(fcx), args, callee_ty, &mut llargs, @@ -816,10 +824,10 @@ pub fn trans_call_inner<'a, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, llself.is_some(), abi); - fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean(); + fcx.scopes.last_mut().unwrap().drop_non_lifetime_clean(); // Invoke the actual rust fn and update bcx/llresult. - let (llret, b) = base::invoke(bcx, + let (llret, b) = base::invoke(&mut bcx.with_fcx(fcx), llfn, &llargs[..], callee_ty, @@ -831,10 +839,10 @@ pub fn trans_call_inner<'a, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, // the return value, copy it into llretslot. 
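The repeated `bcx.with_fcx(fcx)` calls in this hunk are the cost of the split: once `bl` and `fcx` live in separate bindings, each helper that wants a whole `BlockContext` receives a short-lived re-pairing, and the temporary `&mut` borrow of the function context ends with the statement. A stripped-down illustration of that reborrow rhythm, using toy types of the same shape as the sketch earlier (none of these are the real rustc items):

```rust
struct Fcx { counter: u32 }
struct Bl;

struct Bcx<'r, 'blk> { bl: &'blk Bl, fcx: &'r mut Fcx }

impl Bl {
    fn with_fcx<'r, 'blk>(&'blk self, fcx: &'r mut Fcx) -> Bcx<'r, 'blk> {
        Bcx { bl: self, fcx: fcx }
    }
}

// Stands in for helpers like PointerCast or store_ty that take the pair.
fn helper(bcx: &mut Bcx) {
    bcx.fcx.counter += 1;
}

fn main() {
    let bl = Bl;
    let mut fcx = Fcx { counter: 0 };
    // Each call re-pairs block and context; the `&mut fcx` borrow ends at
    // the semicolon, so the next statement may borrow `fcx` again.
    helper(&mut bl.with_fcx(&mut fcx));
    helper(&mut bl.with_fcx(&mut fcx));
    assert_eq!(fcx.counter, 2);
}
```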
match (opt_llretslot, ret_ty) { (Some(llretslot), ty::FnConverging(ret_ty)) => { - if !type_of::return_uses_outptr(bcx.ccx(), ret_ty) && - !common::type_is_zero_size(bcx.ccx(), ret_ty) + if !type_of::return_uses_outptr(bcx.with_fcx(fcx).ccx(), ret_ty) && + !common::type_is_zero_size(bcx.with_fcx(fcx).ccx(), ret_ty) { - store_ty(bcx, llret, llretslot, ret_ty) + store_ty(&mut bcx.with_fcx(fcx), llret, llretslot, ret_ty) } } (_, _) => {} @@ -846,19 +854,20 @@ pub fn trans_call_inner<'a, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, let mut llargs = Vec::new(); let arg_tys = match args { - ArgExprs(a) => a.iter().map(|x| common::expr_ty(bcx, &**x)).collect(), + ArgExprs(a) => a.iter().map(|x| common::expr_ty(&mut bcx.with_fcx(fcx), &**x)) + .collect(), _ => panic!("expected arg exprs.") }; - bcx = trans_args(bcx, + bcx = trans_args(&mut bcx.with_fcx(fcx), args, callee_ty, &mut llargs, cleanup::CustomScope(arg_cleanup_scope), false, abi); - fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean(); + fcx.scopes.last_mut().unwrap().drop_non_lifetime_clean(); - bcx = foreign::trans_native_call(bcx, + bcx = foreign::trans_native_call(&mut bcx.with_fcx(fcx), callee_ty, llfn, opt_llretslot.unwrap(), @@ -874,17 +883,17 @@ pub fn trans_call_inner<'a, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, match (dest, opt_llretslot, ret_ty) { (Some(expr::Ignore), Some(llretslot), ty::FnConverging(ret_ty)) => { // drop the value if it is not being saved. - bcx = glue::drop_ty(bcx, + bcx = glue::drop_ty(&mut bcx.with_fcx(fcx), llretslot, ret_ty, debug_loc); - call_lifetime_end(bcx, llretslot); + call_lifetime_end(&mut bcx.with_fcx(fcx), llretslot); } _ => {} } if ret_ty == ty::FnDiverging { - Unreachable(bcx); + Unreachable(&mut bcx.with_fcx(fcx)); } Result::new(bcx, llresult) @@ -911,15 +920,16 @@ pub enum CallArgs<'a, 'tcx> { ArgOverloadedCall(Vec<&'a ast::Expr>), } -fn trans_args_under_call_abi<'blk, 'tcx>( - mut bcx: Block<'blk, 'tcx>, - arg_exprs: &[P], - fn_ty: Ty<'tcx>, - llargs: &mut Vec, - arg_cleanup_scope: cleanup::ScopeId, - ignore_self: bool) - -> Block<'blk, 'tcx> +fn trans_args_under_call_abi<'r, 'blk, 'tcx>( + &mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + arg_exprs: &[P], + fn_ty: Ty<'tcx>, + llargs: &mut Vec, + arg_cleanup_scope: cleanup::ScopeId, + ignore_self: bool) + -> &'blk Block { + let mut bcx = &mut bl.with_fcx(fcx); let args = ty::erase_late_bound_regions( bcx.tcx(), &ty::ty_fn_args(fn_ty)); @@ -951,21 +961,21 @@ fn trans_args_under_call_abi<'blk, 'tcx>( tuple_expr.id)); let repr = adt::represent_type(bcx.ccx(), tuple_type); let repr_ptr = &*repr; - llargs.extend(field_types.iter().enumerate().map(|(i, field_type)| { + for (i, field_type) in field_types.iter().enumerate() { let arg_datum = tuple_lvalue_datum.get_element( bcx, field_type, - |srcval| { + |bcx, srcval| { adt::trans_field_ptr(bcx, repr_ptr, srcval, 0, i) }).to_expr_datum(); - unpack_result!(bcx, trans_arg_datum( + llargs.push(unpack_result!(bcx, trans_arg_datum( bcx, field_type, arg_datum, arg_cleanup_scope, DontAutorefArg) - ) - })); + )); + } } _ => { bcx.sess().span_bug(tuple_expr.span, @@ -973,17 +983,19 @@ fn trans_args_under_call_abi<'blk, 'tcx>( } }; - bcx + bcx.bl } -fn trans_overloaded_call_args<'blk, 'tcx>( - mut bcx: Block<'blk, 'tcx>, - arg_exprs: Vec<&ast::Expr>, - fn_ty: Ty<'tcx>, - llargs: &mut Vec, - arg_cleanup_scope: cleanup::ScopeId, - ignore_self: bool) - -> Block<'blk, 'tcx> { +fn trans_overloaded_call_args<'r, 'blk, 'tcx>( + &mut BlockContext { bl, ref mut fcx }: &mut 
BlockContext<'r, 'blk, 'tcx>, + arg_exprs: Vec<&ast::Expr>, + fn_ty: Ty<'tcx>, + llargs: &mut Vec, + arg_cleanup_scope: cleanup::ScopeId, + ignore_self: bool) + -> &'blk Block +{ + let mut bcx = &mut bl.with_fcx(fcx); // Translate the `self` argument first. let arg_tys = ty::erase_late_bound_regions(bcx.tcx(), &ty::ty_fn_args(fn_ty)); if !ignore_self { @@ -1019,25 +1031,26 @@ fn trans_overloaded_call_args<'blk, 'tcx>( } }; - bcx + bcx.bl } -pub fn trans_args<'a, 'blk, 'tcx>(cx: Block<'blk, 'tcx>, - args: CallArgs<'a, 'tcx>, - fn_ty: Ty<'tcx>, - llargs: &mut Vec, - arg_cleanup_scope: cleanup::ScopeId, - ignore_self: bool, - abi: synabi::Abi) - -> Block<'blk, 'tcx> { +pub fn trans_args<'a, 'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + args: CallArgs<'a, 'tcx>, + fn_ty: Ty<'tcx>, + llargs: &mut Vec, + arg_cleanup_scope: cleanup::ScopeId, + ignore_self: bool, + abi: synabi::Abi) + -> &'blk Block { debug!("trans_args(abi={})", abi); + let mut bcx = &mut bl.with_fcx(fcx); + let _icx = push_ctxt("trans_args"); - let arg_tys = ty::erase_late_bound_regions(cx.tcx(), &ty::ty_fn_args(fn_ty)); + let arg_tys = ty::erase_late_bound_regions(bcx.tcx(), &ty::ty_fn_args(fn_ty)); let variadic = ty::fn_is_variadic(fn_ty); - let mut bcx = cx; - // First we figure out the caller's view of the types of the arguments. // This will be needed if this is a generic call, because the callee has // to cast her view of the arguments to the caller's view. @@ -1046,7 +1059,7 @@ pub fn trans_args<'a, 'blk, 'tcx>(cx: Block<'blk, 'tcx>, if abi == synabi::RustCall { // This is only used for direct calls to the `call`, // `call_mut` or `call_once` functions. - return trans_args_under_call_abi(cx, + return trans_args_under_call_abi(bcx, arg_exprs, fn_ty, llargs, @@ -1061,7 +1074,7 @@ pub fn trans_args<'a, 'blk, 'tcx>(cx: Block<'blk, 'tcx>, } let arg_ty = if i >= num_formal_args { assert!(variadic); - common::expr_ty_adjusted(cx, &**arg_expr) + common::expr_ty_adjusted(bcx, &**arg_expr) } else { arg_tys[i] }; @@ -1075,7 +1088,7 @@ pub fn trans_args<'a, 'blk, 'tcx>(cx: Block<'blk, 'tcx>, } } ArgOverloadedCall(arg_exprs) => { - return trans_overloaded_call_args(cx, + return trans_overloaded_call_args(bcx, arg_exprs, fn_ty, llargs, @@ -1105,7 +1118,7 @@ pub fn trans_args<'a, 'blk, 'tcx>(cx: Block<'blk, 'tcx>, } } - bcx + bcx.bl } #[derive(Copy, Clone)] @@ -1114,14 +1127,15 @@ pub enum AutorefArg { DoAutorefArg(ast::NodeId) } -pub fn trans_arg_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - formal_arg_ty: Ty<'tcx>, - arg_datum: Datum<'tcx, Expr>, - arg_cleanup_scope: cleanup::ScopeId, - autoref_arg: AutorefArg) - -> Result<'blk, 'tcx> { +pub fn trans_arg_datum<'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + formal_arg_ty: Ty<'tcx>, + arg_datum: Datum<'tcx, Expr>, + arg_cleanup_scope: cleanup::ScopeId, + autoref_arg: AutorefArg) + -> Result<'blk> { let _icx = push_ctxt("trans_arg_datum"); - let mut bcx = bcx; + let mut bcx = &mut bl.with_fcx(fcx); let ccx = bcx.ccx(); debug!("trans_arg_datum({})", @@ -1170,5 +1184,5 @@ pub fn trans_arg_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } debug!("--- trans_arg_datum passing {}", bcx.val_to_string(val)); - Result::new(bcx, val) + Result::new(bcx.bl, val) } diff --git a/src/librustc_trans/trans/cleanup.rs b/src/librustc_trans/trans/cleanup.rs index d23543924dd39..4c545c4a2347b 100644 --- a/src/librustc_trans/trans/cleanup.rs +++ b/src/librustc_trans/trans/cleanup.rs @@ -124,13 +124,14 @@ use trans::base; 
use trans::build; use trans::callee; use trans::common; -use trans::common::{Block, FunctionContext, ExprId, NodeIdAndSpan}; +use trans::common::{BlockContext, Block, FunctionContext, ExprId, NodeIdAndSpan}; use trans::debuginfo::{DebugLoc, ToDebugLoc}; use trans::declare; use trans::glue; use middle::region; use trans::type_::Type; use middle::ty::{self, Ty}; +use std::cell::Cell; use std::fmt; use syntax::ast; use util::ppaux::Repr; @@ -141,7 +142,7 @@ pub struct CleanupScope<'blk, 'tcx: 'blk> { // cleanup miscellaneous garbage that trans may generate whose // lifetime is a subset of some expression. See module doc for // more details. - kind: CleanupScopeKind<'blk, 'tcx>, + kind: CleanupScopeKind<'blk>, // Cleanups to run upon scope exit. cleanups: Vec<CleanupObj<'tcx>>, @@ -151,7 +152,7 @@ pub struct CleanupScope<'blk, 'tcx: 'blk> { debug_loc: DebugLoc, cached_early_exits: Vec<CachedEarlyExit>, - cached_landing_pad: Option<BasicBlockRef>, + cached_landing_pad: Cell<Option<BasicBlockRef>>, } #[derive(Copy, Clone, Debug)] @@ -163,13 +164,13 @@ pub const EXIT_BREAK: usize = 0; pub const EXIT_LOOP: usize = 1; pub const EXIT_MAX: usize = 2; -pub enum CleanupScopeKind<'blk, 'tcx: 'blk> { +pub enum CleanupScopeKind<'blk> { CustomScopeKind, AstScopeKind(ast::NodeId), - LoopScopeKind(ast::NodeId, [Block<'blk, 'tcx>; EXIT_MAX]) + LoopScopeKind(ast::NodeId, [&'blk Block; EXIT_MAX]) } -impl<'blk, 'tcx: 'blk> fmt::Debug for CleanupScopeKind<'blk, 'tcx> { +impl<'blk> fmt::Debug for CleanupScopeKind<'blk> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { CustomScopeKind => write!(f, "CustomScopeKind"), @@ -202,10 +203,10 @@ pub trait Cleanup<'tcx> { fn must_unwind(&self) -> bool; fn clean_on_unwind(&self) -> bool; fn is_lifetime_end(&self) -> bool; - fn trans<'blk>(&self, - bcx: Block<'blk, 'tcx>, - debug_loc: DebugLoc) - -> Block<'blk, 'tcx>; + fn trans<'r,'blk>(&self, + bcx: &mut BlockContext<'r, 'blk, 'tcx>, + debug_loc: DebugLoc) + -> &'blk Block; } pub type CleanupObj<'tcx> = Box<Cleanup<'tcx>+'tcx>; @@ -218,7 +219,7 @@ pub enum ScopeId { impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { /// Invoked when we start to trans the code contained within a new cleanup scope.
- fn push_ast_cleanup_scope(&self, debug_loc: NodeIdAndSpan) { + fn push_ast_cleanup_scope(&mut self, debug_loc: NodeIdAndSpan) { debug!("push_ast_cleanup_scope({})", self.ccx.tcx().map.node_to_string(debug_loc.id)); @@ -251,16 +252,15 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { debug_loc.debug_loc())); } - fn push_loop_cleanup_scope(&self, + fn push_loop_cleanup_scope(&mut self, id: ast::NodeId, - exits: [Block<'blk, 'tcx>; EXIT_MAX]) { + exits: [&'blk Block; EXIT_MAX]) { debug!("push_loop_cleanup_scope({})", self.ccx.tcx().map.node_to_string(id)); assert_eq!(Some(id), self.top_ast_scope()); // Just copy the debuginfo source location from the enclosing scope let debug_loc = self.scopes - .borrow() .last() .unwrap() .debug_loc; @@ -268,13 +268,12 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { self.push_scope(CleanupScope::new(LoopScopeKind(id, exits), debug_loc)); } - fn push_custom_cleanup_scope(&self) -> CustomScopeIndex { + fn push_custom_cleanup_scope(&mut self) -> CustomScopeIndex { let index = self.scopes_len(); debug!("push_custom_cleanup_scope(): {}", index); // Just copy the debuginfo source location from the enclosing scope let debug_loc = self.scopes - .borrow() .last() .map(|opt_scope| opt_scope.debug_loc) .unwrap_or(DebugLoc::None); @@ -283,7 +282,7 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { CustomScopeIndex { index: index } } - fn push_custom_cleanup_scope_with_debug_loc(&self, + fn push_custom_cleanup_scope_with_debug_loc(&mut self, debug_loc: NodeIdAndSpan) -> CustomScopeIndex { let index = self.scopes_len(); @@ -296,10 +295,10 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { /// Removes the cleanup scope for id `cleanup_scope`, which must be at the top of the cleanup /// stack, and generates the code to do its cleanups for normal exit. - fn pop_and_trans_ast_cleanup_scope(&self, - bcx: Block<'blk, 'tcx>, + fn pop_and_trans_ast_cleanup_scope(&mut self, + bcx: &'blk Block, cleanup_scope: ast::NodeId) - -> Block<'blk, 'tcx> { + -> &'blk Block { debug!("pop_and_trans_ast_cleanup_scope({})", self.ccx.tcx().map.node_to_string(cleanup_scope)); @@ -312,7 +311,7 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { /// Removes the loop cleanup scope for id `cleanup_scope`, which must be at the top of the /// cleanup stack. Does not generate any cleanup code, since loop scopes should exit by /// branching to a block generated by `normal_exit_block`. - fn pop_loop_cleanup_scope(&self, + fn pop_loop_cleanup_scope(&mut self, cleanup_scope: ast::NodeId) { debug!("pop_loop_cleanup_scope({})", self.ccx.tcx().map.node_to_string(cleanup_scope)); @@ -324,7 +323,7 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { /// Removes the top cleanup scope from the stack without executing its cleanups. The top /// cleanup scope must be the temporary scope `custom_scope`. - fn pop_custom_cleanup_scope(&self, + fn pop_custom_cleanup_scope(&mut self, custom_scope: CustomScopeIndex) { debug!("pop_custom_cleanup_scope({})", custom_scope.index); assert!(self.is_valid_to_pop_custom_scope(custom_scope)); @@ -333,10 +332,10 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { /// Removes the top cleanup scope from the stack, which must be a temporary scope, and /// generates the code to do its cleanups for normal exit. 
- fn pop_and_trans_custom_cleanup_scope(&self, - bcx: Block<'blk, 'tcx>, + fn pop_and_trans_custom_cleanup_scope(&mut self, + bcx: &'blk Block, custom_scope: CustomScopeIndex) - -> Block<'blk, 'tcx> { + -> &'blk Block { debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope); assert!(self.is_valid_to_pop_custom_scope(custom_scope)); @@ -346,7 +345,7 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { /// Returns the id of the top-most loop scope fn top_loop_scope(&self) -> ast::NodeId { - for scope in self.scopes.borrow().iter().rev() { + for scope in self.scopes.iter().rev() { if let LoopScopeKind(id, _) = scope.kind { return id; } @@ -356,7 +355,7 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { /// Returns a block to branch to which will perform all pending cleanups and then /// break/continue (depending on `exit`) out of the loop with id `cleanup_scope` - fn normal_exit_block(&'blk self, + fn normal_exit_block(&mut self, cleanup_scope: ast::NodeId, exit: usize) -> BasicBlockRef { self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit)) @@ -364,11 +363,11 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { /// Returns a block to branch to which will perform all pending cleanups and then return from /// this function - fn return_exit_block(&'blk self) -> BasicBlockRef { + fn return_exit_block(&mut self) -> BasicBlockRef { self.trans_cleanups_to_exit_scope(ReturnExit) } - fn schedule_lifetime_end(&self, + fn schedule_lifetime_end(&mut self, cleanup_scope: ScopeId, val: ValueRef) { let drop = box LifetimeEnd { @@ -383,7 +382,7 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { } /// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty` - fn schedule_drop_mem(&self, + fn schedule_drop_mem(&mut self, cleanup_scope: ScopeId, val: ValueRef, ty: Ty<'tcx>) { @@ -408,7 +407,7 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { } /// Schedules a (deep) drop and filling of `val`, which is a pointer to an instance of `ty` - fn schedule_drop_and_fill_mem(&self, + fn schedule_drop_and_fill_mem(&mut self, cleanup_scope: ScopeId, val: ValueRef, ty: Ty<'tcx>) { @@ -438,7 +437,7 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { /// `ty`. The scheduled code handles extracting the discriminant /// and dropping the contents associated with that variant /// *without* executing any associated drop implementation. - fn schedule_drop_adt_contents(&self, + fn schedule_drop_adt_contents(&mut self, cleanup_scope: ScopeId, val: ValueRef, ty: Ty<'tcx>) { @@ -466,7 +465,7 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { } /// Schedules a (deep) drop of `val`, which is an instance of `ty` - fn schedule_drop_immediate(&self, + fn schedule_drop_immediate(&mut self, cleanup_scope: ScopeId, val: ValueRef, ty: Ty<'tcx>) { @@ -492,7 +491,7 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { } /// Schedules a call to `free(val)`. Note that this is a shallow operation. 
- fn schedule_free_value(&self, + fn schedule_free_value(&mut self, cleanup_scope: ScopeId, val: ValueRef, heap: Heap, @@ -507,7 +506,7 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { self.schedule_clean(cleanup_scope, drop as CleanupObj); } - fn schedule_clean(&self, + fn schedule_clean(&mut self, cleanup_scope: ScopeId, cleanup: CleanupObj<'tcx>) { match cleanup_scope { @@ -519,13 +518,13 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { /// Schedules a cleanup to occur upon exit from `cleanup_scope`. If `cleanup_scope` is not /// provided, then the cleanup is scheduled in the topmost scope, which must be a temporary /// scope. - fn schedule_clean_in_ast_scope(&self, + fn schedule_clean_in_ast_scope(&mut self, cleanup_scope: ast::NodeId, cleanup: CleanupObj<'tcx>) { debug!("schedule_clean_in_ast_scope(cleanup_scope={})", cleanup_scope); - for scope in self.scopes.borrow_mut().iter_mut().rev() { + for scope in self.scopes.iter_mut().rev() { if scope.kind.is_ast_with_id(cleanup_scope) { scope.cleanups.push(cleanup); scope.clear_cached_exits(); @@ -542,7 +541,7 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { } /// Schedules a cleanup to occur in the top-most scope, which must be a temporary scope. - fn schedule_clean_in_custom_scope(&self, + fn schedule_clean_in_custom_scope(&mut self, custom_scope: CustomScopeIndex, cleanup: CleanupObj<'tcx>) { debug!("schedule_clean_in_custom_scope(custom_scope={})", @@ -550,20 +549,19 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { assert!(self.is_valid_custom_scope(custom_scope)); - let mut scopes = self.scopes.borrow_mut(); - let scope = &mut (*scopes)[custom_scope.index]; + let scope = &mut (*self.scopes)[custom_scope.index]; scope.cleanups.push(cleanup); scope.clear_cached_exits(); } /// Returns true if there are pending cleanups that should execute on panic. fn needs_invoke(&self) -> bool { - self.scopes.borrow().iter().rev().any(|s| s.needs_invoke()) + self.scopes.iter().rev().any(|s| s.needs_invoke()) } /// Returns a basic block to branch to in the event of a panic. This block will run the panic /// cleanups and eventually invoke the LLVM `Resume` instruction. - fn get_landing_pad(&'blk self) -> BasicBlockRef { + fn get_landing_pad(&mut self) -> BasicBlockRef { let _icx = base::push_ctxt("get_landing_pad"); debug!("get_landing_pad"); @@ -598,7 +596,7 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { /// Returns the id of the current top-most AST scope, if any. fn top_ast_scope(&self) -> Option { - for scope in self.scopes.borrow().iter().rev() { + for scope in self.scopes.iter().rev() { match scope.kind { CustomScopeKind | LoopScopeKind(..) 
=> {} AstScopeKind(i) => { @@ -610,52 +608,51 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx } fn top_nonempty_cleanup_scope(&self) -> Option { - self.scopes.borrow().iter().rev().position(|s| !s.cleanups.is_empty()) + self.scopes.iter().rev().position(|s| !s.cleanups.is_empty()) } fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool { self.is_valid_custom_scope(custom_scope) && - custom_scope.index == self.scopes.borrow().len() - 1 + custom_scope.index == self.scopes.len() - 1 } fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool { - let scopes = self.scopes.borrow(); - custom_scope.index < scopes.len() && - (*scopes)[custom_scope.index].kind.is_temp() + custom_scope.index < self.scopes.len() && + (*self.scopes)[custom_scope.index].kind.is_temp() } /// Generates the cleanups for `scope` into `bcx` - fn trans_scope_cleanups(&self, // cannot borrow self, will recurse - bcx: Block<'blk, 'tcx>, - scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx> { + fn trans_scope_cleanups(&mut self, // cannot borrow self, will recurse + bcx: &'blk Block, + scope: &CleanupScope<'blk, 'tcx>) -> &'blk Block { let mut bcx = bcx; if !bcx.unreachable.get() { for cleanup in scope.cleanups.iter().rev() { - bcx = cleanup.trans(bcx, scope.debug_loc); + bcx = cleanup.trans(&mut bcx.with_fcx(self), scope.debug_loc); } } bcx } fn scopes_len(&self) -> usize { - self.scopes.borrow().len() + self.scopes.len() } - fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>) { - self.scopes.borrow_mut().push(scope) + fn push_scope(&mut self, scope: CleanupScope<'blk, 'tcx>) { + self.scopes.push(scope) } - fn pop_scope(&self) -> CleanupScope<'blk, 'tcx> { + fn pop_scope(&mut self) -> CleanupScope<'blk, 'tcx> { debug!("popping cleanup scope {}, {} scopes remaining", self.top_scope(|s| s.block_name("")), self.scopes_len() - 1); - self.scopes.borrow_mut().pop().unwrap() + self.scopes.pop().unwrap() } fn top_scope(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R { - f(self.scopes.borrow().last().unwrap()) + f(self.scopes.last().unwrap()) } /// Used when the caller wishes to jump to an early exit, such as a return, break, continue, or @@ -680,7 +677,7 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx /// value would be the first basic block in that sequence (`Cleanup(AST 24)`). The caller could /// then branch to `Cleanup(AST 24)` and it will perform all cleanups and finally branch to the /// `break_blk`. - fn trans_cleanups_to_exit_scope(&'blk self, + fn trans_cleanups_to_exit_scope(&mut self, label: EarlyExitLabel) -> BasicBlockRef { debug!("trans_cleanups_to_exit_scope label={:?} scopes={}", @@ -704,10 +701,10 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx UnwindExit => { // Generate a block that will `Resume`. 
let prev_bcx = self.new_block(true, "resume", None); - let personality = self.personality.get().expect( + let personality = self.personality.expect( "create_landing_pad() should have set this"); - build::Resume(prev_bcx, - build::Load(prev_bcx, personality)); + let ld = build::Load(&mut prev_bcx.with_fcx(self), personality); + build::Resume(&mut prev_bcx.with_fcx(self), ld); prev_llbb = prev_bcx.llbb; break; } @@ -788,17 +785,17 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx { let name = scope.block_name("clean"); debug!("generating cleanups for {}", name); - let bcx_in = self.new_block(label.is_unwind(), - &name[..], - None); + let bcx_in = &*self.new_block(label.is_unwind(), + &name[..], + None); let mut bcx_out = bcx_in; for cleanup in scope.cleanups.iter().rev() { if cleanup_is_suitable_for(&**cleanup, label) { - bcx_out = cleanup.trans(bcx_out, + bcx_out = cleanup.trans(&mut bcx_out.with_fcx(self), scope.debug_loc); } } - build::Br(bcx_out, prev_llbb, DebugLoc::None); + build::Br(&mut bcx_out.with_fcx(self), prev_llbb, DebugLoc::None); prev_llbb = bcx_in.llbb; } else { debug!("no suitable cleanups in {}", @@ -823,21 +820,20 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx /// /// (The cleanups and resume instruction are created by `trans_cleanups_to_exit_scope()`, not /// in this function itself.) - fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef { + fn get_or_create_landing_pad(&mut self) -> BasicBlockRef { let pad_bcx; debug!("get_or_create_landing_pad"); // Check if a landing pad block exists; if not, create one. { - let mut scopes = self.scopes.borrow_mut(); - let last_scope = scopes.last_mut().unwrap(); - match last_scope.cached_landing_pad { + let last_scope = self.scopes.last().unwrap(); + match last_scope.cached_landing_pad.get() { Some(llbb) => { return llbb; } None => { let name = last_scope.block_name("unwind"); pad_bcx = self.new_block(true, &name[..], None); - last_scope.cached_landing_pad = Some(pad_bcx.llbb); + last_scope.cached_landing_pad.set(Some(pad_bcx.llbb)); } } } @@ -857,10 +853,10 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx // this function, so we just codegen a generic reference to it. We don't // specify any of the types for the function, we just make it a symbol // that LLVM can later use. - let llpersonality = match pad_bcx.tcx().lang_items.eh_personality() { + let llpersonality = match pad_bcx.with_fcx(self).tcx().lang_items.eh_personality() { Some(def_id) => { - callee::trans_fn_ref(pad_bcx.ccx(), def_id, ExprId(0), - pad_bcx.fcx.param_substs).val + callee::trans_fn_ref(pad_bcx.with_fcx(self).ccx(), def_id, ExprId(0), + self.param_substs).val } None => { let mut personality = self.ccx.eh_personality().borrow_mut(); @@ -878,34 +874,34 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx }; // The only landing pad clause will be 'cleanup' - let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1); + let llretval = build::LandingPad(&mut pad_bcx.with_fcx(self), llretty, llpersonality, 1); // The landing pad block is a cleanup - build::SetCleanup(pad_bcx, llretval); + build::SetCleanup(&mut pad_bcx.with_fcx(self), llretval); // We store the retval in a function-central alloca, so that calls to // Resume can find it. 
- match self.personality.get() { + match self.personality { Some(addr) => { - build::Store(pad_bcx, llretval, addr); + build::Store(&mut pad_bcx.with_fcx(self), llretval, addr); } None => { - let addr = base::alloca(pad_bcx, common::val_ty(llretval), ""); - self.personality.set(Some(addr)); - build::Store(pad_bcx, llretval, addr); + let addr = base::alloca(&mut pad_bcx.with_fcx(self), common::val_ty(llretval), ""); + self.personality = Some(addr); + build::Store(&mut pad_bcx.with_fcx(self), llretval, addr); } } // Generate the cleanup block and branch to it. let cleanup_llbb = self.trans_cleanups_to_exit_scope(UnwindExit); - build::Br(pad_bcx, cleanup_llbb, DebugLoc::None); + build::Br(&mut pad_bcx.with_fcx(self), cleanup_llbb, DebugLoc::None); return pad_bcx.llbb; } } impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> { - fn new(kind: CleanupScopeKind<'blk, 'tcx>, + fn new(kind: CleanupScopeKind<'blk>, debug_loc: DebugLoc) -> CleanupScope<'blk, 'tcx> { CleanupScope { @@ -913,13 +909,13 @@ impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> { debug_loc: debug_loc, cleanups: vec!(), cached_early_exits: vec!(), - cached_landing_pad: None, + cached_landing_pad: Cell::new(None), } } fn clear_cached_exits(&mut self) { self.cached_early_exits = vec!(); - self.cached_landing_pad = None; + self.cached_landing_pad.set(None); } fn cached_early_exit(&self, @@ -941,7 +937,7 @@ impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> { /// True if this scope has cleanups that need unwinding fn needs_invoke(&self) -> bool { - self.cached_landing_pad.is_some() || + self.cached_landing_pad.get().is_some() || self.cleanups.iter().any(|c| c.must_unwind()) } @@ -959,7 +955,7 @@ impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> { } } -impl<'blk, 'tcx> CleanupScopeKind<'blk, 'tcx> { +impl<'blk, 'tcx> CleanupScopeKind<'blk> { fn is_temp(&self) -> bool { match *self { CustomScopeKind => true, @@ -1027,25 +1023,25 @@ impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> { false } - fn trans<'blk>(&self, - bcx: Block<'blk, 'tcx>, - debug_loc: DebugLoc) - -> Block<'blk, 'tcx> { + fn trans<'r, 'blk>(&self, + bcx: &mut BlockContext<'r, 'blk, 'tcx>, + debug_loc: DebugLoc) + -> &'blk Block { let skip_dtor = self.skip_dtor; let _icx = if skip_dtor { base::push_ctxt("::trans skip_dtor=true") } else { base::push_ctxt("::trans skip_dtor=false") }; - let bcx = if self.is_immediate { + let bl = if self.is_immediate { glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc, self.skip_dtor) } else { glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor) }; if self.fill_on_drop { - base::drop_done_fill_mem(bcx, self.val, self.ty); + base::drop_done_fill_mem(&mut bl.with_fcx(bcx.fcx), self.val, self.ty); } - bcx + bl } } @@ -1074,10 +1070,10 @@ impl<'tcx> Cleanup<'tcx> for FreeValue<'tcx> { false } - fn trans<'blk>(&self, - bcx: Block<'blk, 'tcx>, - debug_loc: DebugLoc) - -> Block<'blk, 'tcx> { + fn trans<'r, 'blk>(&self, + bcx: &mut BlockContext<'r, 'blk, 'tcx>, + debug_loc: DebugLoc) + -> &'blk Block { match self.heap { HeapExchange => { glue::trans_exchange_free_ty(bcx, @@ -1107,13 +1103,13 @@ impl<'tcx> Cleanup<'tcx> for LifetimeEnd { true } - fn trans<'blk>(&self, - bcx: Block<'blk, 'tcx>, - debug_loc: DebugLoc) - -> Block<'blk, 'tcx> { + fn trans<'r, 'blk>(&self, + bcx: &mut BlockContext<'r, 'blk, 'tcx>, + debug_loc: DebugLoc) + -> &'blk Block { debug_loc.apply(bcx.fcx); base::call_lifetime_end(bcx, self.ptr); - bcx + bcx.bl } } @@ -1150,66 +1146,66 @@ fn cleanup_is_suitable_for(c: &Cleanup, // These traits just exist to put the methods into this file. 
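Zooming out on the cleanup.rs changes: methods that previously took `&self` and mutated through `self.scopes.borrow_mut()` now take `&mut self` and touch `self.scopes` directly, while `cached_landing_pad` moves in the opposite direction, gaining a `Cell` so it can still be written through a shared borrow of its scope. A before/after toy contrast (fields simplified to a bare cached value; not the real rustc types):

```rust
use std::cell::{Cell, RefCell};

// Before: interior mutability, so `&self` methods can mutate the stack.
struct FcxBefore {
    scopes: RefCell<Vec<Option<u32>>>, // Option<u32> stands in for a cached pad
}

impl FcxBefore {
    fn cache_pad(&self, pad: u32) {
        *self.scopes.borrow_mut().last_mut().unwrap() = Some(pad);
    }
}

// After: the stack itself is mutated through `&mut self`; only the field
// still written behind shared borrows keeps a `Cell`.
struct FcxAfter {
    scopes: Vec<Cell<Option<u32>>>,
}

impl FcxAfter {
    fn push_scope(&mut self) {
        self.scopes.push(Cell::new(None));
    }
    fn cache_pad(&self, pad: u32) {
        // No `&mut self` required: `Cell::set` works behind `&self`.
        self.scopes.last().unwrap().set(Some(pad));
    }
}

fn main() {
    let mut f = FcxAfter { scopes: Vec::new() };
    f.push_scope();
    f.cache_pad(7);
    assert_eq!(f.scopes.last().unwrap().get(), Some(7));
}
```

The trade: `&mut self` lets the borrow checker verify the mutations statically, at the price of threading mutable borrows everywhere — which is exactly what the `BlockContext` plumbing in the rest of this patch does.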
pub trait CleanupMethods<'blk, 'tcx> { - fn push_ast_cleanup_scope(&self, id: NodeIdAndSpan); - fn push_loop_cleanup_scope(&self, + fn push_ast_cleanup_scope(&mut self, id: NodeIdAndSpan); + fn push_loop_cleanup_scope(&mut self, id: ast::NodeId, - exits: [Block<'blk, 'tcx>; EXIT_MAX]); - fn push_custom_cleanup_scope(&self) -> CustomScopeIndex; - fn push_custom_cleanup_scope_with_debug_loc(&self, + exits: [&'blk Block; EXIT_MAX]); + fn push_custom_cleanup_scope(&mut self) -> CustomScopeIndex; + fn push_custom_cleanup_scope_with_debug_loc(&mut self, debug_loc: NodeIdAndSpan) -> CustomScopeIndex; - fn pop_and_trans_ast_cleanup_scope(&self, - bcx: Block<'blk, 'tcx>, + fn pop_and_trans_ast_cleanup_scope(&mut self, + bcx: &'blk Block, cleanup_scope: ast::NodeId) - -> Block<'blk, 'tcx>; - fn pop_loop_cleanup_scope(&self, + -> &'blk Block; + fn pop_loop_cleanup_scope(&mut self, cleanup_scope: ast::NodeId); - fn pop_custom_cleanup_scope(&self, + fn pop_custom_cleanup_scope(&mut self, custom_scope: CustomScopeIndex); - fn pop_and_trans_custom_cleanup_scope(&self, - bcx: Block<'blk, 'tcx>, + fn pop_and_trans_custom_cleanup_scope(&mut self, + bcx: &'blk Block, custom_scope: CustomScopeIndex) - -> Block<'blk, 'tcx>; + -> &'blk Block; fn top_loop_scope(&self) -> ast::NodeId; - fn normal_exit_block(&'blk self, + fn normal_exit_block(&mut self, cleanup_scope: ast::NodeId, exit: usize) -> BasicBlockRef; - fn return_exit_block(&'blk self) -> BasicBlockRef; - fn schedule_lifetime_end(&self, + fn return_exit_block(&mut self) -> BasicBlockRef; + fn schedule_lifetime_end(&mut self, cleanup_scope: ScopeId, val: ValueRef); - fn schedule_drop_mem(&self, + fn schedule_drop_mem(&mut self, cleanup_scope: ScopeId, val: ValueRef, ty: Ty<'tcx>); - fn schedule_drop_and_fill_mem(&self, + fn schedule_drop_and_fill_mem(&mut self, cleanup_scope: ScopeId, val: ValueRef, ty: Ty<'tcx>); - fn schedule_drop_adt_contents(&self, + fn schedule_drop_adt_contents(&mut self, cleanup_scope: ScopeId, val: ValueRef, ty: Ty<'tcx>); - fn schedule_drop_immediate(&self, + fn schedule_drop_immediate(&mut self, cleanup_scope: ScopeId, val: ValueRef, ty: Ty<'tcx>); - fn schedule_free_value(&self, + fn schedule_free_value(&mut self, cleanup_scope: ScopeId, val: ValueRef, heap: Heap, content_ty: Ty<'tcx>); - fn schedule_clean(&self, + fn schedule_clean(&mut self, cleanup_scope: ScopeId, cleanup: CleanupObj<'tcx>); - fn schedule_clean_in_ast_scope(&self, + fn schedule_clean_in_ast_scope(&mut self, cleanup_scope: ast::NodeId, cleanup: CleanupObj<'tcx>); - fn schedule_clean_in_custom_scope(&self, + fn schedule_clean_in_custom_scope(&mut self, custom_scope: CustomScopeIndex, cleanup: CleanupObj<'tcx>); fn needs_invoke(&self) -> bool; - fn get_landing_pad(&'blk self) -> BasicBlockRef; + fn get_landing_pad(&mut self) -> BasicBlockRef; } trait CleanupHelperMethods<'blk, 'tcx> { @@ -1217,15 +1213,15 @@ trait CleanupHelperMethods<'blk, 'tcx> { fn top_nonempty_cleanup_scope(&self) -> Option; fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool; fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool; - fn trans_scope_cleanups(&self, - bcx: Block<'blk, 'tcx>, - scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx>; - fn trans_cleanups_to_exit_scope(&'blk self, + fn trans_scope_cleanups(&mut self, + bcx: &'blk Block, + scope: &CleanupScope<'blk, 'tcx>) -> &'blk Block; + fn trans_cleanups_to_exit_scope(&mut self, label: EarlyExitLabel) -> BasicBlockRef; - fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef; + fn 
get_or_create_landing_pad(&mut self) -> BasicBlockRef; fn scopes_len(&self) -> usize; - fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>); - fn pop_scope(&self) -> CleanupScope<'blk, 'tcx>; + fn push_scope(&mut self, scope: CleanupScope<'blk, 'tcx>); + fn pop_scope(&mut self) -> CleanupScope<'blk, 'tcx>; fn top_scope(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R; } diff --git a/src/librustc_trans/trans/closure.rs b/src/librustc_trans/trans/closure.rs index eb4acec25510a..e70a01a83a727 100644 --- a/src/librustc_trans/trans/closure.rs +++ b/src/librustc_trans/trans/closure.rs @@ -35,10 +35,10 @@ use syntax::ast; use syntax::ast_util; -fn load_closure_environment<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - arg_scope_id: ScopeId, - freevars: &[ty::Freevar]) - -> Block<'blk, 'tcx> +fn load_closure_environment<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + arg_scope_id: ScopeId, + freevars: &[ty::Freevar]) + -> &'blk Block { let _icx = push_ctxt("closure::load_closure_environment"); @@ -52,7 +52,8 @@ fn load_closure_environment<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let datum = rvalue_scratch_datum(bcx, self_type, "closure_env"); - store_ty(bcx, bcx.fcx.llenv.unwrap(), datum.val, self_type); + let e = bcx.fcx.llenv.unwrap(); + store_ty(bcx, e, datum.val, self_type); datum.val } else { bcx.fcx.llenv.unwrap() @@ -81,12 +82,13 @@ fn load_closure_environment<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } }; let def_id = freevar.def.def_id(); - bcx.fcx.llupvars.borrow_mut().insert(def_id.node, upvar_ptr); + bcx.fcx.llupvars.insert(def_id.node, upvar_ptr); if kind == ty::FnOnceClosureKind && !captured_by_ref { + let ty = node_id_type(bcx, def_id.node); bcx.fcx.schedule_drop_mem(arg_scope_id, upvar_ptr, - node_id_type(bcx, def_id.node)) + ty) } if let Some(env_pointer_alloca) = env_pointer_alloca { @@ -100,7 +102,7 @@ fn load_closure_environment<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } } - bcx + bcx.bl } pub enum ClosureEnv<'a> { @@ -109,14 +111,14 @@ pub enum ClosureEnv<'a> { } impl<'a> ClosureEnv<'a> { - pub fn load<'blk,'tcx>(self, bcx: Block<'blk, 'tcx>, arg_scope: ScopeId) - -> Block<'blk, 'tcx> + pub fn load<'r,'blk,'tcx>(self, bcx: &mut BlockContext<'r, 'blk, 'tcx>, arg_scope: ScopeId) + -> &'blk Block { match self { - ClosureEnv::NotClosure => bcx, + ClosureEnv::NotClosure => bcx.bl, ClosureEnv::Closure(freevars) => { if freevars.is_empty() { - bcx + bcx.bl } else { load_closure_environment(bcx, arg_scope, freevars) } @@ -181,20 +183,20 @@ pub fn get_or_create_declaration_if_closure<'a, 'tcx>(ccx: &CrateContext<'a, 'tc Some(Datum::new(llfn, function_type, Rvalue::new(ByValue))) } -pub enum Dest<'a, 'tcx: 'a> { - SaveIn(Block<'a, 'tcx>, ValueRef), +pub enum Dest<'r, 'a: 'r, 'tcx: 'a> { + SaveIn(&'r mut BlockContext<'r, 'a, 'tcx>, ValueRef), Ignore(&'a CrateContext<'a, 'tcx>) } -pub fn trans_closure_expr<'a, 'tcx>(dest: Dest<'a, 'tcx>, - decl: &ast::FnDecl, - body: &ast::Block, - id: ast::NodeId, - param_substs: &'tcx Substs<'tcx>) - -> Option> +pub fn trans_closure_expr<'r, 'a, 'tcx>(mut dest: Dest<'r, 'a, 'tcx>, + decl: &ast::FnDecl, + body: &ast::Block, + id: ast::NodeId, + param_substs: &'tcx Substs<'tcx>) + -> Option<&'a Block> { let ccx = match dest { - Dest::SaveIn(bcx, _) => bcx.ccx(), + Dest::SaveIn(ref mut bcx, _) => bcx.ccx(), Dest::Ignore(ccx) => ccx }; let tcx = ccx.tcx(); @@ -235,13 +237,14 @@ pub fn trans_closure_expr<'a, 'tcx>(dest: Dest<'a, 'tcx>, // Don't hoist this to the top of the function. 
It's perfectly legitimate // to have a zero-size closure (in which case dest will be `Ignore`) and // we must still generate the closure body. - let (mut bcx, dest_addr) = match dest { + let (&mut BlockContext { bl, ref mut fcx }, dest_addr) = match dest { Dest::SaveIn(bcx, p) => (bcx, p), Dest::Ignore(_) => { debug!("trans_closure() ignoring result"); return None; } }; + let mut bcx = &mut bl.with_fcx(fcx); let repr = adt::represent_type(ccx, node_id_type(bcx, id)); @@ -253,7 +256,7 @@ pub fn trans_closure_expr<'a, 'tcx>(dest: Dest<'a, 'tcx>, closure_expr_id: id }; match tcx.upvar_capture(upvar_id).unwrap() { ty::UpvarCapture::ByValue => { - bcx = datum.store_to(bcx, upvar_slot_dest); + bcx.bl = datum.store_to(bcx, upvar_slot_dest); } ty::UpvarCapture::ByRef(..) => { Store(bcx, datum.to_llref(), upvar_slot_dest); @@ -262,7 +265,7 @@ pub fn trans_closure_expr<'a, 'tcx>(dest: Dest<'a, 'tcx>, } adt::trans_set_discr(bcx, &*repr, dest_addr, 0); - Some(bcx) + Some(bcx.bl) } pub fn trans_closure_method<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, @@ -392,7 +395,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( }); let sig = ty::erase_late_bound_regions(tcx, &llonce_bare_fn_ty.sig); - let (block_arena, fcx): (TypedArena<_>, FunctionContext); + let (block_arena, mut fcx): (TypedArena<_>, FunctionContext); block_arena = TypedArena::new(); fcx = new_fn_ctxt(ccx, lloncefn, @@ -402,13 +405,13 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( substs, None, &block_arena); - let mut bcx = init_function(&fcx, false, sig.output); + let mut bcx = &mut init_function(&mut fcx, false, sig.output).with_fcx(&mut fcx); // the first argument (`self`) will be the (by value) closure env. - let self_scope = fcx.push_custom_cleanup_scope(); + let self_scope = bcx.fcx.push_custom_cleanup_scope(); let self_scope_id = CustomScope(self_scope); let rvalue_mode = datum::appropriate_rvalue_mode(ccx, closure_ty); - let llself = get_param(lloncefn, fcx.arg_pos(0) as u32); + let llself = get_param(lloncefn, bcx.fcx.arg_pos(0) as u32); let env_datum = Datum::new(llself, closure_ty, Rvalue::new(rvalue_mode)); let env_datum = unpack_datum!(bcx, env_datum.to_lvalue_datum_in_scope(bcx, "self", @@ -427,26 +430,26 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( let llargs: Vec<_> = input_tys.iter() .enumerate() - .map(|(i, _)| get_param(lloncefn, fcx.arg_pos(i+1) as u32)) + .map(|(i, _)| get_param(lloncefn, bcx.fcx.arg_pos(i+1) as u32)) .collect(); let dest = - fcx.llretslotptr.get().map( - |_| expr::SaveIn(fcx.get_ret_slot(bcx, sig.output, "ret_slot"))); + bcx.fcx.llretslotptr.map( + |_| expr::SaveIn(bcx.fcx.get_ret_slot(bcx.bl, sig.output, "ret_slot"))); let callee_data = TraitItem(MethodData { llfn: llreffn, llself: env_datum.val }); - bcx = callee::trans_call_inner(bcx, - DebugLoc::None, - llref_fn_ty, - |bcx, _| Callee { bcx: bcx, data: callee_data }, - ArgVals(&llargs), - dest).bcx; + bcx.bl = callee::trans_call_inner(bcx, + DebugLoc::None, + llref_fn_ty, + |bcx, _| Callee { bcx: bcx.bl, data: callee_data }, + ArgVals(&llargs), + dest).bcx; - fcx.pop_custom_cleanup_scope(self_scope); + bcx.fcx.pop_custom_cleanup_scope(self_scope); - finish_fn(&fcx, bcx, sig.output, DebugLoc::None); + finish_fn(bcx.fcx, bcx.bl, sig.output, DebugLoc::None); lloncefn } diff --git a/src/librustc_trans/trans/common.rs b/src/librustc_trans/trans/common.rs index 758702f54c049..646b9a941b19b 100644 --- a/src/librustc_trans/trans/common.rs +++ b/src/librustc_trans/trans/common.rs @@ -373,15 +373,15 @@ pub struct FunctionContext<'a, 'tcx: 'a> { // immediate, this points 
to an alloca in the function. Otherwise, it's a
     // pointer to the hidden first parameter of the function. After function
     // construction, this should always be Some.
-    pub llretslotptr: Cell<Option<ValueRef>>,
+    pub llretslotptr: Option<ValueRef>,

     // These pub elements: "hoisted basic blocks" containing
     // administrative activities that have to happen in only one place in
     // the function, due to LLVM's quirks.

     // A marker for the place where we want to insert the function's static
     // allocas, so that LLVM will coalesce them into a single alloca call.
-    pub alloca_insert_pt: Cell<Option<ValueRef>>,
-    pub llreturn: Cell<Option<BasicBlockRef>>,
+    pub alloca_insert_pt: Option<ValueRef>,
+    pub llreturn: Option<BasicBlockRef>,

     // If the function has any nested return's, including something like:
     // fn foo() -> Option<Foo> { Some(Foo { x: return None }) }, then
@@ -390,7 +390,7 @@ pub struct FunctionContext<'a, 'tcx: 'a> {
     // The a value alloca'd for calls to upcalls.rust_personality. Used when
     // outputting the resume instruction.
-    pub personality: Cell<Option<ValueRef>>,
+    pub personality: Option<ValueRef>,

     // True if the caller expects this fn to use the out pointer to
     // return. Either way, your code should write into the slot llretslotptr
@@ -399,10 +399,10 @@ pub struct FunctionContext<'a, 'tcx: 'a> {

     // Maps the DefId's for local variables to the allocas created for
     // them in llallocas.
-    pub lllocals: RefCell<NodeMap<Datum<'tcx, Lvalue>>>,
+    pub lllocals: NodeMap<Datum<'tcx, Lvalue>>,

     // Same as above, but for closure upvars
-    pub llupvars: RefCell<NodeMap<ValueRef>>,
+    pub llupvars: NodeMap<ValueRef>,

     // The NodeId of the function, or -1 if it doesn't correspond to
     // a user-defined function.
@@ -417,7 +417,7 @@ pub struct FunctionContext<'a, 'tcx: 'a> {
     pub span: Option<Span>,

     // The arena that blocks are allocated from.
-    pub block_arena: &'a TypedArena<BlockS<'a, 'tcx>>,
+    pub block_arena: &'a TypedArena<Block>,

     // This function's enclosing crate context.
     pub ccx: &'a CrateContext<'a, 'tcx>,
@@ -426,12 +426,16 @@ pub struct FunctionContext<'a, 'tcx: 'a> {
     pub debug_context: debuginfo::FunctionDebugContext,

     // Cleanup scopes.
-    pub scopes: RefCell<Vec<CleanupScope<'a, 'tcx>>>,
+    pub scopes: Vec<CleanupScope<'a, 'tcx>>,

     pub cfg: Option<cfg::CFG>,
}

impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
+    pub fn tcx(&self) -> &'a ty::ctxt<'tcx> {
+        self.ccx.tcx()
+    }
+
     pub fn arg_pos(&self, arg: usize) -> usize {
         let arg = self.env_arg_pos() + arg;
         if self.llenv.is_some() {
@@ -449,80 +453,81 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
         }
     }

-    pub fn cleanup(&self) {
+    pub fn cleanup(&mut self) {
         unsafe {
             llvm::LLVMInstructionEraseFromParent(self.alloca_insert_pt
-                                                     .get()
                                                      .unwrap());
         }
     }

-    pub fn get_llreturn(&self) -> BasicBlockRef {
-        if self.llreturn.get().is_none() {
+    pub fn get_llreturn(&mut self) -> BasicBlockRef {
+        if self.llreturn.is_none() {
-            self.llreturn.set(Some(unsafe {
+            self.llreturn = Some(unsafe {
                 llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(), self.llfn,
                                                     "return\0".as_ptr() as *const _)
-            }))
+            });
         }

-        self.llreturn.get().unwrap()
+        self.llreturn.unwrap()
     }

-    pub fn get_ret_slot(&self, bcx: Block<'a, 'tcx>,
+    pub fn get_ret_slot(&mut self, bcx: &'a Block,
                         output: ty::FnOutput<'tcx>,
                         name: &str) -> ValueRef {
         if self.needs_ret_allocas {
-            base::alloca_no_lifetime(bcx, match output {
-                ty::FnConverging(output_type) => type_of::type_of(bcx.ccx(), output_type),
-                ty::FnDiverging => Type::void(bcx.ccx())
-            }, name)
+            let ty = match output {
+                ty::FnConverging(output_type) => type_of::type_of(bcx.with_fcx(self).ccx(),
+                                                                  output_type),
+                ty::FnDiverging => Type::void(bcx.with_fcx(self).ccx())
+            };
+            base::alloca_no_lifetime(&mut bcx.with_fcx(self), ty, name)
         } else {
-            self.llretslotptr.get().unwrap()
+            self.llretslotptr.unwrap()
         }
     }

-    pub fn new_block(&'a self,
+    pub fn new_block(&self,
                      is_lpad: bool,
                      name: &str,
                      opt_node_id: Option<ast::NodeId>)
-                     -> Block<'a, 'tcx> {
+                     -> &'a mut Block {
         unsafe {
             let name = CString::new(name).unwrap();
             let llbb = llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(),
                                                            self.llfn,
                                                            name.as_ptr());
-            BlockS::new(llbb, is_lpad, opt_node_id, self)
+            Block::new(llbb, is_lpad, opt_node_id).alloc(self)
         }
     }

-    pub fn new_id_block(&'a self,
+    pub fn new_id_block(&mut self,
                         name: &str,
                         node_id: ast::NodeId)
-                        -> Block<'a, 'tcx> {
+                        -> &'a mut Block {
         self.new_block(false, name, Some(node_id))
     }

-    pub fn new_temp_block(&'a self,
+    pub fn new_temp_block(&mut self,
                           name: &str)
-                          -> Block<'a, 'tcx> {
+                          -> &'a mut Block {
         self.new_block(false, name, None)
     }

-    pub fn join_blocks(&'a self,
+    pub fn join_blocks(&mut self,
                        id: ast::NodeId,
-                       in_cxs: &[Block<'a, 'tcx>])
-                       -> Block<'a, 'tcx> {
+                       in_cxs: &[&'a Block])
+                       -> &'a Block {
         let out = self.new_id_block("join", id);
         let mut reachable = false;
         for bcx in in_cxs {
             if !bcx.unreachable.get() {
-                build::Br(*bcx, out.llbb, DebugLoc::None);
+                build::Br(&mut bcx.with_fcx(self), out.llbb, DebugLoc::None);
                 reachable = true;
             }
         }

         if !reachable {
-            build::Unreachable(out);
+            build::Unreachable(&mut out.with_fcx(self));
         }
         return out;
     }
@@ -547,7 +552,7 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
// code. Each basic block we generate is attached to a function, typically
// with many basic blocks per function. All the basic blocks attached to a
// function are organized as a directed graph.
-pub struct BlockS<'blk, 'tcx: 'blk> {
+pub struct Block {
     // The BasicBlockRef returned from a call to
     // llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic
     // block to the function pointed to by llfn. We insert
@@ -563,28 +568,49 @@ pub struct BlockS<'blk, 'tcx: 'blk> {
     // AST node-id associated with this block, if any. Used for
     // debugging purposes only.
     pub opt_node_id: Option<ast::NodeId>,
-
-    // The function context for the function to which this block is
-    // attached.
-    pub fcx: &'blk FunctionContext<'blk, 'tcx>,
}

-pub type Block<'blk, 'tcx> = &'blk BlockS<'blk, 'tcx>;
-
-impl<'blk, 'tcx> BlockS<'blk, 'tcx> {
+impl Block {
     pub fn new(llbb: BasicBlockRef,
                is_lpad: bool,
-               opt_node_id: Option<ast::NodeId>,
-               fcx: &'blk FunctionContext<'blk, 'tcx>)
-               -> Block<'blk, 'tcx> {
-        fcx.block_arena.alloc(BlockS {
+               opt_node_id: Option<ast::NodeId>)
+               -> Block {
+        Block {
             llbb: llbb,
             terminated: Cell::new(false),
             unreachable: Cell::new(false),
             is_lpad: is_lpad,
             opt_node_id: opt_node_id,
-            fcx: fcx
-        })
+        }
+    }
+
+    pub fn alloc<'r, 'blk, 'tcx>(self, fcx: &FunctionContext<'blk, 'tcx>) -> &'blk mut Block {
+        fcx.block_arena.alloc(self)
+    }
+
+    pub fn with_fcx<'r, 'blk, 'tcx>(&'blk self,
+                                    fcx: &'r mut FunctionContext<'blk, 'tcx>)
+                                    -> BlockContext<'r, 'blk, 'tcx> {
+        BlockContext::new(self, fcx)
+    }
+}
+
+pub struct BlockContext<'r, 'blk: 'r, 'tcx: 'blk> {
+    pub bl: &'blk Block,
+
+    // The function context for the function to which this block is
+    // attached.
+    pub fcx: &'r mut FunctionContext<'blk, 'tcx>,
+}
+
+impl<'r, 'blk, 'tcx> BlockContext<'r, 'blk, 'tcx> {
+    pub fn new(bl: &'blk Block,
+               fcx: &'r mut FunctionContext<'blk, 'tcx>)
+               -> BlockContext<'r, 'blk, 'tcx> {
+        BlockContext {
+            bl: bl,
+            fcx: fcx,
+        }
     }

     pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> {
@@ -633,7 +659,7 @@ impl<'blk, 'tcx> BlockS<'blk, 'tcx> {
         format!("[block {:p}]", self)
     }

-    pub fn monomorphize<T>(&self, value: &T) -> T
+    pub fn monomorphize<T>(&mut self, value: &T) -> T
         where T : TypeFoldable<'tcx> + Repr<'tcx> + HasProjectionTypes + Clone
     {
         monomorphize::apply_param_substs(self.tcx(),
@@ -642,7 +668,7 @@ impl<'blk, 'tcx> BlockS<'blk, 'tcx> {
     }
}

-impl<'blk, 'tcx> mc::Typer<'tcx> for BlockS<'blk, 'tcx> {
+impl<'r, 'blk, 'tcx> mc::Typer<'tcx> for BlockContext<'r, 'blk, 'tcx> {
     fn node_ty(&self, id: ast::NodeId) -> mc::McResult<Ty<'tcx>> {
         Ok(node_id_type(self, id))
     }
@@ -690,7 +716,7 @@ impl<'blk, 'tcx> mc::Typer<'tcx> for BlockS<'blk, 'tcx> {
     }
}

-impl<'blk, 'tcx> ty::ClosureTyper<'tcx> for BlockS<'blk, 'tcx> {
+impl<'r, 'blk, 'tcx> ty::ClosureTyper<'tcx> for BlockContext<'r, 'blk, 'tcx> {
     fn param_env<'a>(&'a self) -> &'a ty::ParameterEnvironment<'a, 'tcx> {
         &self.fcx.param_env
     }
@@ -722,13 +748,13 @@ impl<'blk, 'tcx> ty::ClosureTyper<'tcx> for BlockS<'blk, 'tcx> {
     }
}

-pub struct Result<'blk, 'tcx: 'blk> {
-    pub bcx: Block<'blk, 'tcx>,
+pub struct Result<'blk> {
+    pub bcx: &'blk Block,
     pub val: ValueRef
}

-impl<'b, 'tcx> Result<'b, 'tcx> {
-    pub fn new(bcx: Block<'b, 'tcx>, val: ValueRef) -> Result<'b, 'tcx> {
+impl<'b> Result<'b> {
+    pub fn new(bcx: &'b Block, val: ValueRef) -> Result<'b> {
         Result {
             bcx: bcx,
             val: val,
@@ -908,7 +934,7 @@ pub fn C_bytes_in_context(llcx: ContextRef, bytes: &[u8]) -> ValueRef {
}

pub fn const_get_elt(cx: &CrateContext, v: ValueRef, us: &[c_uint])
-              -> ValueRef {
+                     -> ValueRef {
     unsafe {
         let r = llvm::LLVMConstExtractValue(v, us.as_ptr(),
                                             us.len() as c_uint);
@@ -970,21 +996,25 @@ pub fn is_null(val: ValueRef) -> bool {
     }
}

-pub fn monomorphize_type<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, t: Ty<'tcx>) -> Ty<'tcx> {
+pub fn monomorphize_type<'r, 'blk, 'tcx>(bcx: &BlockContext<'r, 'blk, 'tcx>,
+                                         t: Ty<'tcx>)
+                                         -> Ty<'tcx> {
     bcx.fcx.monomorphize(&t)
}

-pub fn node_id_type<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, id: ast::NodeId) -> Ty<'tcx> {
+pub fn node_id_type<'r, 'blk, 'tcx>(bcx: &BlockContext<'r, 'blk, 'tcx>,
+                                    id: ast::NodeId) -> Ty<'tcx> {
     let tcx = bcx.tcx();
     let t =
ty::node_id_to_type(tcx, id); monomorphize_type(bcx, t) } -pub fn expr_ty<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, ex: &ast::Expr) -> Ty<'tcx> { +pub fn expr_ty<'r, 'blk, 'tcx>(bcx: &BlockContext<'r, 'blk, 'tcx>, ex: &ast::Expr) -> Ty<'tcx> { node_id_type(bcx, ex.id) } -pub fn expr_ty_adjusted<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, ex: &ast::Expr) -> Ty<'tcx> { +pub fn expr_ty_adjusted<'r, 'blk, 'tcx>(bcx: &BlockContext<'r, 'blk, 'tcx>, + ex: &ast::Expr) -> Ty<'tcx> { monomorphize_type(bcx, ty::expr_ty_adjusted(bcx.tcx(), ex)) } @@ -1203,9 +1233,9 @@ pub enum ExprOrMethodCall { } pub fn node_id_substs<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - node: ExprOrMethodCall, - param_substs: &subst::Substs<'tcx>) - -> subst::Substs<'tcx> { + node: ExprOrMethodCall, + param_substs: &subst::Substs<'tcx>) + -> subst::Substs<'tcx> { let tcx = ccx.tcx(); let substs = match node { @@ -1227,7 +1257,7 @@ pub fn node_id_substs<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, &substs.erase_regions()) } -pub fn langcall(bcx: Block, +pub fn langcall(bcx: &mut BlockContext, span: Option, msg: &str, li: LangItem) diff --git a/src/librustc_trans/trans/controlflow.rs b/src/librustc_trans/trans/controlflow.rs index ab8cfa0ce3b7e..23f233dc48144 100644 --- a/src/librustc_trans/trans/controlflow.rs +++ b/src/librustc_trans/trans/controlflow.rs @@ -32,56 +32,55 @@ use syntax::parse::token::InternedString; use syntax::parse::token; use syntax::visit::Visitor; -pub fn trans_stmt<'blk, 'tcx>(cx: Block<'blk, 'tcx>, - s: &ast::Stmt) - -> Block<'blk, 'tcx> { +pub fn trans_stmt<'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + s: &ast::Stmt) + -> &'blk Block { + let mut bcx = &mut bl.with_fcx(fcx); + let _icx = push_ctxt("trans_stmt"); - let fcx = cx.fcx; - debug!("trans_stmt({})", s.repr(cx.tcx())); + debug!("trans_stmt({})", s.repr(bcx.tcx())); - if cx.unreachable.get() { - return cx; + if bcx.bl.unreachable.get() { + return bcx.bl; } - if cx.sess().asm_comments() { - add_span_comment(cx, s.span, &s.repr(cx.tcx())); + if bcx.sess().asm_comments() { + let r = s.repr(bcx.tcx()); + add_span_comment(bcx, s.span, &r); } - let mut bcx = cx; - let id = ast_util::stmt_id(s); let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), id, s.span, false); - fcx.push_ast_cleanup_scope(cleanup_debug_loc); + bcx.fcx.push_ast_cleanup_scope(cleanup_debug_loc); match s.node { ast::StmtExpr(ref e, _) | ast::StmtSemi(ref e, _) => { - bcx = trans_stmt_semi(bcx, &**e); + bcx.bl = trans_stmt_semi(bcx, &**e); } ast::StmtDecl(ref d, _) => { match d.node { ast::DeclLocal(ref local) => { - bcx = init_local(bcx, &**local); + bcx.bl = init_local(bcx, &**local); debuginfo::create_local_var_metadata(bcx, &**local); } // Inner items are visited by `trans_item`/`trans_meth`. ast::DeclItem(_) => {}, } } - ast::StmtMac(..) => cx.tcx().sess.bug("unexpanded macro") + ast::StmtMac(..) 
=> bcx.tcx().sess.bug("unexpanded macro") } - bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, ast_util::stmt_id(s)); - - return bcx; + bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx.bl, ast_util::stmt_id(s)) } -pub fn trans_stmt_semi<'blk, 'tcx>(cx: Block<'blk, 'tcx>, e: &ast::Expr) - -> Block<'blk, 'tcx> { +pub fn trans_stmt_semi<'r, 'blk, 'tcx>(cx: &mut BlockContext<'r, 'blk, 'tcx>, e: &ast::Expr) + -> &'blk Block { let _icx = push_ctxt("trans_stmt_semi"); - if cx.unreachable.get() { - return cx; + if cx.bl.unreachable.get() { + return cx.bl; } let ty = expr_ty(cx, e); @@ -92,25 +91,25 @@ pub fn trans_stmt_semi<'blk, 'tcx>(cx: Block<'blk, 'tcx>, e: &ast::Expr) } } -pub fn trans_block<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - b: &ast::Block, - mut dest: expr::Dest) - -> Block<'blk, 'tcx> { +pub fn trans_block<'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + b: &ast::Block, + mut dest: expr::Dest) + -> &'blk Block { let _icx = push_ctxt("trans_block"); - if bcx.unreachable.get() { - return bcx; + if bl.unreachable.get() { + return bl; } - let fcx = bcx.fcx; - let mut bcx = bcx; + let mut bcx = &mut bl.with_fcx(fcx); let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), b.id, b.span, true); - fcx.push_ast_cleanup_scope(cleanup_debug_loc); + bcx.fcx.push_ast_cleanup_scope(cleanup_debug_loc); for s in &b.stmts { - bcx = trans_stmt(bcx, &**s); + bcx.bl = trans_stmt(bcx, &**s); } if dest != expr::Ignore { @@ -131,45 +130,44 @@ pub fn trans_block<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, match b.expr { Some(ref e) => { - if !bcx.unreachable.get() { - bcx = expr::trans_into(bcx, &**e, dest); + if !bcx.bl.unreachable.get() { + bcx.bl = expr::trans_into(bcx, &**e, dest); } } None => { - assert!(dest == expr::Ignore || bcx.unreachable.get()); + assert!(dest == expr::Ignore || bcx.bl.unreachable.get()); } } - bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, b.id); - - return bcx; + bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx.bl, b.id) } -pub fn trans_if<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - if_id: ast::NodeId, - cond: &ast::Expr, - thn: &ast::Block, - els: Option<&ast::Expr>, - dest: expr::Dest) - -> Block<'blk, 'tcx> { +pub fn trans_if<'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + if_id: ast::NodeId, + cond: &ast::Expr, + thn: &ast::Block, + els: Option<&ast::Expr>, + dest: expr::Dest) + -> &'blk Block { + let mut bcx = &mut bl.with_fcx(fcx); + debug!("trans_if(bcx={}, if_id={}, cond={}, thn={}, dest={})", bcx.to_str(), if_id, bcx.expr_to_string(cond), thn.id, dest.to_string(bcx.ccx())); let _icx = push_ctxt("trans_if"); - if bcx.unreachable.get() { - return bcx; + if bcx.bl.unreachable.get() { + return bcx.bl; } - let mut bcx = bcx; - - let cond_val = unpack_result!(bcx, expr::trans(bcx, cond).to_llbool()); + let cond_val = unpack_result!(bcx, expr::trans(bcx, cond).to_llbool(bcx.fcx)); // Drop branches that are known to be impossible if let Some(cv) = const_to_opt_uint(cond_val) { if cv == 1 { // if true { .. } [else { .. 
}] - bcx = trans_block(bcx, &*thn, dest); + bcx.bl = trans_block(bcx, &*thn, dest); trans::debuginfo::clear_source_location(bcx.fcx); if let Some(elexpr) = els { @@ -182,17 +180,17 @@ pub fn trans_if<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, trans.visit_block(&*thn); if let Some(elexpr) = els { - bcx = expr::trans_into(bcx, &*elexpr, dest); + bcx.bl = expr::trans_into(bcx, &*elexpr, dest); trans::debuginfo::clear_source_location(bcx.fcx); } } - return bcx; + return bcx.bl; } let name = format!("then-block-{}-", thn.id); let then_bcx_in = bcx.fcx.new_id_block(&name[..], thn.id); - let then_bcx_out = trans_block(then_bcx_in, &*thn, dest); + let then_bcx_out = trans_block(&mut then_bcx_in.with_fcx(bcx.fcx), &*thn, dest); trans::debuginfo::clear_source_location(bcx.fcx); let cond_source_loc = cond.debug_loc(); @@ -201,39 +199,38 @@ pub fn trans_if<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, match els { Some(elexpr) => { let else_bcx_in = bcx.fcx.new_id_block("else-block", elexpr.id); - let else_bcx_out = expr::trans_into(else_bcx_in, &*elexpr, dest); + let else_bcx_out = expr::trans_into(&mut else_bcx_in.with_fcx(bcx.fcx), &*elexpr, dest); next_bcx = bcx.fcx.join_blocks(if_id, &[then_bcx_out, else_bcx_out]); - CondBr(bcx, cond_val, then_bcx_in.llbb, else_bcx_in.llbb, cond_source_loc); + CondBr(&mut bcx, cond_val, + then_bcx_in.llbb, else_bcx_in.llbb, cond_source_loc); } None => { next_bcx = bcx.fcx.new_id_block("next-block", if_id); - Br(then_bcx_out, next_bcx.llbb, DebugLoc::None); + Br(&mut then_bcx_out.with_fcx(bcx.fcx), next_bcx.llbb, DebugLoc::None); CondBr(bcx, cond_val, then_bcx_in.llbb, next_bcx.llbb, cond_source_loc); } } // Clear the source location because it is still set to whatever has been translated // right before. - trans::debuginfo::clear_source_location(next_bcx.fcx); + trans::debuginfo::clear_source_location(bcx.fcx); next_bcx } -pub fn trans_while<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - loop_expr: &ast::Expr, - cond: &ast::Expr, - body: &ast::Block) - -> Block<'blk, 'tcx> { +pub fn trans_while<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + loop_expr: &ast::Expr, + cond: &ast::Expr, + body: &ast::Block) + -> &'blk Block { let _icx = push_ctxt("trans_while"); - if bcx.unreachable.get() { - return bcx; + if bcx.bl.unreachable.get() { + return bcx.bl; } - let fcx = bcx.fcx; - // bcx // | // cond_bcx_in <--------+ @@ -245,43 +242,42 @@ pub fn trans_while<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // | body_bcx_out --+ // next_bcx_in - let next_bcx_in = fcx.new_id_block("while_exit", loop_expr.id); - let cond_bcx_in = fcx.new_id_block("while_cond", cond.id); - let body_bcx_in = fcx.new_id_block("while_body", body.id); + let next_bcx_in = bcx.fcx.new_id_block("while_exit", loop_expr.id); + let cond_bcx_in = bcx.fcx.new_id_block("while_cond", cond.id); + let body_bcx_in = bcx.fcx.new_id_block("while_body", body.id); - fcx.push_loop_cleanup_scope(loop_expr.id, [next_bcx_in, cond_bcx_in]); + bcx.fcx.push_loop_cleanup_scope(loop_expr.id, [next_bcx_in, cond_bcx_in]); Br(bcx, cond_bcx_in.llbb, loop_expr.debug_loc()); // compile the block where we will handle loop cleanups - let cleanup_llbb = fcx.normal_exit_block(loop_expr.id, cleanup::EXIT_BREAK); + let cleanup_llbb = bcx.fcx.normal_exit_block(loop_expr.id, cleanup::EXIT_BREAK); // compile the condition let Result {bcx: cond_bcx_out, val: cond_val} = - expr::trans(cond_bcx_in, cond).to_llbool(); + expr::trans(&mut cond_bcx_in.with_fcx(bcx.fcx), cond).to_llbool(bcx.fcx); - CondBr(cond_bcx_out, cond_val, body_bcx_in.llbb, cleanup_llbb, 
cond.debug_loc()); + CondBr(&mut cond_bcx_out.with_fcx(bcx.fcx), cond_val, + body_bcx_in.llbb, cleanup_llbb, cond.debug_loc()); // loop body: - let body_bcx_out = trans_block(body_bcx_in, body, expr::Ignore); - Br(body_bcx_out, cond_bcx_in.llbb, DebugLoc::None); + let body_bcx_out = trans_block(&mut body_bcx_in.with_fcx(bcx.fcx), body, expr::Ignore); + Br(&mut body_bcx_out.with_fcx(bcx.fcx), cond_bcx_in.llbb, DebugLoc::None); - fcx.pop_loop_cleanup_scope(loop_expr.id); + bcx.fcx.pop_loop_cleanup_scope(loop_expr.id); return next_bcx_in; } -pub fn trans_loop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - loop_expr: &ast::Expr, - body: &ast::Block) - -> Block<'blk, 'tcx> { +pub fn trans_loop<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + loop_expr: &ast::Expr, + body: &ast::Block) + -> &'blk Block { let _icx = push_ctxt("trans_loop"); - if bcx.unreachable.get() { - return bcx; + if bcx.bl.unreachable.get() { + return bcx.bl; } - let fcx = bcx.fcx; - // bcx // | // body_bcx_in @@ -296,39 +292,37 @@ pub fn trans_loop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let next_bcx_in = bcx.fcx.new_id_block("loop_exit", loop_expr.id); let body_bcx_in = bcx.fcx.new_id_block("loop_body", body.id); - fcx.push_loop_cleanup_scope(loop_expr.id, [next_bcx_in, body_bcx_in]); + bcx.fcx.push_loop_cleanup_scope(loop_expr.id, [next_bcx_in, body_bcx_in]); Br(bcx, body_bcx_in.llbb, loop_expr.debug_loc()); - let body_bcx_out = trans_block(body_bcx_in, body, expr::Ignore); - Br(body_bcx_out, body_bcx_in.llbb, DebugLoc::None); + let body_bcx_out = trans_block(&mut body_bcx_in.with_fcx(bcx.fcx), body, expr::Ignore); + Br(&mut body_bcx_out.with_fcx(bcx.fcx), body_bcx_in.llbb, DebugLoc::None); - fcx.pop_loop_cleanup_scope(loop_expr.id); + bcx.fcx.pop_loop_cleanup_scope(loop_expr.id); // If there are no predecessors for the next block, we just translated an endless loop and the // next block is unreachable if BasicBlock(next_bcx_in.llbb).pred_iter().next().is_none() { - Unreachable(next_bcx_in); + Unreachable(&mut next_bcx_in.with_fcx(bcx.fcx)); } return next_bcx_in; } -pub fn trans_break_cont<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &ast::Expr, - opt_label: Option, - exit: usize) - -> Block<'blk, 'tcx> { +pub fn trans_break_cont<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + expr: &ast::Expr, + opt_label: Option, + exit: usize) + -> &'blk Block { let _icx = push_ctxt("trans_break_cont"); - if bcx.unreachable.get() { - return bcx; + if bcx.bl.unreachable.get() { + return bcx.bl; } - let fcx = bcx.fcx; - // Locate loop that we will break to let loop_id = match opt_label { - None => fcx.top_loop_scope(), + None => bcx.fcx.top_loop_scope(), Some(_) => { match bcx.tcx().def_map.borrow().get(&expr.id).map(|d| d.full_def()) { Some(def::DefLabel(loop_id)) => loop_id, @@ -340,69 +334,70 @@ pub fn trans_break_cont<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, }; // Generate appropriate cleanup code and branch - let cleanup_llbb = fcx.normal_exit_block(loop_id, exit); + let cleanup_llbb = bcx.fcx.normal_exit_block(loop_id, exit); Br(bcx, cleanup_llbb, expr.debug_loc()); Unreachable(bcx); // anything afterwards should be ignored - return bcx; + bcx.bl } -pub fn trans_break<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &ast::Expr, - label_opt: Option) - -> Block<'blk, 'tcx> { +pub fn trans_break<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + expr: &ast::Expr, + label_opt: Option) + -> &'blk Block { return trans_break_cont(bcx, expr, label_opt, cleanup::EXIT_BREAK); } -pub fn trans_cont<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: 
&ast::Expr, - label_opt: Option) - -> Block<'blk, 'tcx> { +pub fn trans_cont<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + expr: &ast::Expr, + label_opt: Option) + -> &'blk Block { return trans_break_cont(bcx, expr, label_opt, cleanup::EXIT_LOOP); } -pub fn trans_ret<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - return_expr: &ast::Expr, - retval_expr: Option<&ast::Expr>) - -> Block<'blk, 'tcx> { +pub fn trans_ret<'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + return_expr: &ast::Expr, + retval_expr: Option<&ast::Expr>) + -> &'blk Block { let _icx = push_ctxt("trans_ret"); - if bcx.unreachable.get() { - return bcx; + if bl.unreachable.get() { + return bl; } - let fcx = bcx.fcx; - let mut bcx = bcx; - let dest = match (fcx.llretslotptr.get(), retval_expr) { + let mut bcx = &mut bl.with_fcx(fcx); + let dest = match (bcx.fcx.llretslotptr, retval_expr) { (Some(_), Some(retval_expr)) => { let ret_ty = expr_ty_adjusted(bcx, &*retval_expr); - expr::SaveIn(fcx.get_ret_slot(bcx, ty::FnConverging(ret_ty), "ret_slot")) + expr::SaveIn(bcx.fcx.get_ret_slot(bcx.bl, ty::FnConverging(ret_ty), "ret_slot")) } _ => expr::Ignore, }; if let Some(x) = retval_expr { - bcx = expr::trans_into(bcx, &*x, dest); + bcx.bl = expr::trans_into(bcx, &*x, dest); match dest { - expr::SaveIn(slot) if fcx.needs_ret_allocas => { - Store(bcx, slot, fcx.llretslotptr.get().unwrap()); + expr::SaveIn(slot) if bcx.fcx.needs_ret_allocas => { + let p = bcx.fcx.llretslotptr.unwrap(); + Store(bcx, slot, p); } _ => {} } } - let cleanup_llbb = fcx.return_exit_block(); + let cleanup_llbb = bcx.fcx.return_exit_block(); Br(bcx, cleanup_llbb, return_expr.debug_loc()); Unreachable(bcx); - return bcx; + bcx.bl } -pub fn trans_fail<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - call_info: NodeIdAndSpan, - fail_str: InternedString) - -> Block<'blk, 'tcx> { +pub fn trans_fail<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + call_info: NodeIdAndSpan, + fail_str: InternedString) + -> &'blk Block { let ccx = bcx.ccx(); let _icx = push_ctxt("trans_fail_value"); - if bcx.unreachable.get() { - return bcx; + if bcx.bl.unreachable.get() { + return bcx.bl; } let v_str = C_str_slice(ccx, fail_str); @@ -414,25 +409,25 @@ pub fn trans_fail<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let expr_file_line = consts::addr_of(ccx, expr_file_line_const, "panic_loc"); let args = vec!(expr_file_line); let did = langcall(bcx, Some(call_info.span), "", PanicFnLangItem); - let bcx = callee::trans_lang_call(bcx, - did, - &args[..], - Some(expr::Ignore), - call_info.debug_loc()).bcx; - Unreachable(bcx); - return bcx; + let bl = callee::trans_lang_call(bcx, + did, + &args[..], + Some(expr::Ignore), + call_info.debug_loc()).bcx; + Unreachable(&mut bl.with_fcx(bcx.fcx)); + bl } -pub fn trans_fail_bounds_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - call_info: NodeIdAndSpan, - index: ValueRef, - len: ValueRef) - -> Block<'blk, 'tcx> { +pub fn trans_fail_bounds_check<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + call_info: NodeIdAndSpan, + index: ValueRef, + len: ValueRef) + -> &'blk Block { let ccx = bcx.ccx(); let _icx = push_ctxt("trans_fail_bounds_check"); - if bcx.unreachable.get() { - return bcx; + if bcx.bl.unreachable.get() { + return bcx.bl; } // Extract the file/line from the span @@ -446,11 +441,11 @@ pub fn trans_fail_bounds_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let file_line = consts::addr_of(ccx, file_line_const, "panic_bounds_check_loc"); let args = vec!(file_line, index, len); let did = langcall(bcx, 
Some(call_info.span), "", PanicBoundsCheckFnLangItem); - let bcx = callee::trans_lang_call(bcx, - did, - &args[..], - Some(expr::Ignore), - call_info.debug_loc()).bcx; - Unreachable(bcx); - return bcx; + let bl = callee::trans_lang_call(bcx, + did, + &args[..], + Some(expr::Ignore), + call_info.debug_loc()).bcx; + Unreachable(&mut bl.with_fcx(bcx.fcx)); + bl } diff --git a/src/librustc_trans/trans/datum.rs b/src/librustc_trans/trans/datum.rs index dd32ed3bc1e59..70150b412d9f4 100644 --- a/src/librustc_trans/trans/datum.rs +++ b/src/librustc_trans/trans/datum.rs @@ -126,7 +126,7 @@ pub struct Datum<'tcx, K> { } pub struct DatumBlock<'blk, 'tcx: 'blk, K> { - pub bcx: Block<'blk, 'tcx>, + pub bcx: &'blk Block, pub datum: Datum<'tcx, K>, } @@ -174,35 +174,35 @@ pub fn immediate_rvalue<'tcx>(val: ValueRef, ty: Ty<'tcx>) -> Datum<'tcx, Rvalue return Datum::new(val, ty, Rvalue::new(ByValue)); } -pub fn immediate_rvalue_bcx<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - val: ValueRef, - ty: Ty<'tcx>) - -> DatumBlock<'blk, 'tcx, Rvalue> { - return DatumBlock::new(bcx, immediate_rvalue(val, ty)) +pub fn immediate_rvalue_bcx<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + val: ValueRef, + ty: Ty<'tcx>) + -> DatumBlock<'blk, 'tcx, Rvalue> { + DatumBlock::new(bcx.bl, immediate_rvalue(val, ty)) } /// Allocates temporary space on the stack using alloca() and returns a by-ref Datum pointing to /// it. The memory will be dropped upon exit from `scope`. The callback `populate` should /// initialize the memory. -pub fn lvalue_scratch_datum<'blk, 'tcx, A, F>(bcx: Block<'blk, 'tcx>, - ty: Ty<'tcx>, - name: &str, - scope: cleanup::ScopeId, - arg: A, - populate: F) - -> DatumBlock<'blk, 'tcx, Lvalue> where - F: FnOnce(A, Block<'blk, 'tcx>, ValueRef) -> Block<'blk, 'tcx>, +pub fn lvalue_scratch_datum<'r, 'blk, 'tcx, A, F>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + ty: Ty<'tcx>, + name: &str, + scope: cleanup::ScopeId, + arg: A, + populate: F) + -> DatumBlock<'blk, 'tcx, Lvalue> where + F: FnOnce(A, &mut BlockContext<'r, 'blk, 'tcx>, ValueRef) -> &'blk Block { let llty = type_of::type_of(bcx.ccx(), ty); let scratch = alloca(bcx, llty, name); // Subtle. Populate the scratch memory *before* scheduling cleanup. - let bcx = populate(arg, bcx, scratch); + let bl = populate(arg, bcx, scratch); bcx.fcx.schedule_lifetime_end(scope, scratch); bcx.fcx.schedule_drop_mem(scope, scratch, ty); - DatumBlock::new(bcx, Datum::new(scratch, ty, Lvalue)) + DatumBlock::new(bl, Datum::new(scratch, ty, Lvalue)) } /// Allocates temporary space on the stack using alloca() and returns a by-ref Datum pointing to @@ -210,10 +210,10 @@ pub fn lvalue_scratch_datum<'blk, 'tcx, A, F>(bcx: Block<'blk, 'tcx>, /// necessary, but in the case of automatic rooting in match statements it is possible to have /// temporaries that may not get initialized if a certain arm is not taken, so we must zero them. /// You must arrange any cleanups etc yourself! 
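// Editor's sketch, not a hunk from the patch: how a caller might drive the
// reworked lvalue_scratch_datum defined above. The populate closure now
// receives the BlockContext as an argument instead of capturing it, returns
// only the &'blk Block, and runs before any cleanup is scheduled;
// rvalue_scratch_datum (next hunk) skips both steps and leaves cleanup to
// the caller. `elem_ty`, `scope_id` and `init_val` are hypothetical names.
let DatumBlock { bcx: bl, datum } =
    lvalue_scratch_datum(bcx, elem_ty, "tmp", scope_id, init_val,
                         |init_val, bcx, dst| {
                             // initialize the scratch memory, then hand the
                             // plain block back to lvalue_scratch_datum
                             Store(bcx, init_val, dst);
                             bcx.bl
                         });
bcx.bl = bl; // re-pair the returned block with the caller's FunctionContext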
-pub fn rvalue_scratch_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - ty: Ty<'tcx>, - name: &str) - -> Datum<'tcx, Rvalue> { +pub fn rvalue_scratch_datum<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + ty: Ty<'tcx>, + name: &str) + -> Datum<'tcx, Rvalue> { let llty = type_of::type_of(bcx.ccx(), ty); let scratch = alloca(bcx, llty, name); Datum::new(scratch, ty, Rvalue::new(ByRef)) @@ -231,7 +231,7 @@ pub fn appropriate_rvalue_mode<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, } fn add_rvalue_clean<'a, 'tcx>(mode: RvalueMode, - fcx: &FunctionContext<'a, 'tcx>, + fcx: &mut FunctionContext<'a, 'tcx>, scope: cleanup::ScopeId, val: ValueRef, ty: Ty<'tcx>) { @@ -248,11 +248,11 @@ pub trait KindOps { /// Take appropriate action after the value in `datum` has been /// stored to a new location. - fn post_store<'blk, 'tcx>(&self, - bcx: Block<'blk, 'tcx>, - val: ValueRef, - ty: Ty<'tcx>) - -> Block<'blk, 'tcx>; + fn post_store<'r, 'blk, 'tcx>(&self, + bcx: &mut BlockContext<'r, 'blk, 'tcx>, + val: ValueRef, + ty: Ty<'tcx>) + -> &'blk Block; /// True if this mode is a reference mode, meaning that the datum's /// val field is a pointer to the actual value @@ -264,17 +264,17 @@ pub trait KindOps { } impl KindOps for Rvalue { - fn post_store<'blk, 'tcx>(&self, - bcx: Block<'blk, 'tcx>, - _val: ValueRef, - _ty: Ty<'tcx>) - -> Block<'blk, 'tcx> { + fn post_store<'r, 'blk, 'tcx>(&self, + bcx: &mut BlockContext<'r, 'blk, 'tcx>, + _val: ValueRef, + _ty: Ty<'tcx>) + -> &'blk Block { // No cleanup is scheduled for an rvalue, so we don't have // to do anything after a move to cancel or duplicate it. if self.is_by_ref() { call_lifetime_end(bcx, _val); } - bcx + bcx.bl } fn is_by_ref(&self) -> bool { @@ -289,18 +289,18 @@ impl KindOps for Rvalue { impl KindOps for Lvalue { /// If an lvalue is moved, we must zero out the memory in which it resides so as to cancel /// cleanup. If an @T lvalue is copied, we must increment the reference count. - fn post_store<'blk, 'tcx>(&self, - bcx: Block<'blk, 'tcx>, - val: ValueRef, - ty: Ty<'tcx>) - -> Block<'blk, 'tcx> { + fn post_store<'r, 'blk, 'tcx>(&self, + bcx: &mut BlockContext<'r, 'blk, 'tcx>, + val: ValueRef, + ty: Ty<'tcx>) + -> &'blk Block { let _icx = push_ctxt("::post_store"); if bcx.fcx.type_needs_drop(ty) { // cancel cleanup of affine values by drop-filling the memory let () = drop_done_fill_mem(bcx, val, ty); - bcx + bcx.bl } else { - bcx + bcx.bl } } @@ -314,11 +314,11 @@ impl KindOps for Lvalue { } impl KindOps for Expr { - fn post_store<'blk, 'tcx>(&self, - bcx: Block<'blk, 'tcx>, - val: ValueRef, - ty: Ty<'tcx>) - -> Block<'blk, 'tcx> { + fn post_store<'r, 'blk, 'tcx>(&self, + bcx: &mut BlockContext<'r, 'blk, 'tcx>, + val: ValueRef, + ty: Ty<'tcx>) + -> &'blk Block { match *self { LvalueExpr => Lvalue.post_store(bcx, val, ty), RvalueExpr(ref r) => r.post_store(bcx, val, ty), @@ -342,7 +342,7 @@ impl<'tcx> Datum<'tcx, Rvalue> { /// longer an rvalue datum; hence, this function consumes the datum and returns the contained /// ValueRef. pub fn add_clean<'a>(self, - fcx: &FunctionContext<'a, 'tcx>, + fcx: &mut FunctionContext<'a, 'tcx>, scope: cleanup::ScopeId) -> ValueRef { add_rvalue_clean(self.kind.mode, fcx, scope, self.val, self.ty); @@ -351,17 +351,15 @@ impl<'tcx> Datum<'tcx, Rvalue> { /// Returns an lvalue datum (that is, a by ref datum with cleanup scheduled). If `self` is not /// already an lvalue, cleanup will be scheduled in the temporary scope for `expr_id`. 
- pub fn to_lvalue_datum_in_scope<'blk>(self, - bcx: Block<'blk, 'tcx>, - name: &str, - scope: cleanup::ScopeId) - -> DatumBlock<'blk, 'tcx, Lvalue> { - let fcx = bcx.fcx; - + pub fn to_lvalue_datum_in_scope<'r, 'blk>(self, + bcx: &mut BlockContext<'r, 'blk, 'tcx>, + name: &str, + scope: cleanup::ScopeId) + -> DatumBlock<'blk, 'tcx, Lvalue> { match self.kind.mode { ByRef => { - add_rvalue_clean(ByRef, fcx, scope, self.val, self.ty); - DatumBlock::new(bcx, Datum::new(self.val, self.ty, Lvalue)) + add_rvalue_clean(ByRef, bcx.fcx, scope, self.val, self.ty); + DatumBlock::new(bcx.bl, Datum::new(self.val, self.ty, Lvalue)) } ByValue => { @@ -372,32 +370,32 @@ impl<'tcx> Datum<'tcx, Rvalue> { } } - pub fn to_ref_datum<'blk>(self, bcx: Block<'blk, 'tcx>) - -> DatumBlock<'blk, 'tcx, Rvalue> { + pub fn to_ref_datum<'r, 'blk>(self, bcx: &mut BlockContext<'r, 'blk, 'tcx>) + -> DatumBlock<'blk, 'tcx, Rvalue> { let mut bcx = bcx; match self.kind.mode { - ByRef => DatumBlock::new(bcx, self), + ByRef => DatumBlock::new(bcx.bl, self), ByValue => { let scratch = rvalue_scratch_datum(bcx, self.ty, "to_ref"); - bcx = self.store_to(bcx, scratch.val); + let bcx = self.store_to(bcx, scratch.val); DatumBlock::new(bcx, scratch) } } } - pub fn to_appropriate_datum<'blk>(self, bcx: Block<'blk, 'tcx>) - -> DatumBlock<'blk, 'tcx, Rvalue> { + pub fn to_appropriate_datum<'r, 'blk>(self, bcx: &mut BlockContext<'r, 'blk, 'tcx>) + -> DatumBlock<'blk, 'tcx, Rvalue> { match self.appropriate_rvalue_mode(bcx.ccx()) { ByRef => { self.to_ref_datum(bcx) } ByValue => { match self.kind.mode { - ByValue => DatumBlock::new(bcx, self), + ByValue => DatumBlock::new(bcx.bl, self), ByRef => { let llval = load_ty(bcx, self.val, self.ty); call_lifetime_end(bcx, self.val); - DatumBlock::new(bcx, Datum::new(llval, self.ty, Rvalue::new(ByValue))) + DatumBlock::new(bcx.bl, Datum::new(llval, self.ty, Rvalue::new(ByValue))) } } } @@ -412,34 +410,34 @@ impl<'tcx> Datum<'tcx, Rvalue> { /// here since we can `match self.kind` rather than having to implement /// generic methods in `KindOps`.) impl<'tcx> Datum<'tcx, Expr> { - fn match_kind(self, if_lvalue: F, if_rvalue: G) -> R where - F: FnOnce(Datum<'tcx, Lvalue>) -> R, - G: FnOnce(Datum<'tcx, Rvalue>) -> R, + fn match_kind(self, t: T, if_lvalue: F, if_rvalue: G) -> R where + F: FnOnce(T, Datum<'tcx, Lvalue>) -> R, + G: FnOnce(T, Datum<'tcx, Rvalue>) -> R, { let Datum { val, ty, kind } = self; match kind { - LvalueExpr => if_lvalue(Datum::new(val, ty, Lvalue)), - RvalueExpr(r) => if_rvalue(Datum::new(val, ty, r)), + LvalueExpr => if_lvalue(t, Datum::new(val, ty, Lvalue)), + RvalueExpr(r) => if_rvalue(t, Datum::new(val, ty, r)), } } /// Asserts that this datum *is* an lvalue and returns it. 
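// Editor's note, with a sketch that is not from the patch: match_kind grew
// the threaded `t: T` parameter because two closures that both capture a
// &mut BlockContext are rejected by the borrow checker, even though only one
// of them can ever run. Passing the context through as `t` lends it to
// whichever closure is actually called, as to_lvalue_datum does further
// below (`datum` and `scope` are hypothetical placeholders here):
datum.match_kind(bcx,
                 |bcx, lv| DatumBlock::new(bcx.bl, lv),
                 |bcx, rv| rv.to_lvalue_datum_in_scope(bcx, "lv", scope));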
#[allow(dead_code)] // potentially useful - pub fn assert_lvalue(self, bcx: Block) -> Datum<'tcx, Lvalue> { - self.match_kind( - |d| d, - |_| bcx.sess().bug("assert_lvalue given rvalue")) + pub fn assert_lvalue(self, bcx: &mut BlockContext) -> Datum<'tcx, Lvalue> { + self.match_kind((), + |_, d| d, + |_, _| bcx.sess().bug("assert_lvalue given rvalue")) } - pub fn store_to_dest<'blk>(self, - bcx: Block<'blk, 'tcx>, - dest: expr::Dest, - expr_id: ast::NodeId) - -> Block<'blk, 'tcx> { + pub fn store_to_dest<'r, 'blk>(self, + bcx: &mut BlockContext<'r, 'blk, 'tcx>, + dest: expr::Dest, + expr_id: ast::NodeId) + -> &'blk Block { match dest { expr::Ignore => { self.add_clean_if_rvalue(bcx, expr_id); - bcx + bcx.bl } expr::SaveIn(addr) => { self.store_to(bcx, addr) @@ -449,54 +447,54 @@ impl<'tcx> Datum<'tcx, Expr> { /// Arranges cleanup for `self` if it is an rvalue. Use when you are done working with a value /// that may need drop. - pub fn add_clean_if_rvalue<'blk>(self, - bcx: Block<'blk, 'tcx>, - expr_id: ast::NodeId) { - self.match_kind( - |_| { /* Nothing to do, cleanup already arranged */ }, - |r| { + pub fn add_clean_if_rvalue<'r, 'blk>(self, + bcx: &mut BlockContext<'r, 'blk, 'tcx>, + expr_id: ast::NodeId) { + self.match_kind((), + |_, _| { /* Nothing to do, cleanup already arranged */ }, + |_, r| { let scope = cleanup::temporary_scope(bcx.tcx(), expr_id); r.add_clean(bcx.fcx, scope); }) } - pub fn to_lvalue_datum<'blk>(self, - bcx: Block<'blk, 'tcx>, - name: &str, - expr_id: ast::NodeId) - -> DatumBlock<'blk, 'tcx, Lvalue> { + pub fn to_lvalue_datum<'r, 'blk>(self, + bcx: &mut BlockContext<'r, 'blk, 'tcx>, + name: &str, + expr_id: ast::NodeId) + -> DatumBlock<'blk, 'tcx, Lvalue> { debug!("to_lvalue_datum self: {}", self.to_string(bcx.ccx())); - self.match_kind( - |l| DatumBlock::new(bcx, l), - |r| { + self.match_kind(bcx, + |bcx, l| DatumBlock::new(bcx.bl, l), + |bcx, r| { let scope = cleanup::temporary_scope(bcx.tcx(), expr_id); r.to_lvalue_datum_in_scope(bcx, name, scope) }) } /// Ensures that we have an rvalue datum (that is, a datum with no cleanup scheduled). - pub fn to_rvalue_datum<'blk>(self, - bcx: Block<'blk, 'tcx>, - name: &'static str) - -> DatumBlock<'blk, 'tcx, Rvalue> { - self.match_kind( - |l| { + pub fn to_rvalue_datum<'r, 'blk>(self, + bcx: &mut BlockContext<'r, 'blk, 'tcx>, + name: &'static str) + -> DatumBlock<'blk, 'tcx, Rvalue> { + self.match_kind(bcx, + |bcx, l| { let mut bcx = bcx; match l.appropriate_rvalue_mode(bcx.ccx()) { ByRef => { let scratch = rvalue_scratch_datum(bcx, l.ty, name); - bcx = l.store_to(bcx, scratch.val); + let bcx = l.store_to(bcx, scratch.val); DatumBlock::new(bcx, scratch) } ByValue => { let v = load_ty(bcx, l.val, l.ty); - bcx = l.kind.post_store(bcx, l.val, l.ty); + let bcx = l.kind.post_store(bcx, l.val, l.ty); DatumBlock::new(bcx, Datum::new(v, l.ty, Rvalue::new(ByValue))) } } }, - |r| DatumBlock::new(bcx, r)) + |bcx, r| DatumBlock::new(bcx.bl, r)) } } @@ -517,15 +515,17 @@ impl<'tcx> Datum<'tcx, Lvalue> { // datum may also be unsized _without the size information_. It is the // callers responsibility to package the result in some way to make a valid // datum in that case (e.g., by making a fat pointer or opened pair). 
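// Editor's sketch, not from the patch: get_element's callback (next hunk)
// becomes `F: for<'a> FnOnce(&mut BlockContext<'a, 'blk, 'tcx>, ValueRef) ->
// ValueRef`, so the closure borrows the context only for the duration of the
// call; get_element itself still needs the context first for the Load on the
// unsized path. A call site might look like this, with `datum`, `field_ty`
// and `ix` as hypothetical placeholders:
let field = datum.get_element(bcx, field_ty,
                              |bcx, base| GEPi(bcx, base, &[0, ix]));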
- pub fn get_element<'blk, F>(&self, bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>, - gep: F) - -> Datum<'tcx, Lvalue> where - F: FnOnce(ValueRef) -> ValueRef, + pub fn get_element<'r, 'blk, F>(&self, bcx: &mut BlockContext<'r, 'blk, 'tcx>, ty: Ty<'tcx>, + gep: F) + -> Datum<'tcx, Lvalue> where + F: for<'a> FnOnce(&mut BlockContext<'a, 'blk, 'tcx>, ValueRef) -> ValueRef, { let val = if type_is_sized(bcx.tcx(), self.ty) { - gep(self.val) + gep(bcx, self.val) } else { - gep(Load(bcx, expr::get_dataptr(bcx, self.val))) + let p = expr::get_dataptr(bcx, self.val); + let ld = Load(bcx, p); + gep(bcx, ld) }; Datum { val: val, @@ -534,8 +534,8 @@ impl<'tcx> Datum<'tcx, Lvalue> { } } - pub fn get_vec_base_and_len<'blk>(&self, bcx: Block<'blk, 'tcx>) - -> (ValueRef, ValueRef) { + pub fn get_vec_base_and_len<'r, 'blk>(&self, bcx: &mut BlockContext<'r, 'blk, 'tcx>) + -> (ValueRef, ValueRef) { //! Converts a vector into the slice pair. tvec::get_base_and_len(bcx, self.val, self.ty) @@ -556,10 +556,10 @@ impl<'tcx, K: KindOps + fmt::Debug> Datum<'tcx, K> { /// Moves or copies this value into a new home, as appropriate depending on the type of the /// datum. This method consumes the datum, since it would be incorrect to go on using the datum /// if the value represented is affine (and hence the value is moved). - pub fn store_to<'blk>(self, - bcx: Block<'blk, 'tcx>, - dst: ValueRef) - -> Block<'blk, 'tcx> { + pub fn store_to<'r, 'blk>(self, + bcx: &mut BlockContext<'r, 'blk, 'tcx>, + dst: ValueRef) + -> &'blk Block { self.shallow_copy_raw(bcx, dst); self.kind.post_store(bcx, self.val, self.ty) @@ -573,14 +573,14 @@ impl<'tcx, K: KindOps + fmt::Debug> Datum<'tcx, K> { /// source value has been copied but not zeroed. Public methods are `store_to` (if you no /// longer need the source value) or `shallow_copy` (if you wish the source value to remain /// valid). - fn shallow_copy_raw<'blk>(&self, - bcx: Block<'blk, 'tcx>, - dst: ValueRef) - -> Block<'blk, 'tcx> { + fn shallow_copy_raw<'r, 'blk>(&self, + bcx: &mut BlockContext<'r, 'blk, 'tcx>, + dst: ValueRef) + -> &'blk Block { let _icx = push_ctxt("copy_to_no_check"); if type_is_zero_size(bcx.ccx(), self.ty) { - return bcx; + return bcx.bl; } if self.kind.is_by_ref() { @@ -589,16 +589,16 @@ impl<'tcx, K: KindOps + fmt::Debug> Datum<'tcx, K> { store_ty(bcx, self.val, dst, self.ty); } - return bcx; + return bcx.bl; } /// Copies the value into a new location. This function always preserves the existing datum as /// a valid value. Therefore, it does not consume `self` and, also, cannot be applied to affine /// values (since they must never be duplicated). - pub fn shallow_copy<'blk>(&self, - bcx: Block<'blk, 'tcx>, - dst: ValueRef) - -> Block<'blk, 'tcx> { + pub fn shallow_copy<'r, 'blk>(&self, + bcx: &mut BlockContext<'r, 'blk, 'tcx>, + dst: ValueRef) + -> &'blk Block { /*! * Copies the value into a new location. This function always * preserves the existing datum as a valid value. Therefore, @@ -630,7 +630,7 @@ impl<'tcx, K: KindOps + fmt::Debug> Datum<'tcx, K> { /// responsibility to cleanup the value). For this to work, the value must be something /// scalar-ish (like an int or a pointer) which (1) does not require drop glue and (2) is /// naturally passed around by value, and not by reference. 
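// Editor's note, with a sketch that is not from the patch: since Result and
// DatumBlock now carry only a &'blk Block and no fcx, a caller that wants to
// keep emitting code re-pairs the returned block with its FunctionContext.
// trans_while in controlflow.rs above already shows the shape (`cond_in` and
// `cond` are placeholders):
let Result { bcx: cond_out, val: cond_val } =
    expr::trans(&mut cond_in.with_fcx(bcx.fcx), cond).to_llbool(bcx.fcx);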
- pub fn to_llscalarish<'blk>(self, bcx: Block<'blk, 'tcx>) -> ValueRef { + pub fn to_llscalarish<'r, 'blk>(self, bcx: &mut BlockContext<'r, 'blk, 'tcx>) -> ValueRef { assert!(!bcx.fcx.type_needs_drop(self.ty)); assert!(self.appropriate_rvalue_mode(bcx.ccx()) == ByValue); if self.kind.is_by_ref() { @@ -640,14 +640,14 @@ impl<'tcx, K: KindOps + fmt::Debug> Datum<'tcx, K> { } } - pub fn to_llbool<'blk>(self, bcx: Block<'blk, 'tcx>) -> ValueRef { + pub fn to_llbool<'r, 'blk>(self, bcx: &mut BlockContext<'r, 'blk, 'tcx>) -> ValueRef { assert!(ty::type_is_bool(self.ty)); self.to_llscalarish(bcx) } } impl<'blk, 'tcx, K> DatumBlock<'blk, 'tcx, K> { - pub fn new(bcx: Block<'blk, 'tcx>, datum: Datum<'tcx, K>) + pub fn new(bcx: &'blk Block, datum: Datum<'tcx, K>) -> DatumBlock<'blk, 'tcx, K> { DatumBlock { bcx: bcx, datum: datum } } @@ -660,15 +660,16 @@ impl<'blk, 'tcx, K: KindOps + fmt::Debug> DatumBlock<'blk, 'tcx, K> { } impl<'blk, 'tcx> DatumBlock<'blk, 'tcx, Expr> { - pub fn store_to_dest(self, - dest: expr::Dest, - expr_id: ast::NodeId) -> Block<'blk, 'tcx> { + pub fn store_to_dest<'r>(self, + fcx: &mut FunctionContext<'blk, 'tcx>, + dest: expr::Dest, + expr_id: ast::NodeId) -> &'blk Block { let DatumBlock { bcx, datum } = self; - datum.store_to_dest(bcx, dest, expr_id) + datum.store_to_dest(&mut bcx.with_fcx(fcx), dest, expr_id) } - pub fn to_llbool(self) -> Result<'blk, 'tcx> { + pub fn to_llbool(self, fcx: &mut FunctionContext<'blk, 'tcx>) -> Result<'blk> { let DatumBlock { datum, bcx } = self; - Result::new(bcx, datum.to_llbool(bcx)) + Result::new(bcx, datum.to_llbool(&mut bcx.with_fcx(fcx))) } } diff --git a/src/librustc_trans/trans/debuginfo/create_scope_map.rs b/src/librustc_trans/trans/debuginfo/create_scope_map.rs index 9af22b788b77b..dbd9ed1637a50 100644 --- a/src/librustc_trans/trans/debuginfo/create_scope_map.rs +++ b/src/librustc_trans/trans/debuginfo/create_scope_map.rs @@ -511,4 +511,4 @@ fn walk_expr(cx: &CrateContext, } } } -} \ No newline at end of file +} diff --git a/src/librustc_trans/trans/debuginfo/metadata.rs b/src/librustc_trans/trans/debuginfo/metadata.rs index ab86cd7cdde59..972f30723870d 100644 --- a/src/librustc_trans/trans/debuginfo/metadata.rs +++ b/src/librustc_trans/trans/debuginfo/metadata.rs @@ -27,7 +27,7 @@ use metadata::csearch; use middle::pat_util; use middle::subst::{self, Substs}; use trans::{type_of, adt, machine, monomorphize}; -use trans::common::{self, CrateContext, FunctionContext, NormalizingClosureTyper, Block}; +use trans::common::{self, CrateContext, FunctionContext, NormalizingClosureTyper, BlockContext}; use trans::_match::{BindingInfo, TrByCopy, TrByMove, TrByRef}; use trans::type_::Type; use middle::ty::{self, Ty, ClosureTyper}; @@ -899,9 +899,9 @@ pub fn file_metadata(cx: &CrateContext, full_path: &str) -> DIFile { } /// Finds the scope metadata node for the given AST node. -pub fn scope_metadata(fcx: &FunctionContext, - node_id: ast::NodeId, - error_reporting_span: Span) +pub fn scope_metadata(fcx: &mut FunctionContext, + node_id: ast::NodeId, + error_reporting_span: Span) -> DIScope { let scope_map = &fcx.debug_context .get_ref(fcx.ccx, error_reporting_span) @@ -1919,8 +1919,8 @@ pub fn create_global_var_metadata(cx: &CrateContext, /// This function assumes that there's a datum for each pattern component of the /// local in `bcx.fcx.lllocals`. /// Adds the created metadata nodes directly to the crate's IR. 
-pub fn create_local_var_metadata(bcx: Block, local: &ast::Local) { - if bcx.unreachable.get() || +pub fn create_local_var_metadata(bcx: &mut BlockContext, local: &ast::Local) { + if bcx.bl.unreachable.get() || fn_should_be_ignored(bcx.fcx) || bcx.sess().opts.debuginfo != FullDebugInfo { return; @@ -1928,30 +1928,30 @@ pub fn create_local_var_metadata(bcx: Block, local: &ast::Local) { let cx = bcx.ccx(); let def_map = &cx.tcx().def_map; - let locals = bcx.fcx.lllocals.borrow(); - pat_util::pat_bindings(def_map, &*local.pat, |_, node_id, span, var_ident| { - let datum = match locals.get(&node_id) { - Some(datum) => datum, - None => { - bcx.sess().span_bug(span, - &format!("no entry in lllocals table for {}", - node_id)); + let (ty, v) = { + let datum = match bcx.fcx.lllocals.get(&node_id) { + Some(datum) => datum, + None => { + bcx.sess().span_bug(span, + &format!("no entry in lllocals table for {}", + node_id)); + } + }; + if unsafe { llvm::LLVMIsAAllocaInst(datum.val) } == ptr::null_mut() { + cx.sess().span_bug(span, "debuginfo::create_local_var_metadata() - \ + Referenced variable location is not an alloca!"); } + (datum.ty, datum.val) }; - if unsafe { llvm::LLVMIsAAllocaInst(datum.val) } == ptr::null_mut() { - cx.sess().span_bug(span, "debuginfo::create_local_var_metadata() - \ - Referenced variable location is not an alloca!"); - } - let scope_metadata = scope_metadata(bcx.fcx, node_id, span); declare_local(bcx, var_ident.node.name, - datum.ty, + ty, scope_metadata, - VariableAccess::DirectVariable { alloca: datum.val }, + VariableAccess::DirectVariable { alloca: v }, VariableKind::LocalVariable, span); }) @@ -1960,13 +1960,13 @@ pub fn create_local_var_metadata(bcx: Block, local: &ast::Local) { /// Creates debug information for a variable captured in a closure. /// /// Adds the created metadata nodes directly to the crate's IR. -pub fn create_captured_var_metadata<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - node_id: ast::NodeId, - env_pointer: ValueRef, - env_index: usize, - captured_by_ref: bool, - span: Span) { - if bcx.unreachable.get() || +pub fn create_captured_var_metadata<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + node_id: ast::NodeId, + env_pointer: ValueRef, + env_index: usize, + captured_by_ref: bool, + span: Span) { + if bcx.bl.unreachable.get() || fn_should_be_ignored(bcx.fcx) || bcx.sess().opts.debuginfo != FullDebugInfo { return; @@ -2049,10 +2049,10 @@ pub fn create_captured_var_metadata<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, /// match-statement arm. /// /// Adds the created metadata nodes directly to the crate's IR. -pub fn create_match_binding_metadata<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - variable_name: ast::Name, - binding: BindingInfo<'tcx>) { - if bcx.unreachable.get() || +pub fn create_match_binding_metadata<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + variable_name: ast::Name, + binding: BindingInfo<'tcx>) { + if bcx.bl.unreachable.get() || fn_should_be_ignored(bcx.fcx) || bcx.sess().opts.debuginfo != FullDebugInfo { return; @@ -2094,8 +2094,8 @@ pub fn create_match_binding_metadata<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, /// This function assumes that there's a datum for each pattern component of the /// argument in `bcx.fcx.lllocals`. /// Adds the created metadata nodes directly to the crate's IR. 
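// Editor's note, with a sketch that is not from the patch: the `(ty, v)`
// extraction in create_local_var_metadata above, repeated for
// create_argument_metadata below, is a borrow-narrowing workaround. With the
// RefCell gone, looking up the datum borrows bcx through bcx.fcx.lllocals;
// copying the two Copy fields out inside an inner block ends that borrow
// before declare_local needs bcx again. All identifiers other than
// declare_local are placeholders:
let (ty, v) = {
    let datum = bcx.fcx.lllocals.get(&node_id).unwrap(); // borrow of bcx starts
    (datum.ty, datum.val)                                // both fields are Copy
};                                                       // borrow of bcx ends
declare_local(bcx, name, ty, scope_metadata, access, kind, span);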
-pub fn create_argument_metadata(bcx: Block, arg: &ast::Arg) { - if bcx.unreachable.get() || +pub fn create_argument_metadata(bcx: &mut BlockContext, arg: &ast::Arg) { + if bcx.bl.unreachable.get() || fn_should_be_ignored(bcx.fcx) || bcx.sess().opts.debuginfo != FullDebugInfo { return; @@ -2107,23 +2107,24 @@ pub fn create_argument_metadata(bcx: Block, arg: &ast::Arg) { .debug_context .get_ref(bcx.ccx(), arg.pat.span) .fn_metadata; - let locals = bcx.fcx.lllocals.borrow(); - pat_util::pat_bindings(def_map, &*arg.pat, |_, node_id, span, var_ident| { - let datum = match locals.get(&node_id) { - Some(v) => v, - None => { - bcx.sess().span_bug(span, - &format!("no entry in lllocals table for {}", - node_id)); + let (ty, v) = { + let datum = match bcx.fcx.lllocals.get(&node_id) { + Some(v) => v, + None => { + bcx.sess().span_bug(span, + &format!("no entry in lllocals table for {}", + node_id)); + } + }; + + if unsafe { llvm::LLVMIsAAllocaInst(datum.val) } == ptr::null_mut() { + bcx.sess().span_bug(span, "debuginfo::create_argument_metadata() - \ + Referenced variable location is not an alloca!"); } + (datum.ty, datum.val) }; - if unsafe { llvm::LLVMIsAAllocaInst(datum.val) } == ptr::null_mut() { - bcx.sess().span_bug(span, "debuginfo::create_argument_metadata() - \ - Referenced variable location is not an alloca!"); - } - let argument_index = { let counter = &bcx .fcx @@ -2137,9 +2138,9 @@ pub fn create_argument_metadata(bcx: Block, arg: &ast::Arg) { declare_local(bcx, var_ident.node.name, - datum.ty, + ty, scope_metadata, - VariableAccess::DirectVariable { alloca: datum.val }, + VariableAccess::DirectVariable { alloca: v }, VariableKind::ArgumentVariable(argument_index), span); }) diff --git a/src/librustc_trans/trans/debuginfo/mod.rs b/src/librustc_trans/trans/debuginfo/mod.rs index 4e5407016ba9b..008f69f1c78f7 100644 --- a/src/librustc_trans/trans/debuginfo/mod.rs +++ b/src/librustc_trans/trans/debuginfo/mod.rs @@ -26,7 +26,7 @@ use llvm::{ModuleRef, ContextRef, ValueRef}; use llvm::debuginfo::{DIFile, DIType, DIScope, DIBuilderRef, DISubprogram, DIArray, DIDescriptor, FlagPrototyped}; use middle::subst::{self, Substs}; -use trans::common::{NodeIdAndSpan, CrateContext, FunctionContext, Block}; +use trans::common::{NodeIdAndSpan, CrateContext, FunctionContext, BlockContext}; use trans; use trans::monomorphize; use middle::ty::{self, Ty, ClosureTyper}; @@ -535,13 +535,13 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } } -fn declare_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - variable_name: ast::Name, - variable_type: Ty<'tcx>, - scope_metadata: DIScope, - variable_access: VariableAccess, - variable_kind: VariableKind, - span: Span) { +fn declare_local<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + variable_name: ast::Name, + variable_type: Ty<'tcx>, + scope_metadata: DIScope, + variable_access: VariableAccess, + variable_kind: VariableKind, + span: Span) { let cx: &CrateContext = bcx.ccx(); let filename = span_start(cx, span).file.name.clone(); @@ -586,7 +586,7 @@ fn declare_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, metadata, address_operations.as_ptr(), address_operations.len() as c_uint, - bcx.llbb); + bcx.bl.llbb); llvm::LLVMSetInstDebugLocation(trans::build::B(bcx).llbuilder, instr); } @@ -613,7 +613,7 @@ pub enum DebugLoc { } impl DebugLoc { - pub fn apply(&self, fcx: &FunctionContext) { + pub fn apply(&self, fcx: &mut FunctionContext) { match *self { DebugLoc::At(node_id, span) => { source_loc::set_source_location(fcx, node_id, span); diff 
--git a/src/librustc_trans/trans/debuginfo/source_loc.rs b/src/librustc_trans/trans/debuginfo/source_loc.rs index 981a23fd664a9..eb6e4df621efa 100644 --- a/src/librustc_trans/trans/debuginfo/source_loc.rs +++ b/src/librustc_trans/trans/debuginfo/source_loc.rs @@ -89,7 +89,7 @@ pub fn get_cleanup_debug_loc_for_ast_node<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, /// Maps to a call to llvm::LLVMSetCurrentDebugLocation(...). The node_id /// parameter is used to reliably find the correct visibility scope for the code /// position. -pub fn set_source_location(fcx: &FunctionContext, +pub fn set_source_location(fcx: &mut FunctionContext, node_id: ast::NodeId, span: Span) { match fcx.debug_context { @@ -109,53 +109,61 @@ pub fn set_source_location(fcx: &FunctionContext, debug!("set_source_location: {}", cx.sess().codemap().span_to_string(span)); - if function_debug_context.source_locations_enabled.get() { - let loc = span_start(cx, span); - let scope = scope_metadata(fcx, node_id, span); - - set_debug_location(cx, InternalDebugLocation::new(scope, - loc.line, - loc.col.to_usize())); - } else { + if !function_debug_context.source_locations_enabled.get() { set_debug_location(cx, UnknownLocation); + return; } } } + + let loc = span_start(fcx.ccx, span); + let scope = scope_metadata(fcx, node_id, span); + + set_debug_location(fcx.ccx, InternalDebugLocation::new(scope, + loc.line, + loc.col.to_usize())); } /// This function makes sure that all debug locations emitted while executing /// `wrapped_function` are set to the given `debug_loc`. -pub fn with_source_location_override(fcx: &FunctionContext, - debug_loc: DebugLoc, - wrapped_function: F) -> R - where F: FnOnce() -> R +pub fn with_source_location_override<'blk, 'tcx, F, R>(fcx: &mut FunctionContext<'blk, 'tcx>, + debug_loc: DebugLoc, + wrapped_function: F) -> R + where F: FnOnce(&mut FunctionContext<'blk, 'tcx>) -> R { - match fcx.debug_context { + let slo = match fcx.debug_context { FunctionDebugContext::DebugInfoDisabled => { - wrapped_function() + return wrapped_function(fcx) } FunctionDebugContext::FunctionWithoutDebugInfo => { set_debug_location(fcx.ccx, UnknownLocation); - wrapped_function() + return wrapped_function(fcx) } FunctionDebugContext::RegularContext(box ref function_debug_context) => { - if function_debug_context.source_location_override.get() { - wrapped_function() - } else { - debug_loc.apply(fcx); - function_debug_context.source_location_override.set(true); - let result = wrapped_function(); - function_debug_context.source_location_override.set(false); - result - } + function_debug_context.source_location_override.get() } + }; + + if slo { + return wrapped_function(fcx); + } + + debug_loc.apply(fcx); + + if let FunctionDebugContext::RegularContext(box ref fdc) = fcx.debug_context { + fdc.source_location_override.set(true); + } + let result = wrapped_function(fcx); + if let FunctionDebugContext::RegularContext(box ref fdc) = fcx.debug_context { + fdc.source_location_override.set(false); } + result } /// Clears the current debug location. /// /// Instructions generated hereafter won't be assigned a source location. -pub fn clear_source_location(fcx: &FunctionContext) { +pub fn clear_source_location(fcx: &mut FunctionContext) { if fn_should_be_ignored(fcx) { return; } @@ -169,7 +177,7 @@ pub fn clear_source_location(fcx: &FunctionContext) { /// they are disabled when beginning to translate a new function. 
This function /// switches source location emitting on and must therefore be called before the /// first real statement/expression of the function is translated. -pub fn start_emitting_source_locations(fcx: &FunctionContext) { +pub fn start_emitting_source_locations(fcx: &mut FunctionContext) { match fcx.debug_context { FunctionDebugContext::RegularContext(box ref data) => { data.source_locations_enabled.set(true) diff --git a/src/librustc_trans/trans/debuginfo/utils.rs b/src/librustc_trans/trans/debuginfo/utils.rs index 0c12f6ed095f1..2b3f8c1d3e2c9 100644 --- a/src/librustc_trans/trans/debuginfo/utils.rs +++ b/src/librustc_trans/trans/debuginfo/utils.rs @@ -78,7 +78,7 @@ pub fn DIB(cx: &CrateContext) -> DIBuilderRef { cx.dbg_cx().as_ref().unwrap().builder } -pub fn fn_should_be_ignored(fcx: &FunctionContext) -> bool { +pub fn fn_should_be_ignored(fcx: &mut FunctionContext) -> bool { match fcx.debug_context { FunctionDebugContext::RegularContext(_) => false, _ => true diff --git a/src/librustc_trans/trans/expr.rs b/src/librustc_trans/trans/expr.rs index 3ebb56d1dd899..abdcf55ac6ac6 100644 --- a/src/librustc_trans/trans/expr.rs +++ b/src/librustc_trans/trans/expr.rs @@ -111,11 +111,12 @@ impl Dest { /// This function is equivalent to `trans(bcx, expr).store_to_dest(dest)` but it may generate /// better optimized LLVM code. -pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &ast::Expr, - dest: Dest) - -> Block<'blk, 'tcx> { - let mut bcx = bcx; +pub fn trans_into<'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + expr: &ast::Expr, + dest: Dest) + -> &'blk Block { + let mut bcx = &mut bl.with_fcx(fcx); debuginfo::set_source_location(bcx.fcx, expr.id, expr.span); @@ -138,10 +139,11 @@ pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // Cast pointer to destination, because constants // have different types. let lldest = PointerCast(bcx, lldest, val_ty(global)); - memcpy_ty(bcx, lldest, global, expr_ty_adjusted(bcx, expr)); + let expr_ty = expr_ty_adjusted(bcx, expr); + memcpy_ty(bcx, lldest, global, expr_ty); } // Don't do anything in the Ignore case, consts don't need drop. - return bcx; + return bcx.bl; } else { // The only way we're going to see a `const` at this point is if // it prefers in-place instantiation, likely because it contains @@ -156,19 +158,20 @@ pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // inside the current AST scope. // These should record no cleanups anyways, `const` // can't have destructors. - let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(), + let scopes = mem::replace(&mut bcx.fcx.scopes, vec![]); // Lock emitted debug locations to the location of // the constant reference expression.
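
A note on the recurring shape in these source_loc.rs changes: once `FunctionContext` is passed as `&mut` rather than `&` with interior mutability, a callback can no longer capture the context while the caller still holds it, so `with_source_location_override` now hands the context back to the closure as an argument. A minimal compilable sketch of that shape, using a toy `Fcx` in place of the real `FunctionContext`:

struct Fcx {
    override_active: bool,
}

// The closure receives the `&mut Fcx` instead of capturing it, so there is
// only ever one live mutable borrow of the context.
fn with_override<F, R>(fcx: &mut Fcx, f: F) -> R
where
    F: FnOnce(&mut Fcx) -> R,
{
    fcx.override_active = true;   // install the override
    let result = f(&mut *fcx);    // lend the context to the wrapped code
    fcx.override_active = false;  // restore it afterwards
    result
}

fn main() {
    let mut fcx = Fcx { override_active: false };
    let seen = with_override(&mut fcx, |fcx| fcx.override_active);
    assert!(seen && !fcx.override_active);
}
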
- debuginfo::with_source_location_override(bcx.fcx, - expr.debug_loc(), - || { - bcx = trans_into(bcx, const_expr, dest) + let bl = bcx.bl; + bcx.bl = debuginfo::with_source_location_override(bcx.fcx, + expr.debug_loc(), + |fcx| { + trans_into(&mut bl.with_fcx(fcx), const_expr, dest) }); - let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(), + let scopes = mem::replace(&mut bcx.fcx.scopes, scopes); assert!(scopes.is_empty()); - return bcx; + return bcx.bl; } _ => {} } @@ -187,9 +190,9 @@ pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, bcx.fcx.push_ast_cleanup_scope(cleanup_debug_loc); let kind = ty::expr_kind(bcx.tcx(), expr); - bcx = match kind { + let bl = match kind { ty::LvalueExpr | ty::RvalueDatumExpr => { - trans_unadjusted(bcx, expr).store_to_dest(dest, expr.id) + trans_unadjusted(bcx, expr).store_to_dest(bcx.fcx, dest, expr.id) } ty::RvalueDpsExpr => { trans_rvalue_dps_unadjusted(bcx, expr, dest) @@ -199,19 +202,19 @@ pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } }; - bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id) + bcx.fcx.pop_and_trans_ast_cleanup_scope(bl, expr.id) } /// Translates an expression, returning a datum (and new block) encapsulating the result. When /// possible, it is preferred to use `trans_into`, as that may avoid creating a temporary on the /// stack. -pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &ast::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - debug!("trans(expr={})", bcx.expr_to_string(expr)); +pub fn trans<'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + expr: &ast::Expr) + -> DatumBlock<'blk, 'tcx, Expr> { + let mut bcx = &mut bl.with_fcx(fcx); - let mut bcx = bcx; - let fcx = bcx.fcx; + debug!("trans(expr={})", bcx.expr_to_string(expr)); let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap(); let adjusted_global = !qualif.intersects(check_const::ConstQualif::NON_STATIC_BORROWS); let global = if !qualif.intersects( @@ -229,7 +232,7 @@ pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let llty = type_of::type_of(bcx.ccx(), const_ty); let global = PointerCast(bcx, global, llty.ptr_to()); let datum = Datum::new(global, const_ty, Lvalue); - return DatumBlock::new(bcx, datum.to_expr_datum()); + return DatumBlock::new(bcx.bl, datum.to_expr_datum()); } // Otherwise, keep around and perform adjustments, if needed. 
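
The `trans_into`/`trans` signatures above set the template for the whole patch: destructure the incoming `&mut BlockContext` into the raw block `bl` and the function context `fcx`, then rebuild a context with `with_fcx` whenever translation continues in a (possibly new) block. A toy model of that plumbing; `Block`, `Fcx`, and `trans_step` are stand-ins, not the real rustc types:

struct Block {
    id: u32,
}

struct Fcx {
    blocks: Vec<Block>,
}

struct BlockContext<'r, 'blk> {
    bl: &'blk Block,
    fcx: &'r mut Fcx,
}

impl Block {
    // Rebuild a context around the same function state but a (possibly
    // different) current block: the `bl.with_fcx(fcx)` idiom in the diff.
    fn with_fcx<'r, 'blk>(&'blk self, fcx: &'r mut Fcx) -> BlockContext<'r, 'blk> {
        BlockContext { bl: self, fcx }
    }
}

// A translation step takes the bundled context and returns the block it
// finished in, which the caller re-wraps before continuing.
fn trans_step<'r, 'blk>(bcx: &mut BlockContext<'r, 'blk>) -> &'blk Block {
    bcx.bl
}

fn main() {
    let block = Block { id: 0 };
    let mut fcx = Fcx { blocks: vec![] };
    let bl = {
        let mut bcx = block.with_fcx(&mut fcx);
        trans_step(&mut bcx)
    };
    assert_eq!(bl.id, 0);
    assert!(fcx.blocks.is_empty());
}
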
@@ -269,7 +272,7 @@ pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, expr.id, expr.span, false); - fcx.push_ast_cleanup_scope(cleanup_debug_loc); + bcx.fcx.push_ast_cleanup_scope(cleanup_debug_loc); let datum = match global { Some(rvalue) => rvalue.to_expr_datum(), None => unpack_datum!(bcx, trans_unadjusted(bcx, expr)) @@ -279,21 +282,27 @@ pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } else { unpack_datum!(bcx, apply_adjustments(bcx, expr, datum)) }; - bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id); - return DatumBlock::new(bcx, datum); + let bl = bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx.bl, expr.id); + return DatumBlock::new(bl, datum); } -pub fn get_len(bcx: Block, fat_ptr: ValueRef) -> ValueRef { +pub fn get_len(bcx: &mut BlockContext, fat_ptr: ValueRef) -> ValueRef { GEPi(bcx, fat_ptr, &[0, abi::FAT_PTR_EXTRA]) } -pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef { +pub fn get_dataptr(bcx: &mut BlockContext, fat_ptr: ValueRef) -> ValueRef { GEPi(bcx, fat_ptr, &[0, abi::FAT_PTR_ADDR]) } -pub fn copy_fat_ptr(bcx: Block, src_ptr: ValueRef, dst_ptr: ValueRef) { - Store(bcx, Load(bcx, get_dataptr(bcx, src_ptr)), get_dataptr(bcx, dst_ptr)); - Store(bcx, Load(bcx, get_len(bcx, src_ptr)), get_len(bcx, dst_ptr)); +pub fn copy_fat_ptr(bcx: &mut BlockContext, src_ptr: ValueRef, dst_ptr: ValueRef) { + let sp = get_dataptr(bcx, src_ptr); + let ld = Load(bcx, sp); + let dp = get_dataptr(bcx, dst_ptr); + Store(bcx, ld, dp); + let sl = get_len(bcx, src_ptr); + let ld = Load(bcx, sl); + let dl = get_len(bcx, dst_ptr); + Store(bcx, ld, dl); } /// Retrieve the information we are losing (making dynamic) in an unsizing @@ -334,16 +343,17 @@ pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>, /// Helper for trans that apply adjustments from `expr` to `datum`, which should be the unadjusted /// translation of `expr`. -fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &ast::Expr, - datum: Datum<'tcx, Expr>) - -> DatumBlock<'blk, 'tcx, Expr> +fn apply_adjustments<'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + expr: &ast::Expr, + datum: Datum<'tcx, Expr>) + -> DatumBlock<'blk, 'tcx, Expr> { - let mut bcx = bcx; + let mut bcx = &mut bl.with_fcx(fcx); let mut datum = datum; let adjustment = match bcx.tcx().adjustments.borrow().get(&expr.id).cloned() { None => { - return DatumBlock::new(bcx, datum); + return DatumBlock::new(bcx.bl, datum); } Some(adj) => { adj } }; @@ -394,11 +404,13 @@ fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, if skip_reborrows == 0 && adj.autoref.is_some() { if !type_is_sized(bcx.tcx(), datum.ty) { // Arrange cleanup - let lval = unpack_datum!(bcx, - datum.to_lvalue_datum(bcx, "ref_fat_ptr", expr.id)); - datum = unpack_datum!(bcx, ref_fat_ptr(bcx, lval)); + let rfp = datum.to_lvalue_datum(bcx, "ref_fat_ptr", expr.id); + let lval = unpack_datum!(bcx, rfp); + let rfp = ref_fat_ptr(bcx, lval); + datum = unpack_datum!(bcx, rfp); } else { - datum = unpack_datum!(bcx, auto_ref(bcx, datum, expr)); + let ar = auto_ref(bcx, datum, expr); + datum = unpack_datum!(bcx, ar); } } @@ -408,8 +420,8 @@ fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // the `datum.to_rvalue_datum` call below will emit code to zero // the drop flag when moving out of the L-value). If we are an // R-value, then we do not need to schedule cleanup. 
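
The `copy_fat_ptr` rewrite just above is representative of many mechanical changes in this diff: with the builder helpers taking `&mut` to the block context, a nested call such as `Store(bcx, Load(bcx, ...), ...)` would borrow `bcx` mutably twice in one expression, so each inner call is hoisted into a temporary first. The constraint in isolation, with toy `load`/`store` functions:

struct Ctx {
    emitted: Vec<String>,
}

fn load(cx: &mut Ctx, ptr: &str) -> String {
    cx.emitted.push(format!("load {}", ptr));
    format!("val({})", ptr)
}

fn store(cx: &mut Ctx, val: String, ptr: &str) {
    cx.emitted.push(format!("store {} to {}", val, ptr));
}

fn main() {
    let mut cx = Ctx { emitted: vec![] };
    // store(&mut cx, load(&mut cx, "src"), "dst") would borrow `cx`
    // mutably twice at once; hoisting the inner call sidesteps that,
    // exactly like the `let sp = ...; let ld = Load(bcx, sp);` rewrite.
    let val = load(&mut cx, "src");
    store(&mut cx, val, "dst");
    assert_eq!(cx.emitted.len(), 2);
}
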
- let source_datum = unpack_datum!(bcx, - datum.to_rvalue_datum(bcx, "__coerce_source")); + let rvd = datum.to_rvalue_datum(bcx, "__coerce_source"); + let source_datum = unpack_datum!(bcx, rvd); let target = bcx.monomorphize(&target); let llty = type_of::type_of(bcx.ccx(), target); @@ -418,22 +430,23 @@ fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let scratch = alloca_no_lifetime(bcx, llty, "__coerce_target"); let target_datum = Datum::new(scratch, target, Rvalue::new(ByRef)); - bcx = coerce_unsized(bcx, expr.span, source_datum, target_datum); + bcx.bl = coerce_unsized(bcx, expr.span, source_datum, target_datum); datum = Datum::new(scratch, target, RvalueExpr(Rvalue::new(ByRef))); } } } debug!("after adjustments, datum={}", datum.to_string(bcx.ccx())); - DatumBlock::new(bcx, datum) + DatumBlock::new(bcx.bl, datum) } -fn coerce_unsized<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - span: codemap::Span, - source: Datum<'tcx, Rvalue>, - target: Datum<'tcx, Rvalue>) - -> Block<'blk, 'tcx> { - let mut bcx = bcx; +fn coerce_unsized<'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + span: codemap::Span, + source: Datum<'tcx, Rvalue>, + target: Datum<'tcx, Rvalue>) + -> &'blk Block { + let mut bcx = &mut bl.with_fcx(fcx); debug!("coerce_unsized({} -> {})", source.to_string(bcx.ccx()), target.to_string(bcx.ccx())); @@ -452,8 +465,10 @@ fn coerce_unsized<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // to use a different vtable. In that case, we want to // load out the original data pointer so we can repackage // it. - (Load(bcx, get_dataptr(bcx, source.val)), - Some(Load(bcx, get_len(bcx, source.val)))) + let dp = get_dataptr(bcx, source.val); + let dl = get_len(bcx, source.val); + (Load(bcx, dp), + Some(Load(bcx, dl))) } else { let val = if source.kind.is_by_ref() { load_ty(bcx, source.val, source.ty) @@ -471,8 +486,10 @@ fn coerce_unsized<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), inner_target).ptr_to(); let base = PointerCast(bcx, base, ptr_ty); - Store(bcx, base, get_dataptr(bcx, target.val)); - Store(bcx, info, get_len(bcx, target.val)); + let dp = get_dataptr(bcx, target.val); + Store(bcx, base, dp); + let l = get_len(bcx, target.val); + Store(bcx, info, l); } // This can be extended to enums and tuples in the future. @@ -546,7 +563,7 @@ fn coerce_unsized<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, source.ty.repr(bcx.tcx()), target.ty.repr(bcx.tcx()))) } - bcx + bcx.bl } /// Translates an expression in "lvalue" mode -- meaning that it returns a reference to the memory @@ -556,21 +573,23 @@ fn coerce_unsized<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, /// something like `x().f` is translated into roughly the equivalent of /// /// { tmp = x(); tmp.f } -pub fn trans_to_lvalue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &ast::Expr, - name: &str) - -> DatumBlock<'blk, 'tcx, Lvalue> { - let mut bcx = bcx; +pub fn trans_to_lvalue<'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + expr: &ast::Expr, + name: &str) + -> DatumBlock<'blk, 'tcx, Lvalue> { + let mut bcx = &mut bl.with_fcx(fcx); let datum = unpack_datum!(bcx, trans(bcx, expr)); return datum.to_lvalue_datum(bcx, name, expr.id); } /// A version of `trans` that ignores adjustments. You almost certainly do not want to call this /// directly. 
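
For context on the paired `Store`s in `coerce_unsized`: a fat pointer is two machine words, the data address plus an "extra" word (a length for slices, a vtable pointer for trait objects), and the code writes each word through `get_dataptr`/`get_len`. A rough picture in plain Rust; this `FatPtr` struct is illustrative only, not rustc's actual layout code:

#[repr(C)]
struct FatPtr {
    data: *const u8, // the word get_dataptr() addresses
    extra: usize,    // the word get_len() addresses: length or vtable
}

fn main() {
    let xs = [1u8, 2, 3];
    let slice: &[u8] = &xs;
    // A &[u8] really is a (pointer, length) pair of machine words.
    assert_eq!(std::mem::size_of::<&[u8]>(), std::mem::size_of::<FatPtr>());
    assert_eq!(slice.len(), 3);
}
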
-fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &ast::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - let mut bcx = bcx; +fn trans_unadjusted<'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + expr: &ast::Expr) + -> DatumBlock<'blk, 'tcx, Expr> { + let mut bcx = &mut bl.with_fcx(fcx); debug!("trans_unadjusted(expr={})", bcx.expr_to_string(expr)); let _indenter = indenter(); @@ -583,22 +602,23 @@ fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, trans_datum_unadjusted(bcx, expr) }); - DatumBlock {bcx: bcx, datum: datum} + DatumBlock {bcx: bcx.bl, datum: datum} } ty::RvalueStmtExpr => { - bcx = trans_rvalue_stmt_unadjusted(bcx, expr); - nil(bcx, expr_ty(bcx, expr)) + let bl = trans_rvalue_stmt_unadjusted(bcx, expr); + let expr_ty = expr_ty(bcx, expr); + nil(&mut bl.with_fcx(bcx.fcx), expr_ty) } ty::RvalueDpsExpr => { let ty = expr_ty(bcx, expr); if type_is_zero_size(bcx.ccx(), ty) { - bcx = trans_rvalue_dps_unadjusted(bcx, expr, Ignore); - nil(bcx, ty) + let bl = trans_rvalue_dps_unadjusted(bcx, expr, Ignore); + nil(&mut bl.with_fcx(bcx.fcx), ty) } else { let scratch = rvalue_scratch_datum(bcx, ty, ""); - bcx = trans_rvalue_dps_unadjusted( + bcx.bl = trans_rvalue_dps_unadjusted( bcx, expr, SaveIn(scratch.val)); // Note: this is not obviously a good idea. It causes @@ -613,24 +633,24 @@ fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let scratch = unpack_datum!( bcx, scratch.to_appropriate_datum(bcx)); - DatumBlock::new(bcx, scratch.to_expr_datum()) + DatumBlock::new(bcx.bl, scratch.to_expr_datum()) } } }; - fn nil<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>) + fn nil<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, ty: Ty<'tcx>) -> DatumBlock<'blk, 'tcx, Expr> { let llval = C_undef(type_of::type_of(bcx.ccx(), ty)); let datum = immediate_rvalue(llval, ty); - DatumBlock::new(bcx, datum.to_expr_datum()) + DatumBlock::new(bcx.bl, datum.to_expr_datum()) } } -fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &ast::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - let mut bcx = bcx; - let fcx = bcx.fcx; +fn trans_datum_unadjusted<'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + expr: &ast::Expr) + -> DatumBlock<'blk, 'tcx, Expr> { + let mut bcx = &mut bl.with_fcx(fcx); let _icx = push_ctxt("trans_datum_unadjusted"); match expr.node { @@ -638,7 +658,8 @@ fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, trans(bcx, &**e) } ast::ExprPath(..) 
=> { - trans_def(bcx, expr, bcx.def(expr.id)) + let def = bcx.def(expr.id); + trans_def(bcx, expr, def) } ast::ExprField(ref base, ident) => { trans_rec_field(bcx, &**base, ident.node.name) @@ -678,11 +699,11 @@ fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, x.id, x.span, false); - fcx.push_ast_cleanup_scope(cleanup_debug_loc); + bcx.fcx.push_ast_cleanup_scope(cleanup_debug_loc); let datum = unpack_datum!( bcx, tvec::trans_slice_vec(bcx, expr, &**x)); - bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, x.id); - DatumBlock::new(bcx, datum) + let bl = bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx.bl, x.id); + DatumBlock::new(bl, datum) } _ => { trans_addr_of(bcx, expr, &**x) @@ -703,13 +724,14 @@ fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } } -fn trans_field<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, - base: &ast::Expr, - get_idx: F) - -> DatumBlock<'blk, 'tcx, Expr> where +fn trans_field<'r, 'blk, 'tcx, F> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + base: &ast::Expr, + get_idx: F) + -> DatumBlock<'blk, 'tcx, Expr> where F: FnOnce(&'blk ty::ctxt<'tcx>, &[ty::field<'tcx>]) -> usize, { - let mut bcx = bcx; + let mut bcx = &mut bl.with_fcx(fcx); let _icx = push_ctxt("trans_rec_field"); let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, base, "field")); @@ -720,51 +742,55 @@ fn trans_field<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, let d = base_datum.get_element( bcx, field_tys[ix].mt.ty, - |srcval| adt::trans_field_ptr(bcx, &*repr, srcval, discr, ix)); + |bcx, srcval| adt::trans_field_ptr(bcx, &*repr, srcval, discr, ix)); if type_is_sized(bcx.tcx(), d.ty) { - DatumBlock { datum: d.to_expr_datum(), bcx: bcx } + DatumBlock { datum: d.to_expr_datum(), bcx: bcx.bl } } else { let scratch = rvalue_scratch_datum(bcx, d.ty, ""); - Store(bcx, d.val, get_dataptr(bcx, scratch.val)); - let info = Load(bcx, get_len(bcx, base_datum.val)); - Store(bcx, info, get_len(bcx, scratch.val)); + let dp = get_dataptr(bcx, scratch.val); + Store(bcx, d.val, dp); + let bl = get_len(bcx, base_datum.val); + let info = Load(bcx, bl); + let dl = get_len(bcx, scratch.val); + Store(bcx, info, dl); // Always generate an lvalue datum, because this pointer doesn't own // the data and cleanup is scheduled elsewhere. - DatumBlock::new(bcx, Datum::new(scratch.val, scratch.ty, LvalueExpr)) + DatumBlock::new(bcx.bl, Datum::new(scratch.val, scratch.ty, LvalueExpr)) } }) } /// Translates `base.field`. -fn trans_rec_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - base: &ast::Expr, - field: ast::Name) - -> DatumBlock<'blk, 'tcx, Expr> { +fn trans_rec_field<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + base: &ast::Expr, + field: ast::Name) + -> DatumBlock<'blk, 'tcx, Expr> { trans_field(bcx, base, |tcx, field_tys| ty::field_idx_strict(tcx, field, field_tys)) } /// Translates `base.<idx>`.
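
The closure change inside `trans_field` is another instance of the borrow discipline: `get_element` callbacks now take `bcx` explicitly (`|bcx, srcval| ...`) because the closure may no longer capture the mutable context that `get_element` itself is holding. Reduced to a compilable toy with hypothetical names:

struct Ctx {
    counter: u32,
}

// The callee owns the only &mut and lends it to the callback per call.
fn get_element<F>(cx: &mut Ctx, srcval: u32, f: F) -> u32
where
    F: FnOnce(&mut Ctx, u32) -> u32,
{
    cx.counter += 1;
    f(cx, srcval)
}

fn main() {
    let mut cx = Ctx { counter: 0 };
    let field_ptr = get_element(&mut cx, 40, |cx, srcval| {
        cx.counter += 1;
        srcval + 2 // stands in for trans_field_ptr
    });
    assert_eq!(field_ptr, 42);
    assert_eq!(cx.counter, 2);
}
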
-fn trans_rec_tup_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - base: &ast::Expr, - idx: usize) - -> DatumBlock<'blk, 'tcx, Expr> { +fn trans_rec_tup_field<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + base: &ast::Expr, + idx: usize) + -> DatumBlock<'blk, 'tcx, Expr> { trans_field(bcx, base, |_, _| idx) } -fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - index_expr: &ast::Expr, - base: &ast::Expr, - idx: &ast::Expr, - method_call: MethodCall) - -> DatumBlock<'blk, 'tcx, Expr> { +fn trans_index<'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + index_expr: &ast::Expr, + base: &ast::Expr, + idx: &ast::Expr, + method_call: MethodCall) + -> DatumBlock<'blk, 'tcx, Expr> { //! Translates `base[idx]`. let _icx = push_ctxt("trans_index"); + let mut bcx = &mut bl.with_fcx(fcx); let ccx = bcx.ccx(); - let mut bcx = bcx; let index_expr_debug_loc = index_expr.debug_loc(); @@ -858,7 +884,7 @@ fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, &[bounds_check, C_bool(ccx, false)], None, index_expr_debug_loc); - bcx = with_cond(bcx, expected, |bcx| { + bcx.bl = with_cond(bcx, expected, |bcx| { controlflow::trans_fail_bounds_check(bcx, expr_info(index_expr), ix_val, @@ -870,13 +896,13 @@ fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } }; - DatumBlock::new(bcx, elt_datum) + DatumBlock::new(bcx.bl, elt_datum) } -fn trans_def<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - ref_expr: &ast::Expr, - def: def::Def) - -> DatumBlock<'blk, 'tcx, Expr> { +fn trans_def<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + ref_expr: &ast::Expr, + def: def::Def) + -> DatumBlock<'blk, 'tcx, Expr> { //! Translates a reference to a path. let _icx = push_ctxt("trans_def_lvalue"); @@ -885,7 +911,7 @@ fn trans_def<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, def::DefStruct(_) | def::DefVariant(..) => { let datum = trans_def_fn_unadjusted(bcx.ccx(), ref_expr, def, bcx.fcx.param_substs); - DatumBlock::new(bcx, datum.to_expr_datum()) + DatumBlock::new(bcx.bl, datum.to_expr_datum()) } def::DefStatic(did, _) => { // There are two things that may happen here: @@ -911,26 +937,28 @@ fn trans_def<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // Case 2. base::get_extern_const(bcx.ccx(), did, const_ty) }; - DatumBlock::new(bcx, Datum::new(val, const_ty, LvalueExpr)) + DatumBlock::new(bcx.bl, Datum::new(val, const_ty, LvalueExpr)) } def::DefConst(_) => { bcx.sess().span_bug(ref_expr.span, "constant expression should not reach expr::trans_def") } _ => { - DatumBlock::new(bcx, trans_local_var(bcx, def).to_expr_datum()) + DatumBlock::new(bcx.bl, trans_local_var(bcx, def).to_expr_datum()) } } } -fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &ast::Expr) - -> Block<'blk, 'tcx> { - let mut bcx = bcx; +fn trans_rvalue_stmt_unadjusted<'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + expr: &ast::Expr) + -> &'blk Block +{ + let mut bcx = &mut bl.with_fcx(fcx); let _icx = push_ctxt("trans_rvalue_stmt"); - if bcx.unreachable.get() { - return bcx; + if bcx.bl.unreachable.get() { + return bcx.bl; } debuginfo::set_source_location(bcx.fcx, expr.id, expr.span); @@ -961,13 +989,13 @@ fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // directly. This avoids having to manage a return slot when // it won't actually be used anyway. if let &Some(ref x) = ex { - bcx = trans_into(bcx, &**x, Ignore); + bcx.bl = trans_into(bcx, &**x, Ignore); } // Mark the end of the block as unreachable. 
Once we get to // a return expression, there's no more we should be doing // after this. Unreachable(bcx); - bcx + bcx.bl } } ast::ExprWhile(ref cond, ref body, _) => { @@ -1000,10 +1028,10 @@ fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, debuginfo::set_source_location(bcx.fcx, expr.id, expr.span); let src_datum = unpack_datum!( bcx, src_datum.to_rvalue_datum(bcx, "ExprAssign")); - bcx = glue::drop_ty(bcx, - dst_datum.val, - dst_datum.ty, - expr.debug_loc()); + bcx.bl = glue::drop_ty(bcx, + dst_datum.val, + dst_datum.ty, + expr.debug_loc()); src_datum.store_to(bcx, dst_datum.val) } else { src_datum.store_to(bcx, dst_datum.val) @@ -1025,12 +1053,13 @@ fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } } -fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &ast::Expr, - dest: Dest) - -> Block<'blk, 'tcx> { +fn trans_rvalue_dps_unadjusted<'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + expr: &ast::Expr, + dest: Dest) + -> &'blk Block { let _icx = push_ctxt("trans_rvalue_dps_unadjusted"); - let mut bcx = bcx; + let mut bcx = &mut bl.with_fcx(fcx); let tcx = bcx.tcx(); debuginfo::set_source_location(bcx.fcx, expr.id, expr.span); @@ -1040,7 +1069,8 @@ fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, trans_into(bcx, &**e, dest) } ast::ExprPath(..) => { - trans_def_dps_unadjusted(bcx, expr, bcx.def(expr.id), dest) + let def = bcx.def(expr.id); + trans_def_dps_unadjusted(bcx, expr, def, dest) } ast::ExprIf(ref cond, ref thn, ref els) => { controlflow::trans_if(bcx, expr.id, &**cond, &**thn, els.as_ref().map(|e| &**e), dest) @@ -1052,12 +1082,13 @@ fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, controlflow::trans_block(bcx, &**blk, dest) } ast::ExprStruct(_, ref fields, ref base) => { + let ty = node_id_type(bcx, expr.id); trans_struct(bcx, &fields[..], base.as_ref().map(|e| &**e), expr.span, expr.id, - node_id_type(bcx, expr.id), + ty, dest) } ast::ExprRange(ref start, ref end) => { @@ -1115,8 +1146,9 @@ fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ast::ExprTup(ref args) => { let numbered_fields: Vec<(usize, &ast::Expr)> = args.iter().enumerate().map(|(i, arg)| (i, &**arg)).collect(); + let expr_ty = expr_ty(bcx, expr); trans_adt(bcx, - expr_ty(bcx, expr), + expr_ty, 0, &numbered_fields[..], None, @@ -1146,7 +1178,7 @@ fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, Ignore => closure::Dest::Ignore(bcx.ccx()) }; closure::trans_closure_expr(dest, &**decl, &**body, expr.id, bcx.fcx.param_substs) - .unwrap_or(bcx) + .unwrap_or(bcx.bl) } ast::ExprCall(ref f, ref args) => { if bcx.tcx().is_method_call(expr.id) { @@ -1208,16 +1240,16 @@ fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } } -fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - ref_expr: &ast::Expr, - def: def::Def, - dest: Dest) - -> Block<'blk, 'tcx> { +fn trans_def_dps_unadjusted<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + ref_expr: &ast::Expr, + def: def::Def, + dest: Dest) + -> &'blk Block { let _icx = push_ctxt("trans_def_dps_unadjusted"); let lldest = match dest { SaveIn(lldest) => lldest, - Ignore => { return bcx; } + Ignore => { return bcx.bl; } }; match def { @@ -1229,14 +1261,14 @@ fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ExprId(ref_expr.id), bcx.fcx.param_substs).val; Store(bcx, llfn, lldest); - return bcx; + return bcx.bl; } else { // Nullary variant. 
let ty = expr_ty(bcx, ref_expr); let repr = adt::represent_type(bcx.ccx(), ty); adt::trans_set_discr(bcx, &*repr, lldest, variant_info.disr_val); - return bcx; + return bcx.bl; } } def::DefStruct(_) => { @@ -1248,7 +1280,7 @@ fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } _ => {} } - bcx + bcx.bl } _ => { bcx.tcx().sess.span_bug(ref_expr.span, &format!( @@ -1286,16 +1318,16 @@ pub fn trans_def_fn_unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, } /// Translates a reference to a local variable or argument. This always results in an lvalue datum. -pub fn trans_local_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - def: def::Def) - -> Datum<'tcx, Lvalue> { +pub fn trans_local_var<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + def: def::Def) + -> Datum<'tcx, Lvalue> { let _icx = push_ctxt("trans_local_var"); match def { def::DefUpvar(nid, _) => { // Can't move upvars, so this is never a ZeroMemLastUse. let local_ty = node_id_type(bcx, nid); - match bcx.fcx.llupvars.borrow().get(&nid) { + match bcx.fcx.llupvars.get(&nid) { Some(&val) => Datum::new(val, local_ty, Lvalue), None => { bcx.sess().bug(&format!( @@ -1305,7 +1337,7 @@ pub fn trans_local_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } } def::DefLocal(nid) => { - let datum = match bcx.fcx.lllocals.borrow().get(&nid) { + let datum = match bcx.fcx.lllocals.get(&nid) { Some(&v) => v, None => { bcx.sess().bug(&format!( @@ -1381,13 +1413,13 @@ pub fn with_field_tys<'tcx, R, F>(tcx: &ty::ctxt<'tcx>, } } -fn trans_struct<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - fields: &[ast::Field], - base: Option<&ast::Expr>, - expr_span: codemap::Span, - expr_id: ast::NodeId, - ty: Ty<'tcx>, - dest: Dest) -> Block<'blk, 'tcx> { +fn trans_struct<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + fields: &[ast::Field], + base: Option<&ast::Expr>, + expr_span: codemap::Span, + expr_id: ast::NodeId, + ty: Ty<'tcx>, + dest: Dest) -> &'blk Block { let _icx = push_ctxt("trans_rec"); let tcx = bcx.tcx(); @@ -1460,16 +1492,17 @@ pub struct StructBaseInfo<'a, 'tcx> { /// /// - `optbase` contains information on the base struct (if any) from /// which remaining fields are copied; see comments on `StructBaseInfo`. -pub fn trans_adt<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - ty: Ty<'tcx>, - discr: ty::Disr, - fields: &[(usize, &ast::Expr)], - optbase: Option<StructBaseInfo<'a, 'tcx>>, - dest: Dest, - debug_location: DebugLoc) - -> Block<'blk, 'tcx> { +pub fn trans_adt<'a, 'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + ty: Ty<'tcx>, + discr: ty::Disr, + fields: &[(usize, &ast::Expr)], + optbase: Option<StructBaseInfo<'a, 'tcx>>, + dest: Dest, + debug_location: DebugLoc) + -> &'blk Block { + let mut bcx = &mut bl.with_fcx(fcx); let _icx = push_ctxt("trans_adt"); - let fcx = bcx.fcx; let repr = adt::represent_type(bcx.ccx(), ty); debug_location.apply(bcx.fcx); @@ -1483,7 +1516,7 @@ pub fn trans_adt<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, // This scope holds intermediates that must be cleaned should // panic occur before the ADT as a whole is ready.
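
The cleanup-scope choreography that follows (push a custom scope, schedule a drop for each field as it is initialized, pop the scope once the ADT is complete) is what keeps a partially built value panic-safe. A toy model of that discipline; the names mirror the diff, but the real API schedules LLVM-level cleanups, not strings:

struct CleanupScope {
    pending_drops: Vec<String>,
}

struct Fcx {
    scopes: Vec<CleanupScope>,
}

impl Fcx {
    fn push_custom_cleanup_scope(&mut self) -> usize {
        self.scopes.push(CleanupScope { pending_drops: vec![] });
        self.scopes.len() - 1
    }

    fn schedule_drop(&mut self, scope: usize, what: &str) {
        self.scopes[scope].pending_drops.push(what.to_string());
    }

    // Popping without running the drops: the value is complete, so the
    // field-by-field cleanups are no longer needed.
    fn pop_custom_cleanup_scope(&mut self, scope: usize) {
        assert_eq!(scope, self.scopes.len() - 1);
        self.scopes.pop();
    }
}

fn main() {
    let mut fcx = Fcx { scopes: vec![] };
    let scope = fcx.push_custom_cleanup_scope();
    for field in ["a", "b"] {
        fcx.schedule_drop(scope, field); // would run only on panic
    }
    fcx.pop_custom_cleanup_scope(scope);
    assert!(fcx.scopes.is_empty());
}
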
- let custom_cleanup_scope = fcx.push_custom_cleanup_scope(); + let custom_cleanup_scope = bcx.fcx.push_custom_cleanup_scope(); if ty::type_is_simd(bcx.tcx(), ty) { // Issue 23112: The original logic appeared vulnerable to same @@ -1505,7 +1538,7 @@ pub fn trans_adt<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, for &(i, ref e) in fields { let block_datum = trans(bcx, &**e); - bcx = block_datum.bcx; + bcx.bl = block_datum.bcx; let position = C_uint(bcx.ccx(), i); let value = block_datum.datum.to_llscalarish(bcx); vec_val = InsertElement(bcx, vec_val, value, position); @@ -1516,10 +1549,11 @@ pub fn trans_adt<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, // requires field expressions eval'ed before base expression. // First, trans field expressions to temporary scratch values. - let scratch_vals: Vec<_> = fields.iter().map(|&(i, ref e)| { + let mut scratch_vals = Vec::new(); + for &(i, ref e) in fields { let datum = unpack_datum!(bcx, trans(bcx, &**e)); - (i, datum) - }).collect(); + scratch_vals.push((i, datum)); + } debug_location.apply(bcx.fcx); @@ -1528,17 +1562,18 @@ pub fn trans_adt<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, match ty::expr_kind(bcx.tcx(), &*base.expr) { ty::RvalueDpsExpr | ty::RvalueDatumExpr if !bcx.fcx.type_needs_drop(ty) => { - bcx = trans_into(bcx, &*base.expr, SaveIn(addr)); + bcx.bl = trans_into(bcx, &*base.expr, SaveIn(addr)); }, ty::RvalueStmtExpr => bcx.tcx().sess.bug("unexpected expr kind for struct base expr"), _ => { let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &*base.expr, "base")); for &(i, t) in &base.fields { let datum = base_datum.get_element( - bcx, t, |srcval| adt::trans_field_ptr(bcx, &*repr, srcval, discr, i)); + bcx, t, + |bcx, srcval| adt::trans_field_ptr(bcx, &*repr, srcval, discr, i)); assert!(type_is_sized(bcx.tcx(), datum.ty)); let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i); - bcx = datum.store_to(bcx, dest); + bcx.bl = datum.store_to(bcx, dest); } } } @@ -1546,40 +1581,40 @@ pub fn trans_adt<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, // Finally, move scratch field values into actual field locations for (i, datum) in scratch_vals.into_iter() { let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i); - bcx = datum.store_to(bcx, dest); + bcx.bl = datum.store_to(bcx, dest); } } else { // No base means we can write all fields directly in place. 
for &(i, ref e) in fields { let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i); let e_ty = expr_ty_adjusted(bcx, &**e); - bcx = trans_into(bcx, &**e, SaveIn(dest)); + bcx.bl = trans_into(bcx, &**e, SaveIn(dest)); let scope = cleanup::CustomScope(custom_cleanup_scope); - fcx.schedule_lifetime_end(scope, dest); - fcx.schedule_drop_mem(scope, dest, e_ty); + bcx.fcx.schedule_lifetime_end(scope, dest); + bcx.fcx.schedule_drop_mem(scope, dest, e_ty); } } adt::trans_set_discr(bcx, &*repr, addr, discr); - fcx.pop_custom_cleanup_scope(custom_cleanup_scope); + bcx.fcx.pop_custom_cleanup_scope(custom_cleanup_scope); // If we don't care about the result drop the temporary we made match dest { - SaveIn(_) => bcx, + SaveIn(_) => bcx.bl, Ignore => { - bcx = glue::drop_ty(bcx, addr, ty, debug_location); + bcx.bl = glue::drop_ty(bcx, addr, ty, debug_location); base::call_lifetime_end(bcx, addr); - bcx + bcx.bl } } } -fn trans_immediate_lit<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &ast::Expr, - lit: &ast::Lit) - -> DatumBlock<'blk, 'tcx, Expr> { +fn trans_immediate_lit<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + expr: &ast::Expr, + lit: &ast::Lit) + -> DatumBlock<'blk, 'tcx, Expr> { // must not be a string constant, that is a RvalueDpsExpr let _icx = push_ctxt("trans_immediate_lit"); let ty = expr_ty(bcx, expr); @@ -1587,13 +1622,14 @@ fn trans_immediate_lit<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, immediate_rvalue_bcx(bcx, v, ty).to_expr_datumblock() } -fn trans_unary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &ast::Expr, - op: ast::UnOp, - sub_expr: &ast::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { +fn trans_unary<'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + expr: &ast::Expr, + op: ast::UnOp, + sub_expr: &ast::Expr) + -> DatumBlock<'blk, 'tcx, Expr> { + let mut bcx = &mut bl.with_fcx(fcx); let ccx = bcx.ccx(); - let mut bcx = bcx; let _icx = push_ctxt("trans_unary_datum"); let method_call = MethodCall::expr(expr.id); @@ -1611,21 +1647,23 @@ fn trans_unary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, match op { ast::UnNot => { - let datum = unpack_datum!(bcx, trans(bcx, sub_expr)); - let llresult = Not(bcx, datum.to_llscalarish(bcx), debug_loc); + let t = trans(bcx, sub_expr); + let datum = unpack_datum!(bcx, t); + let lls = datum.to_llscalarish(bcx); + let llresult = Not(bcx, lls, debug_loc); immediate_rvalue_bcx(bcx, llresult, un_ty).to_expr_datumblock() } ast::UnNeg => { let datum = unpack_datum!(bcx, trans(bcx, sub_expr)); let val = datum.to_llscalarish(bcx); - let (bcx, llneg) = { + let (bl, llneg) = { if ty::type_is_fp(un_ty) { let result = FNeg(bcx, val, debug_loc); - (bcx, result) + (bcx.bl, result) } else { let is_signed = ty::type_is_signed(un_ty); let result = Neg(bcx, val, debug_loc); - let bcx = if bcx.ccx().check_overflow() && is_signed { + let bl = if bcx.ccx().check_overflow() && is_signed { let (llty, min) = base::llty_and_min_for_signed_ty(bcx, un_ty); let is_min = ICmp(bcx, llvm::IntEQ, val, C_integral(llty, min, true), debug_loc); @@ -1635,15 +1673,16 @@ fn trans_unary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, controlflow::trans_fail(bcx, expr_info(expr), msg) }) } else { - bcx + bcx.bl }; - (bcx, result) + (bl, result) } }; - immediate_rvalue_bcx(bcx, llneg, un_ty).to_expr_datumblock() + immediate_rvalue_bcx(&mut bl.with_fcx(bcx.fcx), llneg, un_ty).to_expr_datumblock() } ast::UnUniq => { - trans_uniq_expr(bcx, expr, un_ty, sub_expr, expr_ty(bcx, sub_expr)) + let expr_ty = expr_ty(bcx, sub_expr); + trans_uniq_expr(bcx, expr, un_ty, 
sub_expr, expr_ty) } ast::UnDeref => { let datum = unpack_datum!(bcx, trans(bcx, sub_expr)); @@ -1652,57 +1691,57 @@ fn trans_unary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } } -fn trans_uniq_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - box_expr: &ast::Expr, - box_ty: Ty<'tcx>, - contents: &ast::Expr, - contents_ty: Ty<'tcx>) - -> DatumBlock<'blk, 'tcx, Expr> { +fn trans_uniq_expr<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + box_expr: &ast::Expr, + box_ty: Ty<'tcx>, + contents: &ast::Expr, + contents_ty: Ty<'tcx>) + -> DatumBlock<'blk, 'tcx, Expr> { let _icx = push_ctxt("trans_uniq_expr"); - let fcx = bcx.fcx; assert!(type_is_sized(bcx.tcx(), contents_ty)); let llty = type_of::type_of(bcx.ccx(), contents_ty); let size = llsize_of(bcx.ccx(), llty); let align = C_uint(bcx.ccx(), type_of::align_of(bcx.ccx(), contents_ty)); let llty_ptr = llty.ptr_to(); - let Result { bcx, val } = malloc_raw_dyn(bcx, - llty_ptr, - box_ty, - size, - align, - box_expr.debug_loc()); + let Result { bcx: bl, val } = malloc_raw_dyn(bcx, + llty_ptr, + box_ty, + size, + align, + box_expr.debug_loc()); // Unique boxes do not allocate for zero-size types. The standard library // may assume that `free` is never called on the pointer returned for // `Box`. - let bcx = if llsize_of_alloc(bcx.ccx(), llty) == 0 { - trans_into(bcx, contents, SaveIn(val)) + let mut bcx = if llsize_of_alloc(bcx.ccx(), llty) == 0 { + trans_into(&mut bl.with_fcx(bcx.fcx), contents, SaveIn(val)) } else { - let custom_cleanup_scope = fcx.push_custom_cleanup_scope(); - fcx.schedule_free_value(cleanup::CustomScope(custom_cleanup_scope), - val, cleanup::HeapExchange, contents_ty); - let bcx = trans_into(bcx, contents, SaveIn(val)); - fcx.pop_custom_cleanup_scope(custom_cleanup_scope); - bcx - }; - immediate_rvalue_bcx(bcx, val, box_ty).to_expr_datumblock() + let custom_cleanup_scope = bcx.fcx.push_custom_cleanup_scope(); + bcx.fcx.schedule_free_value(cleanup::CustomScope(custom_cleanup_scope), + val, cleanup::HeapExchange, contents_ty); + let bl = trans_into(&mut bl.with_fcx(bcx.fcx), contents, SaveIn(val)); + bcx.fcx.pop_custom_cleanup_scope(custom_cleanup_scope); + bl + }.with_fcx(bcx.fcx); + immediate_rvalue_bcx(&mut bcx, val, box_ty).to_expr_datumblock() } -fn ref_fat_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - lval: Datum<'tcx, Lvalue>) - -> DatumBlock<'blk, 'tcx, Expr> { +fn ref_fat_ptr<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + lval: Datum<'tcx, Lvalue>) + -> DatumBlock<'blk, 'tcx, Expr> { let dest_ty = ty::mk_imm_rptr(bcx.tcx(), bcx.tcx().mk_region(ty::ReStatic), lval.ty); let scratch = rvalue_scratch_datum(bcx, dest_ty, "__fat_ptr"); memcpy_ty(bcx, scratch.val, lval.val, scratch.ty); - DatumBlock::new(bcx, scratch.to_expr_datum()) + DatumBlock::new(bcx.bl, scratch.to_expr_datum()) } -fn trans_addr_of<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &ast::Expr, - subexpr: &ast::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { +fn trans_addr_of<'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + expr: &ast::Expr, + subexpr: &ast::Expr) + -> DatumBlock<'blk, 'tcx, Expr> { let _icx = push_ctxt("trans_addr_of"); - let mut bcx = bcx; + let mut bcx = &mut bl.with_fcx(fcx); let sub_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, subexpr, "addr_of")); if !type_is_sized(bcx.tcx(), sub_datum.ty) { // DST lvalue, close to a fat pointer @@ -1716,15 +1755,17 @@ fn trans_addr_of<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // Important to get types for both lhs and rhs, because one might be _|_ // and the other not. 
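
Where a sub-computation may finish in a different basic block, the rewritten code returns the raw `&'blk Block` out of the inner scope and re-wraps it with `with_fcx` before continuing, as the `UnNeg` overflow arm above does with `(bl, result)`. A small sketch that type-checks, again with stand-in types:

struct Block {
    id: u32,
}

struct Fcx {
    emitted: u32,
}

struct BlockContext<'r, 'blk> {
    bl: &'blk Block,
    fcx: &'r mut Fcx,
}

impl Block {
    fn with_fcx<'r, 'blk>(&'blk self, fcx: &'r mut Fcx) -> BlockContext<'r, 'blk> {
        BlockContext { bl: self, fcx }
    }
}

fn emit(bcx: &mut BlockContext) {
    bcx.fcx.emitted += 1;
}

fn main() {
    let entry = Block { id: 0 };
    let overflow_block = Block { id: 1 };
    let mut fcx = Fcx { emitted: 0 };
    let mut bcx = entry.with_fcx(&mut fcx);
    let took_slow_path = true;
    // The inner scope yields the block translation ended in plus a value...
    let (bl, val) = {
        emit(&mut bcx);
        let bl = if took_slow_path { &overflow_block } else { bcx.bl };
        (bl, 42)
    };
    // ...and the caller re-wraps it: the `bl.with_fcx(bcx.fcx)` idiom.
    let mut bcx = bl.with_fcx(bcx.fcx);
    emit(&mut bcx);
    assert_eq!((bcx.bl.id, val, bcx.fcx.emitted), (1, 42, 2));
}
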
-fn trans_eager_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - binop_expr: &ast::Expr, - binop_ty: Ty<'tcx>, - op: ast::BinOp, - lhs_t: Ty<'tcx>, - lhs: ValueRef, - rhs_t: Ty<'tcx>, - rhs: ValueRef) - -> DatumBlock<'blk, 'tcx, Expr> { +fn trans_eager_binop<'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + binop_expr: &ast::Expr, + binop_ty: Ty<'tcx>, + op: ast::BinOp, + lhs_t: Ty<'tcx>, + lhs: ValueRef, + rhs_t: Ty<'tcx>, + rhs: ValueRef) + -> DatumBlock<'blk, 'tcx, Expr> { + let mut bcx = &mut bl.with_fcx(fcx); let _icx = push_ctxt("trans_eager_binop"); let tcx = bcx.tcx(); @@ -1740,7 +1781,6 @@ fn trans_eager_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let binop_debug_loc = binop_expr.debug_loc(); - let mut bcx = bcx; let val = match op.node { ast::BiAdd => { if is_float { @@ -1750,7 +1790,7 @@ fn trans_eager_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } else { let (newbcx, res) = with_overflow_check( bcx, OverflowOp::Add, info, lhs_t, lhs, rhs, binop_debug_loc); - bcx = newbcx; + bcx.bl = newbcx; res } } @@ -1762,7 +1802,7 @@ fn trans_eager_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } else { let (newbcx, res) = with_overflow_check( bcx, OverflowOp::Sub, info, lhs_t, lhs, rhs, binop_debug_loc); - bcx = newbcx; + bcx.bl = newbcx; res } } @@ -1774,7 +1814,7 @@ fn trans_eager_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } else { let (newbcx, res) = with_overflow_check( bcx, OverflowOp::Mul, info, lhs_t, lhs, rhs, binop_debug_loc); - bcx = newbcx; + bcx.bl = newbcx; res } } @@ -1783,12 +1823,12 @@ fn trans_eager_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, FDiv(bcx, lhs, rhs, binop_debug_loc) } else { // Only zero-check integers; fp /0 is NaN - bcx = base::fail_if_zero_or_overflows(bcx, - expr_info(binop_expr), - op, - lhs, - rhs, - rhs_t); + bcx.bl = base::fail_if_zero_or_overflows(bcx, + expr_info(binop_expr), + op, + lhs, + rhs, + rhs_t); if is_signed { SDiv(bcx, lhs, rhs, binop_debug_loc) } else { @@ -1801,9 +1841,9 @@ fn trans_eager_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, FRem(bcx, lhs, rhs, binop_debug_loc) } else { // Only zero-check integers; fp %0 is NaN - bcx = base::fail_if_zero_or_overflows(bcx, - expr_info(binop_expr), - op, lhs, rhs, rhs_t); + bcx.bl = base::fail_if_zero_or_overflows(bcx, + expr_info(binop_expr), + op, lhs, rhs, rhs_t); if is_signed { SRem(bcx, lhs, rhs, binop_debug_loc) } else { @@ -1817,13 +1857,13 @@ fn trans_eager_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ast::BiShl => { let (newbcx, res) = with_overflow_check( bcx, OverflowOp::Shl, info, lhs_t, lhs, rhs, binop_debug_loc); - bcx = newbcx; + bcx.bl = newbcx; res } ast::BiShr => { let (newbcx, res) = with_overflow_check( bcx, OverflowOp::Shr, info, lhs_t, lhs, rhs, binop_debug_loc); - bcx = newbcx; + bcx.bl = newbcx; res } ast::BiEq | ast::BiNe | ast::BiLt | ast::BiGe | ast::BiLe | ast::BiGt => { @@ -1847,51 +1887,57 @@ enum lazy_binop_ty { lazy_or, } -fn trans_lazy_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - binop_expr: &ast::Expr, - op: lazy_binop_ty, - a: &ast::Expr, - b: &ast::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { +fn trans_lazy_binop<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + binop_expr: &ast::Expr, + op: lazy_binop_ty, + a: &ast::Expr, + b: &ast::Expr) + -> DatumBlock<'blk, 'tcx, Expr> { let _icx = push_ctxt("trans_lazy_binop"); let binop_ty = expr_ty(bcx, binop_expr); - let fcx = bcx.fcx; let DatumBlock {bcx: past_lhs, datum: lhs} = trans(bcx, a); - let lhs = lhs.to_llscalarish(past_lhs); + let lhs = lhs.to_llscalarish(&mut 
past_lhs.with_fcx(bcx.fcx)); if past_lhs.unreachable.get() { - return immediate_rvalue_bcx(past_lhs, lhs, binop_ty).to_expr_datumblock(); + return immediate_rvalue_bcx(&mut past_lhs.with_fcx(bcx.fcx), lhs, binop_ty) + .to_expr_datumblock(); } - let join = fcx.new_id_block("join", binop_expr.id); - let before_rhs = fcx.new_id_block("before_rhs", b.id); + let join = bcx.fcx.new_id_block("join", binop_expr.id); + let before_rhs = bcx.fcx.new_id_block("before_rhs", b.id); match op { - lazy_and => CondBr(past_lhs, lhs, before_rhs.llbb, join.llbb, DebugLoc::None), - lazy_or => CondBr(past_lhs, lhs, join.llbb, before_rhs.llbb, DebugLoc::None) + lazy_and => CondBr(&mut past_lhs.with_fcx(bcx.fcx), + lhs, before_rhs.llbb, join.llbb, DebugLoc::None), + lazy_or => CondBr(&mut past_lhs.with_fcx(bcx.fcx), + lhs, join.llbb, before_rhs.llbb, DebugLoc::None) } - let DatumBlock {bcx: past_rhs, datum: rhs} = trans(before_rhs, b); - let rhs = rhs.to_llscalarish(past_rhs); + let DatumBlock {bcx: past_rhs, datum: rhs} = trans(&mut before_rhs.with_fcx(bcx.fcx), b); + let rhs = rhs.to_llscalarish(&mut past_rhs.with_fcx(bcx.fcx)); if past_rhs.unreachable.get() { - return immediate_rvalue_bcx(join, lhs, binop_ty).to_expr_datumblock(); + return immediate_rvalue_bcx(&mut join.with_fcx(bcx.fcx), + lhs, binop_ty).to_expr_datumblock(); } - Br(past_rhs, join.llbb, DebugLoc::None); - let phi = Phi(join, Type::i1(bcx.ccx()), &[lhs, rhs], + Br(&mut past_rhs.with_fcx(bcx.fcx), join.llbb, DebugLoc::None); + let ty = Type::i1(bcx.ccx()); + let phi = Phi(&mut join.with_fcx(bcx.fcx), ty, &[lhs, rhs], &[past_lhs.llbb, past_rhs.llbb]); - return immediate_rvalue_bcx(join, phi, binop_ty).to_expr_datumblock(); + return immediate_rvalue_bcx(&mut join.with_fcx(bcx.fcx), phi, binop_ty).to_expr_datumblock(); } -fn trans_binary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &ast::Expr, - op: ast::BinOp, - lhs: &ast::Expr, - rhs: &ast::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { +fn trans_binary<'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + expr: &ast::Expr, + op: ast::BinOp, + lhs: &ast::Expr, + rhs: &ast::Expr) + -> DatumBlock<'blk, 'tcx, Expr> { + let mut bcx = &mut bl.with_fcx(fcx); let _icx = push_ctxt("trans_binary"); let ccx = bcx.ccx(); @@ -1928,18 +1974,19 @@ fn trans_binary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } } -fn trans_overloaded_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &ast::Expr, - method_call: MethodCall, - lhs: Datum<'tcx, Expr>, - rhs: Vec<(Datum<'tcx, Expr>, ast::NodeId)>, - dest: Option, - autoref: bool) - -> Result<'blk, 'tcx> { +fn trans_overloaded_op<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + expr: &ast::Expr, + method_call: MethodCall, + lhs: Datum<'tcx, Expr>, + rhs: Vec<(Datum<'tcx, Expr>, ast::NodeId)>, + dest: Option, + autoref: bool) + -> Result<'blk> { let method_ty = bcx.tcx().method_map.borrow().get(&method_call).unwrap().ty; + let ty = monomorphize_type(bcx, method_ty); callee::trans_call_inner(bcx, expr.debug_loc(), - monomorphize_type(bcx, method_ty), + ty, |bcx, arg_cleanup_scope| { meth::trans_method_callee(bcx, method_call, @@ -1950,12 +1997,14 @@ fn trans_overloaded_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, dest) } -fn trans_overloaded_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - expr: &ast::Expr, - callee: &'a ast::Expr, - args: &'a [P], - dest: Option) - -> Block<'blk, 'tcx> { +fn trans_overloaded_call<'a, 'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + expr: &ast::Expr, + callee: &'a 
ast::Expr, + args: &'a [P], + dest: Option) + -> &'blk Block { + let mut bcx = &mut bl.with_fcx(fcx); let method_call = MethodCall::expr(expr.id); let method_type = bcx.tcx() .method_map @@ -1965,11 +2014,11 @@ fn trans_overloaded_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, .ty; let mut all_args = vec!(callee); all_args.extend(args.iter().map(|e| &**e)); + let ty = monomorphize_type(bcx, method_type); unpack_result!(bcx, callee::trans_call_inner(bcx, expr.debug_loc(), - monomorphize_type(bcx, - method_type), + ty, |bcx, arg_cleanup_scope| { meth::trans_method_callee( bcx, @@ -1979,7 +2028,7 @@ fn trans_overloaded_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, }, callee::ArgOverloadedCall(all_args), dest)); - bcx + bcx.bl } pub fn cast_is_noop<'tcx>(tcx: &ty::ctxt<'tcx>, @@ -2004,15 +2053,17 @@ pub fn cast_is_noop<'tcx>(tcx: &ty::ctxt<'tcx>, } } -fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &ast::Expr, - id: ast::NodeId) - -> DatumBlock<'blk, 'tcx, Expr> +fn trans_imm_cast<'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + expr: &ast::Expr, + id: ast::NodeId) + -> DatumBlock<'blk, 'tcx, Expr> { use middle::cast::CastTy::*; use middle::cast::IntTy::*; - fn int_cast(bcx: Block, + + fn int_cast(bcx: &mut BlockContext, lldsttype: Type, llsrctype: Type, llsrc: ValueRef, @@ -2033,7 +2084,7 @@ fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } } - fn float_cast(bcx: Block, + fn float_cast(bcx: &mut BlockContext, lldsttype: Type, llsrctype: Type, llsrc: ValueRef) @@ -2050,7 +2101,7 @@ fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } let _icx = push_ctxt("trans_cast"); - let mut bcx = bcx; + let mut bcx = &mut bl.with_fcx(fcx); let ccx = bcx.ccx(); let t_in = expr_ty_adjusted(bcx, expr); @@ -2067,23 +2118,25 @@ fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, if cast_is_noop(bcx.tcx(), expr, datum_ty, t_out) { datum.ty = t_out; - return DatumBlock::new(bcx, datum); + return DatumBlock::new(bcx.bl, datum); } if type_is_fat_ptr(bcx.tcx(), t_in) { assert!(datum.kind.is_by_ref()); if type_is_fat_ptr(bcx.tcx(), t_out) { - return DatumBlock::new(bcx, Datum::new( - PointerCast(bcx, datum.val, ll_t_out.ptr_to()), + let pc = PointerCast(bcx, datum.val, ll_t_out.ptr_to()); + return DatumBlock::new(bcx.bl, Datum::new( + pc, t_out, Rvalue::new(ByRef) )).to_expr_datumblock(); } else { // Return the address + let dp = get_dataptr(bcx, datum.val); + let ld = Load(bcx, dp); + let pc = PointerCast(bcx, ld, ll_t_out); return immediate_rvalue_bcx(bcx, - PointerCast(bcx, - Load(bcx, get_dataptr(bcx, datum.val)), - ll_t_out), + pc, t_out).to_expr_datumblock(); } } @@ -2127,14 +2180,15 @@ fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, return immediate_rvalue_bcx(bcx, newval, t_out).to_expr_datumblock(); } -fn trans_assign_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &ast::Expr, - op: ast::BinOp, - dst: &ast::Expr, - src: &ast::Expr) - -> Block<'blk, 'tcx> { +fn trans_assign_op<'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + expr: &ast::Expr, + op: ast::BinOp, + dst: &ast::Expr, + src: &ast::Expr) + -> &'blk Block { let _icx = push_ctxt("trans_assign_op"); - let mut bcx = bcx; + let mut bcx = &mut bl.with_fcx(fcx); debug!("trans_assign_op(expr={})", bcx.expr_to_string(expr)); @@ -2159,11 +2213,12 @@ fn trans_assign_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, return result_datum.store_to(bcx, dst_datum.val); } -fn auto_ref<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - datum: Datum<'tcx, Expr>, - 
expr: &ast::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - let mut bcx = bcx; +fn auto_ref<'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + datum: Datum<'tcx, Expr>, + expr: &ast::Expr) + -> DatumBlock<'blk, 'tcx, Expr> { + let mut bcx = &mut bl.with_fcx(fcx); // Ensure cleanup of `datum` if not already scheduled and obtain // a "by ref" pointer. @@ -2180,28 +2235,31 @@ fn auto_ref<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // Construct the resulting datum, using what was the "by ref" // ValueRef of type `referent_ty` to be the "by value" ValueRef // of type `&referent_ty`. - DatumBlock::new(bcx, Datum::new(llref, ptr_ty, RvalueExpr(Rvalue::new(ByValue)))) + DatumBlock::new(bcx.bl, Datum::new(llref, ptr_ty, RvalueExpr(Rvalue::new(ByValue)))) } -fn deref_multiple<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &ast::Expr, - datum: Datum<'tcx, Expr>, - times: usize) - -> DatumBlock<'blk, 'tcx, Expr> { - let mut bcx = bcx; +fn deref_multiple<'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + expr: &ast::Expr, + datum: Datum<'tcx, Expr>, + times: usize) + -> DatumBlock<'blk, 'tcx, Expr> { + let mut bcx = &mut bl.with_fcx(fcx); let mut datum = datum; for i in 0..times { let method_call = MethodCall::autoderef(expr.id, i as u32); datum = unpack_datum!(bcx, deref_once(bcx, expr, datum, method_call)); } - DatumBlock { bcx: bcx, datum: datum } + DatumBlock { bcx: bcx.bl, datum: datum } } -fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &ast::Expr, - datum: Datum<'tcx, Expr>, - method_call: MethodCall) - -> DatumBlock<'blk, 'tcx, Expr> { +fn deref_once<'r, 'blk, 'tcx> + (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>, + expr: &ast::Expr, + datum: Datum<'tcx, Expr>, + method_call: MethodCall) + -> DatumBlock<'blk, 'tcx, Expr> { + let mut bcx = &mut bl.with_fcx(fcx); let ccx = bcx.ccx(); debug!("deref_once(expr={}, datum={}, method_call={:?})", @@ -2209,8 +2267,6 @@ fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, datum.to_string(ccx), method_call); - let mut bcx = bcx; - // Check for overloaded deref. let method_ty = ccx.tcx().method_map.borrow() .get(&method_call).map(|method| method.ty); @@ -2255,7 +2311,7 @@ fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, if type_is_sized(bcx.tcx(), content_ty) { let ptr = load_ty(bcx, datum.val, datum.ty); - DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr)) + DatumBlock::new(bcx.bl, Datum::new(ptr, content_ty, LvalueExpr)) } else { // A fat pointer and a DST lvalue have the same representation // just different types. Since there is no temporary for `*e` @@ -2264,7 +2320,7 @@ fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // we schedule cleanup for `e`, turning it into an lvalue. let datum = Datum::new(datum.val, content_ty, LvalueExpr); - DatumBlock::new(bcx, datum) + DatumBlock::new(bcx.bl, datum) } } @@ -2278,11 +2334,11 @@ fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // rvalue for non-owning pointers like &T or *T, in which // case cleanup *is* scheduled elsewhere, by the true // owner (or, in the case of *T, by the user). - DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr)) + DatumBlock::new(bcx.bl, Datum::new(ptr, content_ty, LvalueExpr)) } else { // A fat pointer and a DST lvalue have the same representation // just different types. 
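
Most of the `DatumBlock::new(bcx.bl, ...)` returns in these helpers feed the `unpack_datum!` pattern: a callee reports the block it finished in alongside its value, and the caller swaps that block in before translating further. A sketch of the plumbing under that assumption (toy types; the real macro in rustc also handles rebinding the context):

struct Block {
    id: u32,
}

struct DatumBlock<'blk, D> {
    bcx: &'blk Block,
    datum: D,
}

fn trans_subexpr<'blk>(cur: &'blk Block) -> DatumBlock<'blk, i32> {
    DatumBlock { bcx: cur, datum: 7 }
}

fn main() {
    let entry = Block { id: 0 };
    let mut bl: &Block = &entry;
    // Roughly what `let datum = unpack_datum!(bcx, trans(...))` expands to:
    let datum = {
        let db = trans_subexpr(bl);
        bl = db.bcx; // keep translating in whatever block we ended in
        db.datum
    };
    assert_eq!((bl.id, datum), (0, 7));
}
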
- DatumBlock::new(bcx, Datum::new(datum.val, content_ty, LvalueExpr)) + DatumBlock::new(bcx.bl, Datum::new(datum.val, content_ty, LvalueExpr)) } } @@ -2334,7 +2390,9 @@ enum OverflowOpViaInputCheck { Shl, Shr, } enum OverflowOpViaIntrinsic { Add, Sub, Mul, } impl OverflowOpViaIntrinsic { - fn to_intrinsic<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>, lhs_ty: Ty) -> ValueRef { + fn to_intrinsic<'r, 'blk, 'tcx>(&self, + bcx: &mut BlockContext<'r, 'blk, 'tcx>, + lhs_ty: Ty) -> ValueRef { let name = self.to_intrinsic_name(bcx.tcx(), lhs_ty); bcx.ccx().get_intrinsic(&name) } @@ -2402,23 +2460,25 @@ impl OverflowOpViaIntrinsic { } } - fn build_intrinsic_call<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>, - info: NodeIdAndSpan, - lhs_t: Ty<'tcx>, lhs: ValueRef, - rhs: ValueRef, - binop_debug_loc: DebugLoc) - -> (Block<'blk, 'tcx>, ValueRef) { + fn build_intrinsic_call<'r, 'blk, 'tcx>(&self, bcx: &mut BlockContext<'r, 'blk, 'tcx>, + info: NodeIdAndSpan, + lhs_t: Ty<'tcx>, lhs: ValueRef, + rhs: ValueRef, + binop_debug_loc: DebugLoc) + -> (&'blk Block, ValueRef) { let llfn = self.to_intrinsic(bcx, lhs_t); let val = Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc); let result = ExtractValue(bcx, val, 0); // iN operation result let overflow = ExtractValue(bcx, val, 1); // i1 "did it overflow?" - let cond = ICmp(bcx, llvm::IntEQ, overflow, C_integral(Type::i1(bcx.ccx()), 1, false), + let ty = Type::i1(bcx.ccx()); + let cond = ICmp(bcx, llvm::IntEQ, overflow, C_integral(ty, 1, false), binop_debug_loc); let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1"); - Call(bcx, expect, &[cond, C_integral(Type::i1(bcx.ccx()), 0, false)], + let ty = Type::i1(bcx.ccx()); + Call(bcx, expect, &[cond, C_integral(ty, 0, false)], None, binop_debug_loc); let bcx = @@ -2431,14 +2491,14 @@ impl OverflowOpViaIntrinsic { } impl OverflowOpViaInputCheck { - fn build_with_input_check<'blk, 'tcx>(&self, - bcx: Block<'blk, 'tcx>, - info: NodeIdAndSpan, - lhs_t: Ty<'tcx>, - lhs: ValueRef, - rhs: ValueRef, - binop_debug_loc: DebugLoc) - -> (Block<'blk, 'tcx>, ValueRef) + fn build_with_input_check<'r, 'blk, 'tcx>(&self, + bcx: &mut BlockContext<'r, 'blk, 'tcx>, + info: NodeIdAndSpan, + lhs_t: Ty<'tcx>, + lhs: ValueRef, + rhs: ValueRef, + binop_debug_loc: DebugLoc) + -> (&'blk Block, ValueRef) { let lhs_llty = val_ty(lhs); let rhs_llty = val_ty(rhs); @@ -2481,21 +2541,21 @@ fn shift_mask_val(llty: Type) -> u64 { // all shifts). For 32- and 64-bit types, this matches the semantics // of Java. (See related discussion on #1877 and #10183.) 
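
On `shift_mask_val` and the Java comparison above: masking the right-hand side with `bit-width - 1` forces every shift amount into range, so an over-wide shift wraps instead of being undefined at the LLVM level. Rust's `wrapping_shl` exposes exactly that masking, which makes the semantics easy to check:

fn main() {
    // shift_mask_val for a 32-bit type is 31 (0b11111).
    let mask = 32u32 - 1;
    for rhs in [0u32, 1, 31, 32, 33, 100] {
        let masked = rhs & mask;
        // 1 << 33 would be undefined in LLVM; 1 << (33 & 31) == 1 << 1.
        assert_eq!(1i32 << masked, 1i32.wrapping_shl(rhs));
    }
}
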
-fn build_unchecked_lshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - lhs: ValueRef, - rhs: ValueRef, - binop_debug_loc: DebugLoc) -> ValueRef { +fn build_unchecked_lshift<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + lhs: ValueRef, + rhs: ValueRef, + binop_debug_loc: DebugLoc) -> ValueRef { let rhs = base::cast_shift_expr_rhs(bcx, ast::BinOp_::BiShl, lhs, rhs); // #1877, #10183: Ensure that input is always valid let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc); Shl(bcx, lhs, rhs, binop_debug_loc) } -fn build_unchecked_rshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - lhs_t: Ty<'tcx>, - lhs: ValueRef, - rhs: ValueRef, - binop_debug_loc: DebugLoc) -> ValueRef { +fn build_unchecked_rshift<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + lhs_t: Ty<'tcx>, + lhs: ValueRef, + rhs: ValueRef, + binop_debug_loc: DebugLoc) -> ValueRef { let rhs = base::cast_shift_expr_rhs(bcx, ast::BinOp_::BiShr, lhs, rhs); // #1877, #10183: Ensure that input is always valid let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc); @@ -2507,20 +2567,21 @@ fn build_unchecked_rshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } } -fn shift_mask_rhs<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - rhs: ValueRef, - debug_loc: DebugLoc) -> ValueRef { +fn shift_mask_rhs<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, + rhs: ValueRef, + debug_loc: DebugLoc) -> ValueRef { let rhs_llty = val_ty(rhs); let mask = shift_mask_val(rhs_llty); And(bcx, rhs, C_integral(rhs_llty, mask, false), debug_loc) } -fn with_overflow_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, oop: OverflowOp, info: NodeIdAndSpan, - lhs_t: Ty<'tcx>, lhs: ValueRef, - rhs: ValueRef, - binop_debug_loc: DebugLoc) - -> (Block<'blk, 'tcx>, ValueRef) { - if bcx.unreachable.get() { return (bcx, _Undef(lhs)); } +fn with_overflow_check<'r, 'blk, 'tcx> + (bcx: &mut BlockContext<'r, 'blk, 'tcx>, oop: OverflowOp, info: NodeIdAndSpan, + lhs_t: Ty<'tcx>, lhs: ValueRef, + rhs: ValueRef, + binop_debug_loc: DebugLoc) + -> (&'blk Block, ValueRef) { + if bcx.bl.unreachable.get() { return (bcx.bl, _Undef(lhs)); } if bcx.ccx().check_overflow() { match oop.codegen_strategy() { @@ -2540,6 +2601,6 @@ fn with_overflow_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, oop: OverflowOp, info OverflowOp::Shr => build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc), }; - (bcx, res) + (bcx.bl, res) } } diff --git a/src/librustc_trans/trans/foreign.rs b/src/librustc_trans/trans/foreign.rs index 95c93d76585bf..e0a75fbf88d67 100644 --- a/src/librustc_trans/trans/foreign.rs +++ b/src/librustc_trans/trans/foreign.rs @@ -222,14 +222,14 @@ pub fn register_foreign_item_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, /// can derive these from callee_ty but in the case of variadic /// functions passed_arg_tys will include the Rust type of all /// the arguments including the ones not specified in the fn's signature. 
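
`with_overflow_check` above picks between two strategies; the intrinsic path in `build_intrinsic_call` relies on `llvm.*.with.overflow`, which yields a pair of the wrapped result and an i1 overflow flag, hints via `llvm.expect` that the flag is usually clear, and branches to a failure block otherwise. Rust's `overflowing_*` methods return the same pair, so the strategy can be sketched directly:

fn checked_add_or_panic(lhs: i32, rhs: i32) -> i32 {
    // Mirrors ExtractValue(val, 0) and ExtractValue(val, 1) on the
    // intrinsic's {iN, i1} aggregate result.
    let (res, overflowed) = lhs.overflowing_add(rhs);
    if overflowed {
        // The branch the `llvm.expect.i1` hint marks as unlikely.
        panic!("arithmetic operation overflowed");
    }
    res
}

fn main() {
    assert_eq!(checked_add_or_panic(2, 2), 4);
    // checked_add_or_panic(i32::MAX, 1) would take the panic path.
}
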
-pub fn trans_native_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                     callee_ty: Ty<'tcx>,
-                                     llfn: ValueRef,
-                                     llretptr: ValueRef,
-                                     llargs_rust: &[ValueRef],
-                                     passed_arg_tys: Vec<Ty<'tcx>>,
-                                     call_debug_loc: DebugLoc)
-                                     -> Block<'blk, 'tcx>
+pub fn trans_native_call<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                         callee_ty: Ty<'tcx>,
+                                         llfn: ValueRef,
+                                         llretptr: ValueRef,
+                                         llargs_rust: &[ValueRef],
+                                         passed_arg_tys: Vec<Ty<'tcx>>,
+                                         call_debug_loc: DebugLoc)
+                                         -> &'blk Block
 {
     let ccx = bcx.ccx();
     let tcx = bcx.tcx();
@@ -319,7 +319,8 @@ pub fn trans_native_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
         } else {
             if ty::type_is_bool(passed_arg_tys[i]) {
                 let val = LoadRangeAssert(bcx, llarg_rust, 0, 2, llvm::False);
-                Trunc(bcx, val, Type::i1(bcx.ccx()))
+                let ty = Type::i1(bcx.ccx());
+                Trunc(bcx, val, ty)
             } else {
                 Load(bcx, llarg_rust)
             }
@@ -435,7 +436,7 @@ pub fn trans_native_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
         }
     }

-    return bcx;
+    return bcx.bl;
 }

 // feature gate SIMD types in FFI, since I (huonw) am not sure the
diff --git a/src/librustc_trans/trans/glue.rs b/src/librustc_trans/trans/glue.rs
index 264957d36513b..180a27d939dd2 100644
--- a/src/librustc_trans/trans/glue.rs
+++ b/src/librustc_trans/trans/glue.rs
@@ -46,39 +46,35 @@ use arena::TypedArena;
 use libc::c_uint;
 use syntax::ast;

-pub fn trans_exchange_free_dyn<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
-                                           v: ValueRef,
-                                           size: ValueRef,
-                                           align: ValueRef,
-                                           debug_loc: DebugLoc)
-                                           -> Block<'blk, 'tcx> {
+pub fn trans_exchange_free_dyn<'r, 'blk, 'tcx>(cx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                               v: ValueRef,
+                                               size: ValueRef,
+                                               align: ValueRef,
+                                               debug_loc: DebugLoc)
+                                               -> &'blk Block {
     let _icx = push_ctxt("trans_exchange_free");
     let ccx = cx.ccx();
-    callee::trans_lang_call(cx,
-                            langcall(cx, None, "", ExchangeFreeFnLangItem),
-                            &[PointerCast(cx, v, Type::i8p(ccx)), size, align],
-                            Some(expr::Ignore),
-                            debug_loc).bcx
+    let lc = langcall(cx, None, "", ExchangeFreeFnLangItem);
+    let pc = [PointerCast(cx, v, Type::i8p(ccx)), size, align];
+    callee::trans_lang_call(cx, lc, &pc, Some(expr::Ignore), debug_loc).bcx
 }

-pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
-                                       v: ValueRef,
-                                       size: u64,
-                                       align: u32,
-                                       debug_loc: DebugLoc)
-                                       -> Block<'blk, 'tcx> {
-    trans_exchange_free_dyn(cx,
-                            v,
-                            C_uint(cx.ccx(), size),
-                            C_uint(cx.ccx(), align),
-                            debug_loc)
+pub fn trans_exchange_free<'r, 'blk, 'tcx>(cx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                           v: ValueRef,
+                                           size: u64,
+                                           align: u32,
+                                           debug_loc: DebugLoc)
+                                           -> &'blk Block {
+    let s = C_uint(cx.ccx(), size);
+    let a = C_uint(cx.ccx(), align);
+    trans_exchange_free_dyn(cx, v, s, a, debug_loc)
 }

-pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                          ptr: ValueRef,
-                                          content_ty: Ty<'tcx>,
-                                          debug_loc: DebugLoc)
-                                          -> Block<'blk, 'tcx> {
+pub fn trans_exchange_free_ty<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                              ptr: ValueRef,
+                                              content_ty: Ty<'tcx>,
+                                              debug_loc: DebugLoc)
+                                              -> &'blk Block {
     assert!(type_is_sized(bcx.ccx().tcx(), content_ty));
     let sizing_type = sizing_type_of(bcx.ccx(), content_ty);
     let content_size = llsize_of_alloc(bcx.ccx(), sizing_type);
@@ -88,7 +84,7 @@ pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
         let content_align = align_of(bcx.ccx(), content_ty);
         trans_exchange_free(bcx, ptr, content_size, content_align, debug_loc)
     } else {
-        bcx
+        bcx.bl
     }
 }

@@ -128,18 +124,18 @@ pub fn get_drop_glue_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
     }
 }

-pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                           v: ValueRef,
-                           t: Ty<'tcx>,
-                           debug_loc: DebugLoc) -> Block<'blk, 'tcx> {
+pub fn drop_ty<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                               v: ValueRef,
+                               t: Ty<'tcx>,
+                               debug_loc: DebugLoc) -> &'blk Block {
     drop_ty_core(bcx, v, t, debug_loc, false)
 }

-pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                v: ValueRef,
-                                t: Ty<'tcx>,
-                                debug_loc: DebugLoc,
-                                skip_dtor: bool) -> Block<'blk, 'tcx> {
+pub fn drop_ty_core<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                    v: ValueRef,
+                                    t: Ty<'tcx>,
+                                    debug_loc: DebugLoc,
+                                    skip_dtor: bool) -> &'blk Block {
     // NB: v is an *alias* of type t here, not a direct value.
     debug!("drop_ty_core(t={}, skip_dtor={})", t.repr(bcx.tcx()), skip_dtor);
     let _icx = push_ctxt("drop_ty");
@@ -160,17 +156,18 @@ pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
             Call(bcx, glue, &[ptr], None, debug_loc);
         }

-    bcx
+    bcx.bl
 }

-pub fn drop_ty_immediate<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                     v: ValueRef,
-                                     t: Ty<'tcx>,
-                                     debug_loc: DebugLoc,
-                                     skip_dtor: bool)
-                                     -> Block<'blk, 'tcx> {
+pub fn drop_ty_immediate<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                         v: ValueRef,
+                                         t: Ty<'tcx>,
+                                         debug_loc: DebugLoc,
+                                         skip_dtor: bool)
+                                         -> &'blk Block {
     let _icx = push_ctxt("drop_ty_immediate");
-    let vp = alloca(bcx, type_of(bcx.ccx(), t), "");
+    let ty = type_of(bcx.ccx(), t);
+    let vp = alloca(bcx, ty, "");
     store_ty(bcx, v, vp, t);
     drop_ty_core(bcx, vp, t, debug_loc, skip_dtor)
 }
@@ -250,13 +247,14 @@ fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
     let _s = StatRecorder::new(ccx, format!("drop {}",
                                             ty_to_short_str(ccx.tcx(), t)));

     let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
-    let (arena, fcx): (TypedArena<_>, FunctionContext);
+    let (arena, mut fcx): (TypedArena<_>, FunctionContext);
     arena = TypedArena::new();
     fcx = new_fn_ctxt(ccx, llfn, ast::DUMMY_NODE_ID, false,
                       ty::FnConverging(ty::mk_nil(ccx.tcx())),
                       empty_substs, None, &arena);
+    let mut fcx = &mut fcx;

-    let bcx = init_function(&fcx, false, ty::FnConverging(ty::mk_nil(ccx.tcx())));
+    let bcx = init_function(fcx, false, ty::FnConverging(ty::mk_nil(ccx.tcx())));

     update_linkage(ccx, llfn, None, OriginalTranslation);

@@ -270,41 +268,42 @@ fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
     // type, so we don't need to explicitly cast the function parameter.

     let llrawptr0 = get_param(llfn, fcx.arg_pos(0) as c_uint);
-    let bcx = make_drop_glue(bcx, llrawptr0, g);
-    finish_fn(&fcx, bcx, ty::FnConverging(ty::mk_nil(ccx.tcx())), DebugLoc::None);
+    let bcx = make_drop_glue(&mut bcx.with_fcx(fcx), llrawptr0, g);
+    finish_fn(fcx, bcx, ty::FnConverging(ty::mk_nil(ccx.tcx())), DebugLoc::None);

     llfn
 }

-fn trans_struct_drop_flag<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
-                                      t: Ty<'tcx>,
-                                      struct_data: ValueRef,
-                                      dtor_did: ast::DefId,
-                                      class_did: ast::DefId,
-                                      substs: &subst::Substs<'tcx>)
-                                      -> Block<'blk, 'tcx> {
+fn trans_struct_drop_flag<'r, 'blk, 'tcx>
+    (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>,
+     t: Ty<'tcx>,
+     struct_data: ValueRef,
+     dtor_did: ast::DefId,
+     class_did: ast::DefId,
+     substs: &subst::Substs<'tcx>)
+     -> &'blk Block {
+    let mut bcx = &mut bl.with_fcx(fcx);
     assert!(type_is_sized(bcx.tcx(), t), "Precondition: caller must ensure t is sized");

     let repr = adt::represent_type(bcx.ccx(), t);
     let drop_flag = unpack_datum!(bcx, adt::trans_drop_flag_ptr(bcx, &*repr, struct_data));
-    let loaded = load_ty(bcx, drop_flag.val, bcx.tcx().dtor_type());
-    let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
+    let dty = bcx.tcx().dtor_type();
+    let loaded = load_ty(bcx, drop_flag.val, dty);
+    let drop_flag_llty = type_of(bcx.fcx.ccx, dty);
     let init_val = C_integral(drop_flag_llty, adt::DTOR_NEEDED as u64, false);

-    let bcx = if !bcx.ccx().check_drop_flag_for_sanity() {
-        bcx
-    } else {
+    if bcx.ccx().check_drop_flag_for_sanity() {
         let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
         let done_val = C_integral(drop_flag_llty, adt::DTOR_DONE as u64, false);
         let not_init = ICmp(bcx, llvm::IntNE, loaded, init_val, DebugLoc::None);
         let not_done = ICmp(bcx, llvm::IntNE, loaded, done_val, DebugLoc::None);
         let drop_flag_neither_initialized_nor_cleared =
             And(bcx, not_init, not_done, DebugLoc::None);
-        with_cond(bcx, drop_flag_neither_initialized_nor_cleared, |cx| {
+        bcx.bl = with_cond(bcx, drop_flag_neither_initialized_nor_cleared, |cx| {
             let llfn = cx.ccx().get_intrinsic(&("llvm.debugtrap"));
             Call(cx, llfn, &[], None, DebugLoc::None);
-            cx
-        })
+            cx.bl
+        });
     };

     let drop_flag_dtor_needed = ICmp(bcx, llvm::IntEQ, loaded, init_val, DebugLoc::None);
@@ -347,13 +346,13 @@ pub fn get_res_dtor<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
     }
 }

-fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                 t: Ty<'tcx>,
-                                 v0: ValueRef,
-                                 dtor_did: ast::DefId,
-                                 class_did: ast::DefId,
-                                 substs: &subst::Substs<'tcx>)
-                                 -> Block<'blk, 'tcx>
+fn trans_struct_drop<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                     t: Ty<'tcx>,
+                                     v0: ValueRef,
+                                     dtor_did: ast::DefId,
+                                     class_did: ast::DefId,
+                                     substs: &subst::Substs<'tcx>)
+                                     -> &'blk Block
 {
     debug!("trans_struct_drop t: {}", bcx.ty_to_string(t));

@@ -382,13 +381,14 @@ fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     let glue_type = get_drop_glue_type(bcx.ccx(), t);
     let dtor_ty = ty::mk_ctor_fn(bcx.tcx(), class_did, &[glue_type], ty::mk_nil(bcx.tcx()));
-    let (_, bcx) = invoke(bcx, dtor_addr, &[v0], dtor_ty, DebugLoc::None);
+    let (_, bl) = invoke(bcx, dtor_addr, &[v0], dtor_ty, DebugLoc::None);

-    bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope)
+    bcx.fcx.pop_and_trans_custom_cleanup_scope(bl, contents_scope)
 }

-pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, info: ValueRef)
-                                         -> (ValueRef, ValueRef) {
+pub fn size_and_align_of_dst<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                             t: Ty<'tcx>, info: ValueRef)
+                                             -> (ValueRef, ValueRef) {
     debug!("calculate size of DST: {}; with lost info: {}",
            bcx.ty_to_string(t), bcx.val_to_string(info));
     if type_is_sized(bcx.tcx(), t) {
@@ -417,20 +417,15 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, in

             // Return the sum of sizes and max of aligns.
             let size = Add(bcx, sized_size, unsized_size, DebugLoc::None);
-            let align = Select(bcx,
-                               ICmp(bcx,
-                                    llvm::IntULT,
-                                    sized_align,
-                                    unsized_align,
-                                    DebugLoc::None),
-                               sized_align,
-                               unsized_align);
+            let cmp = ICmp(bcx, llvm::IntULT, sized_align, unsized_align, DebugLoc::None);
+            let align = Select(bcx, cmp, sized_align, unsized_align);
             (size, align)
         }
         ty::ty_trait(..) => {
             // info points to the vtable and the second entry in the vtable is the
             // dynamic size of the object.
-            let info = PointerCast(bcx, info, Type::int(bcx.ccx()).ptr_to());
+            let ty = Type::int(bcx.ccx());
+            let info = PointerCast(bcx, info, ty.ptr_to());
             let size_ptr = GEPi(bcx, info, &[1]);
             let align_ptr = GEPi(bcx, info, &[2]);
             (Load(bcx, size_ptr), Load(bcx, align_ptr))
@@ -442,7 +437,8 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, in
             let llunit_ty = sizing_type_of(bcx.ccx(), unit_ty);
             let unit_align = llalign_of_min(bcx.ccx(), llunit_ty);
             let unit_size = llsize_of_alloc(bcx.ccx(), llunit_ty);
-            (Mul(bcx, info, C_uint(bcx.ccx(), unit_size), DebugLoc::None),
+            let ty = C_uint(bcx.ccx(), unit_size);
+            (Mul(bcx, info, ty, DebugLoc::None),
              C_uint(bcx.ccx(), unit_align))
         }
         _ => bcx.sess().bug(&format!("Unexpected unsized type, found {}",
@@ -450,8 +446,10 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, in
     }
 }

-fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueKind<'tcx>)
-                              -> Block<'blk, 'tcx> {
+fn make_drop_glue<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                  v0: ValueRef,
+                                  g: DropGlueKind<'tcx>)
+                                  -> &'blk Block {
     let t = g.ty();
     let skip_dtor = match g { DropGlueKind::Ty(_) => false, DropGlueKind::TyContents(_) => true };
     // NB: v0 is an *alias* of type t here, not a direct value.
@@ -474,20 +472,22 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueK
             if !type_is_sized(bcx.tcx(), content_ty) {
                 let llval = GEPi(bcx, v0, &[0, abi::FAT_PTR_ADDR]);
                 let llbox = Load(bcx, llval);
-                let llbox_as_usize = PtrToInt(bcx, llbox, Type::int(bcx.ccx()));
+                let ty = Type::int(bcx.ccx());
+                let llbox_as_usize = PtrToInt(bcx, llbox, ty);
                 let drop_flag_not_dropped_already =
                     ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None);
                 with_cond(bcx, drop_flag_not_dropped_already, |bcx| {
-                    let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None);
+                    let bcx = &mut drop_ty(bcx, v0, content_ty, DebugLoc::None).with_fcx(bcx.fcx);
                     let info = GEPi(bcx, v0, &[0, abi::FAT_PTR_EXTRA]);
                     let info = Load(bcx, info);
                     let (llsize, llalign) = size_and_align_of_dst(bcx, content_ty, info);

                     // `Box<ZeroSizeType>` does not allocate.
+                    let ty = C_uint(bcx.ccx(), 0u64);
                     let needs_free = ICmp(bcx,
                                           llvm::IntNE,
                                           llsize,
-                                          C_uint(bcx.ccx(), 0u64),
+                                          ty,
                                           DebugLoc::None);
                     with_cond(bcx, needs_free, |bcx| {
                         trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None)
@@ -500,7 +500,8 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueK
                 let drop_flag_not_dropped_already =
                     ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None);
                 with_cond(bcx, drop_flag_not_dropped_already, |bcx| {
-                    let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None);
+                    let bcx = &mut drop_ty(bcx,
+                                           llbox, content_ty, DebugLoc::None).with_fcx(bcx.fcx);
                     trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None)
                 })
             }
@@ -540,14 +541,18 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueK
             // okay with always calling the Drop impl, if any.
             assert!(!skip_dtor);
             let data_ptr = GEPi(bcx, v0, &[0, abi::FAT_PTR_ADDR]);
-            let vtable_ptr = Load(bcx, GEPi(bcx, v0, &[0, abi::FAT_PTR_EXTRA]));
+            let fpe = GEPi(bcx, v0, &[0, abi::FAT_PTR_EXTRA]);
+            let vtable_ptr = Load(bcx, fpe);
             let dtor = Load(bcx, vtable_ptr);
+            let dp = Load(bcx, data_ptr);
+            let ty = Type::i8p(bcx.ccx());
+            let pc = &[PointerCast(bcx, dp, ty)];
             Call(bcx,
                  dtor,
-                 &[PointerCast(bcx, Load(bcx, data_ptr), Type::i8p(bcx.ccx()))],
+                 pc,
                  None,
                  DebugLoc::None);
-            bcx
+            bcx.bl
         }
         _ => {
             if bcx.fcx.type_needs_drop(t) {
@@ -556,7 +561,7 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueK
                                  t,
                                  |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None))
             } else {
-                bcx
+                bcx.bl
             }
         }
     }
diff --git a/src/librustc_trans/trans/intrinsic.rs b/src/librustc_trans/trans/intrinsic.rs
index 4608918ec59bd..3adbccf3b02a2 100644
--- a/src/librustc_trans/trans/intrinsic.rs
+++ b/src/librustc_trans/trans/intrinsic.rs
@@ -147,17 +147,18 @@ pub fn check_intrinsics(ccx: &CrateContext) {
 /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
 /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
 /// add them to librustc_trans/trans/context.rs
-pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
-                                            node: ast::NodeId,
-                                            callee_ty: Ty<'tcx>,
-                                            cleanup_scope: cleanup::CustomScopeIndex,
-                                            args: callee::CallArgs<'a, 'tcx>,
-                                            dest: expr::Dest,
-                                            substs: subst::Substs<'tcx>,
-                                            call_info: NodeIdAndSpan)
-                                            -> Result<'blk, 'tcx> {
-    let fcx = bcx.fcx;
-    let ccx = fcx.ccx;
+pub fn trans_intrinsic_call<'a, 'r, 'blk, 'tcx>
+    (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>,
+     node: ast::NodeId,
+     callee_ty: Ty<'tcx>,
+     cleanup_scope: cleanup::CustomScopeIndex,
+     args: callee::CallArgs<'a, 'tcx>,
+     dest: expr::Dest,
+     substs: subst::Substs<'tcx>,
+     call_info: NodeIdAndSpan)
+     -> Result<'blk> {
+    let mut bcx = &mut bl.with_fcx(fcx);
+    let ccx = bcx.fcx.ccx;
     let tcx = bcx.tcx();

     let _icx = push_ctxt("trans_intrinsic_call");
@@ -245,16 +246,16 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                 expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
                 expr::Ignore => expr::Ignore
             };
-            bcx = expr::trans_into(bcx, &*arg_exprs[0], dest);
+            bcx.bl = expr::trans_into(bcx, &*arg_exprs[0], dest);
             dest
         };
-        fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
-        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
+        bcx.fcx.scopes.last_mut().unwrap().drop_non_lifetime_clean();
+        bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx.bl, cleanup_scope);

         return match dest {
-            expr::SaveIn(d) => Result::new(bcx, d),
-            expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
+            expr::SaveIn(d) => Result::new(bcx.bl, d),
+            expr::Ignore => Result::new(bcx.bl, C_undef(llret_ty.ptr_to()))
         };
     }

@@ -267,15 +268,15 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,

     // Push the arguments.
     let mut llargs = Vec::new();
-    bcx = callee::trans_args(bcx,
-                             args,
-                             callee_ty,
-                             &mut llargs,
-                             cleanup::CustomScope(cleanup_scope),
-                             false,
-                             RustIntrinsic);
+    bcx.bl = callee::trans_args(bcx,
+                                args,
+                                callee_ty,
+                                &mut llargs,
+                                cleanup::CustomScope(cleanup_scope),
+                                false,
+                                RustIntrinsic);

-    fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
+    bcx.fcx.scopes.last_mut().unwrap().drop_non_lifetime_clean();

     let call_debug_location = DebugLoc::At(call_info.id, call_info.span);

@@ -283,13 +284,13 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
     if &name[..] == "abort" {
         let llfn = ccx.get_intrinsic(&("llvm.trap"));
         Call(bcx, llfn, &[], None, call_debug_location);
-        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
+        bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx.bl, cleanup_scope);
         Unreachable(bcx);
-        return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
+        return Result::new(bcx.bl, C_undef(Type::nil(ccx).ptr_to()));
     } else if &name[..] == "unreachable" {
-        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
+        bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx.bl, cleanup_scope);
         Unreachable(bcx);
-        return Result::new(bcx, C_nil(ccx));
+        return Result::new(bcx.bl, C_nil(ccx));
     }

     let ret_ty = match ret_ty {
@@ -329,7 +330,8 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
         (_, "size_of_val") => {
             let tp_ty = *substs.types.get(FnSpace, 0);
             if !type_is_sized(tcx, tp_ty) {
-                let info = Load(bcx, expr::get_len(bcx, llargs[0]));
+                let l = expr::get_len(bcx, llargs[0]);
+                let info = Load(bcx, l);
                 let (llsize, _) = glue::size_and_align_of_dst(bcx, tp_ty, info);
                 llsize
             } else {
@@ -344,7 +346,8 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
         (_, "min_align_of_val") => {
             let tp_ty = *substs.types.get(FnSpace, 0);
             if !type_is_sized(tcx, tp_ty) {
-                let info = Load(bcx, expr::get_len(bcx, llargs[0]));
+                let l = expr::get_len(bcx, llargs[0]);
+                let info = Load(bcx, l);
                 let (_, llalign) = glue::size_and_align_of_dst(bcx, tp_ty, info);
                 llalign
             } else {
@@ -369,7 +372,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                 ty: tp_ty,
                 kind: Rvalue::new(mode)
             };
-            bcx = src.store_to(bcx, llargs[0]);
+            bcx.bl = src.store_to(bcx, llargs[0]);
             C_nil(ccx)
         }
         (_, "drop_in_place") => {
@@ -716,13 +719,13 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
         (_, "overflowing_mul") => Mul(bcx, llargs[0], llargs[1], call_debug_location),

         (_, "return_address") => {
-            if !fcx.caller_expects_out_pointer {
+            if !bcx.fcx.caller_expects_out_pointer {
                 tcx.sess.span_err(call_info.span,
                                   "invalid use of `return_address` intrinsic: function \
                                    does not use out pointer");
                 C_null(Type::i8p(ccx))
             } else {
-                PointerCast(bcx, llvm::get_param(fcx.llfn, 0), Type::i8p(ccx))
+                PointerCast(bcx, llvm::get_param(bcx.fcx.llfn, 0), Type::i8p(ccx))
             }
         }

@@ -787,7 +790,8 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                 "load" => {
                     let tp_ty = *substs.types.get(FnSpace, 0);
                     let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
-                    from_arg_ty(bcx, AtomicLoad(bcx, ptr, order), tp_ty)
+                    let ld = AtomicLoad(bcx, ptr, order);
+                    from_arg_ty(bcx, ld, tp_ty)
                 }
                 "store" => {
                     let tp_ty = *substs.types.get(FnSpace, 0);
@@ -844,25 +848,25 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
     // If we made a temporary stack slot, let's clean it up
     match dest {
         expr::Ignore => {
-            bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location);
+            bcx.bl = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location);
         }
         expr::SaveIn(_) => {}
     }

-    fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
+    bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx.bl, cleanup_scope);

-    Result::new(bcx, llresult)
+    Result::new(bcx.bl, llresult)
 }

-fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                              allow_overlap: bool,
-                              volatile: bool,
-                              tp_ty: Ty<'tcx>,
-                              dst: ValueRef,
-                              src: ValueRef,
-                              count: ValueRef,
-                              call_debug_location: DebugLoc)
-                              -> ValueRef {
+fn copy_intrinsic<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                  allow_overlap: bool,
+                                  volatile: bool,
+                                  tp_ty: Ty<'tcx>,
+                                  dst: ValueRef,
+                                  src: ValueRef,
+                                  count: ValueRef,
+                                  call_debug_location: DebugLoc)
+                                  -> ValueRef {
     let ccx = bcx.ccx();
     let lltp_ty = type_of::type_of(ccx, tp_ty);
     let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
@@ -886,25 +890,27 @@ fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
     let llfn = ccx.get_intrinsic(&name);

+    let mul = Mul(bcx, size, count, DebugLoc::None);
+    let ty = C_bool(ccx, volatile);
     Call(bcx,
          llfn,
          &[dst_ptr,
            src_ptr,
-           Mul(bcx, size, count, DebugLoc::None),
+           mul,
            align,
-           C_bool(ccx, volatile)],
+           ty],
          None,
          call_debug_location)
 }

-fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                volatile: bool,
-                                tp_ty: Ty<'tcx>,
-                                dst: ValueRef,
-                                val: ValueRef,
-                                count: ValueRef,
-                                call_debug_location: DebugLoc)
-                                -> ValueRef {
+fn memset_intrinsic<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                    volatile: bool,
+                                    tp_ty: Ty<'tcx>,
+                                    dst: ValueRef,
+                                    val: ValueRef,
+                                    count: ValueRef,
+                                    call_debug_location: DebugLoc)
+                                    -> ValueRef {
     let ccx = bcx.ccx();
     let lltp_ty = type_of::type_of(ccx, tp_ty);
     let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
@@ -918,18 +924,20 @@ fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
     let llfn = ccx.get_intrinsic(&name);

+    let mul = Mul(bcx, size, count, DebugLoc::None);
+    let ty = C_bool(ccx, volatile);
     Call(bcx,
          llfn,
          &[dst_ptr,
            val,
-           Mul(bcx, size, count, DebugLoc::None),
+           mul,
            align,
-           C_bool(ccx, volatile)],
+           ty],
          None,
          call_debug_location)
 }

-fn count_zeros_intrinsic(bcx: Block,
+fn count_zeros_intrinsic(bcx: &mut BlockContext,
                          name: &'static str,
                          val: ValueRef,
                          call_debug_location: DebugLoc)
@@ -939,19 +947,21 @@ fn count_zeros_intrinsic(bcx: Block,
     Call(bcx, llfn, &[val, y], None, call_debug_location)
 }

-fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                       name: &'static str,
-                                       t: Ty<'tcx>,
-                                       a: ValueRef,
-                                       b: ValueRef,
-                                       call_debug_location: DebugLoc)
-                                       -> ValueRef {
+fn with_overflow_intrinsic<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                           name: &'static str,
+                                           t: Ty<'tcx>,
+                                           a: ValueRef,
+                                           b: ValueRef,
+                                           call_debug_location: DebugLoc)
+                                           -> ValueRef {
     let llfn = bcx.ccx().get_intrinsic(&name);

     // Convert `i1` to a `bool`, and write it to the out parameter
     let val = Call(bcx, llfn, &[a, b], None, call_debug_location);
     let result = ExtractValue(bcx, val, 0);
-    let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
+    let v = ExtractValue(bcx, val, 1);
+    let ty = Type::bool(bcx.ccx());
+    let overflow = ZExt(bcx, v, ty);
     let ret = C_undef(type_of::type_of(bcx.ccx(), t));
     let ret = InsertValue(bcx, ret, result, 0);
     let ret = InsertValue(bcx, ret, overflow, 1);
diff --git a/src/librustc_trans/trans/macros.rs b/src/librustc_trans/trans/macros.rs
index 77efcc6fb0030..53c55ffcf023e 100644
--- a/src/librustc_trans/trans/macros.rs
+++ b/src/librustc_trans/trans/macros.rs
@@ -12,7 +12,7 @@ macro_rules! unpack_datum {
     ($bcx: ident, $inp: expr) => (
         {
             let db = $inp;
-            $bcx = db.bcx;
+            $bcx.bl = db.bcx;
             db.datum
         }
     )
@@ -22,7 +22,7 @@ macro_rules! unpack_result {
     ($bcx: ident, $inp: expr) => (
         {
             let db = $inp;
-            $bcx = db.bcx;
+            $bcx.bl = db.bcx;
             db.val
         }
     )
diff --git a/src/librustc_trans/trans/meth.rs b/src/librustc_trans/trans/meth.rs
index 7039968b029ac..6e997a6e33d53 100644
--- a/src/librustc_trans/trans/meth.rs
+++ b/src/librustc_trans/trans/meth.rs
@@ -101,7 +101,7 @@ pub fn trans_impl(ccx: &CrateContext,
     }
 }

-pub fn trans_method_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+pub fn trans_method_callee<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
                                        method_call: MethodCall,
                                        self_expr: Option<&ast::Expr>,
                                        arg_cleanup_scope: cleanup::ScopeId)
@@ -119,7 +119,7 @@ pub fn trans_method_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
         ty::MethodStatic(did) |
         ty::MethodStaticClosure(did) => {
             Callee {
-                bcx: bcx,
+                bcx: bcx.bl,
                 data: Fn(callee::trans_fn_ref(bcx.ccx(),
                                               did,
                                               MethodCallKey(method_call),
@@ -157,8 +157,9 @@ pub fn trans_method_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              callee (trying to call overloaded op?)")
                 }
             };
+            let mty = monomorphize_type(bcx, method_ty);
             trans_trait_callee(bcx,
-                               monomorphize_type(bcx, method_ty),
+                               mty,
                                mt.vtable_index,
                                self_expr,
                                arg_cleanup_scope)
@@ -320,12 +321,12 @@ fn method_with_name(ccx: &CrateContext, impl_id: ast::DefId, name: ast::Name)
     meth_did.def_id()
 }

-fn trans_monomorphized_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                          method_call: MethodCall,
-                                          trait_id: ast::DefId,
-                                          n_method: usize,
-                                          vtable: traits::Vtable<'tcx, ()>)
-                                          -> Callee<'blk, 'tcx> {
+fn trans_monomorphized_callee<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                              method_call: MethodCall,
+                                              trait_id: ast::DefId,
+                                              n_method: usize,
+                                              vtable: traits::Vtable<'tcx, ()>)
+                                              -> Callee<'blk, 'tcx> {
     let _icx = push_ctxt("meth::trans_monomorphized_callee");
     match vtable {
         traits::VtableImpl(vtable_impl) => {
@@ -353,7 +354,7 @@ fn trans_monomorphized_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                             bcx.fcx.param_substs,
                                             callee_substs).val;

-            Callee { bcx: bcx, data: Fn(llfn) }
+            Callee { bcx: bcx.bl, data: Fn(llfn) }
         }
         traits::VtableClosure(closure_def_id, substs) => {
             // The substitutions should have no type parameters remaining
@@ -366,21 +367,21 @@ fn trans_monomorphized_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                                     bcx.fcx.param_substs,
                                                     trait_closure_kind);
             Callee {
-                bcx: bcx,
+                bcx: bcx.bl,
                 data: Fn(llfn),
             }
         }
         traits::VtableFnPointer(fn_ty) => {
             let trait_closure_kind = bcx.tcx().lang_items.fn_trait_kind(trait_id).unwrap();
             let llfn = trans_fn_pointer_shim(bcx.ccx(), trait_closure_kind, fn_ty);
-            Callee { bcx: bcx, data: Fn(llfn) }
+            Callee { bcx: bcx.bl, data: Fn(llfn) }
         }
         traits::VtableObject(ref data) => {
             let (llfn, _) = trans_object_shim(bcx.ccx(),
                                               data.object_ty,
                                               data.upcast_trait_ref.clone(),
                                               n_method);
-            Callee { bcx: bcx, data: Fn(llfn) }
+            Callee { bcx: bcx.bl, data: Fn(llfn) }
         }
         traits::VtableBuiltin(..) |
         traits::VtableDefaultImpl(..) |
@@ -402,10 +403,10 @@ fn trans_monomorphized_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
 /// In that case, the vector we want is: `[X, M1, M2, M3]`.  Therefore, what we do now is to slice
 /// off the method type parameters and append them to the type parameters from the type that the
 /// receiver is mapped to.
-fn combine_impl_and_methods_tps<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                            node: ExprOrMethodCall,
-                                            rcvr_substs: subst::Substs<'tcx>)
-                                            -> subst::Substs<'tcx>
+fn combine_impl_and_methods_tps<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                                node: ExprOrMethodCall,
+                                                rcvr_substs: subst::Substs<'tcx>)
+                                                -> subst::Substs<'tcx>
 {
     let ccx = bcx.ccx();

@@ -433,14 +434,15 @@ fn combine_impl_and_methods_tps<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
 /// In this case, we must pull the fn pointer out of the vtable that is packaged up with the
 /// object. Objects are represented as a pair, so we first evaluate the self expression and then
 /// extract the self data and vtable out of the pair.
-fn trans_trait_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                  method_ty: Ty<'tcx>,
-                                  vtable_index: usize,
-                                  self_expr: &ast::Expr,
-                                  arg_cleanup_scope: cleanup::ScopeId)
-                                  -> Callee<'blk, 'tcx> {
+fn trans_trait_callee<'r, 'blk, 'tcx>
+    (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>,
+     method_ty: Ty<'tcx>,
+     vtable_index: usize,
+     self_expr: &ast::Expr,
+     arg_cleanup_scope: cleanup::ScopeId)
+     -> Callee<'blk, 'tcx> {
     let _icx = push_ctxt("meth::trans_trait_callee");
-    let mut bcx = bcx;
+    let mut bcx = &mut bl.with_fcx(fcx);

     // Translate self_datum and take ownership of the value by
     // converting to an rvalue.
@@ -470,11 +472,11 @@ fn trans_trait_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,

 /// Same as `trans_trait_callee()` above, except that it is given a by-ref pointer to the object
 /// pair.
-pub fn trans_trait_callee_from_llval<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                                 callee_ty: Ty<'tcx>,
-                                                 vtable_index: usize,
-                                                 llpair: ValueRef)
-                                                 -> Callee<'blk, 'tcx> {
+pub fn trans_trait_callee_from_llval<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                                     callee_ty: Ty<'tcx>,
+                                                     vtable_index: usize,
+                                                     llpair: ValueRef)
+                                                     -> Callee<'blk, 'tcx> {
     let _icx = push_ctxt("meth::trans_trait_callee");
     let ccx = bcx.ccx();

@@ -502,16 +504,16 @@ pub fn trans_trait_callee_from_llval<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
             ccx.sess().bug("meth::trans_trait_callee given non-bare-rust-fn");
         }
     };
-    let llvtable = Load(bcx,
-                        PointerCast(bcx,
-                                    GEPi(bcx, llpair,
-                                         &[0, abi::FAT_PTR_EXTRA]),
-                                    Type::vtable(ccx).ptr_to().ptr_to()));
-    let mptr = Load(bcx, GEPi(bcx, llvtable, &[0, vtable_index + VTABLE_OFFSET]));
+    let fpe = GEPi(bcx, llpair, &[0, abi::FAT_PTR_EXTRA]);
+    let ty = Type::vtable(ccx);
+    let pc = PointerCast(bcx, fpe, ty.ptr_to().ptr_to());
+    let llvtable = Load(bcx, pc);
+    let vp = GEPi(bcx, llvtable, &[0, vtable_index + VTABLE_OFFSET]);
+    let mptr = Load(bcx, vp);
     let mptr = PointerCast(bcx, mptr, llcallee_ty.ptr_to());

     return Callee {
-        bcx: bcx,
+        bcx: bcx.bl,
         data: TraitItem(MethodData {
             llfn: mptr,
             llself: llself,
@@ -596,7 +598,7 @@ pub fn trans_object_shim<'a, 'tcx>(
     let sig = ty::erase_late_bound_regions(ccx.tcx(), &fty.sig);
     let empty_substs = tcx.mk_substs(Substs::trans_empty());

-    let (block_arena, fcx): (TypedArena<_>, FunctionContext);
+    let (block_arena, mut fcx): (TypedArena<_>, FunctionContext);
     block_arena = TypedArena::new();
     fcx = new_fn_ctxt(ccx,
                       llfn,
@@ -606,13 +608,14 @@ pub fn trans_object_shim<'a, 'tcx>(
                       empty_substs,
                       None,
                       &block_arena);
-    let mut bcx = init_function(&fcx, false, sig.output);
+    let mut fcx = &mut fcx;
+    let mut bcx = init_function(fcx, false, sig.output);

     // the first argument (`self`) will be a trait object
     let llobject = get_param(fcx.llfn, fcx.arg_pos(0) as u32);

     debug!("trans_object_shim: llobject={}",
-           bcx.val_to_string(llobject));
+           bcx.with_fcx(fcx).val_to_string(llobject));

     // the remaining arguments will be, well, whatever they are
     let input_tys =
@@ -622,7 +625,7 @@ pub fn trans_object_shim<'a, 'tcx>(
         match sig.inputs[1].sty {
             ty::ty_tup(ref tys) => &**tys,
             _ => {
-                bcx.sess().bug(
+                bcx.with_fcx(fcx).sess().bug(
                     &format!("rust-call expects a tuple not {}",
                              sig.inputs[1].repr(tcx)));
             }
@@ -640,7 +643,7 @@ pub fn trans_object_shim<'a, 'tcx>(
         .map(|(i, _)| {
             let llarg = get_param(fcx.llfn, fcx.arg_pos(i+1) as u32);
             debug!("trans_object_shim: input #{} == {}",
-                   i, bcx.val_to_string(llarg));
+                   i, bcx.with_fcx(fcx).val_to_string(llarg));
             llarg
         })
         .collect();
@@ -648,18 +651,18 @@ pub fn trans_object_shim<'a, 'tcx>(
     assert!(!fcx.needs_ret_allocas);

     let dest =
-        fcx.llretslotptr.get().map(
+        fcx.llretslotptr.map(
             |_| expr::SaveIn(fcx.get_ret_slot(bcx, sig.output, "ret_slot")));

     let method_offset_in_vtable =
-        traits::get_vtable_index_of_object_method(bcx.tcx(),
+        traits::get_vtable_index_of_object_method(&mut bcx.with_fcx(fcx).tcx(),
                                                   object_trait_ref.clone(),
                                                   trait_id,
                                                   method_offset_in_trait);
     debug!("trans_object_shim: method_offset_in_vtable={}",
            method_offset_in_vtable);

-    bcx = trans_call_inner(bcx,
+    bcx = trans_call_inner(&mut bcx.with_fcx(fcx),
                            DebugLoc::None,
                            method_bare_fn_ty,
                            |bcx, _| trans_trait_callee_from_llval(bcx,
@@ -669,7 +672,7 @@ pub fn trans_object_shim<'a, 'tcx>(
                            ArgVals(&llargs),
                            dest).bcx;

-    finish_fn(&fcx, bcx, sig.output, DebugLoc::None);
+    finish_fn(fcx, bcx, sig.output, DebugLoc::None);

     (llfn, method_bare_fn_ty)
 }
diff --git a/src/librustc_trans/trans/tvec.rs b/src/librustc_trans/trans/tvec.rs
index b02fcb6cf0c53..c3d948d95d9ca 100644
--- a/src/librustc_trans/trans/tvec.rs
+++ b/src/librustc_trans/trans/tvec.rs
@@ -47,10 +47,10 @@ impl<'tcx> VecTypes<'tcx> {
     }
 }

-pub fn trans_fixed_vstore<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                      expr: &ast::Expr,
-                                      dest: expr::Dest)
-                                      -> Block<'blk, 'tcx> {
+pub fn trans_fixed_vstore<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                          expr: &ast::Expr,
+                                          dest: expr::Dest)
+                                          -> &'blk Block {
     //!
     //
     // [...] allocates a fixed-size array and moves it around "by value".
@@ -77,13 +77,13 @@ pub fn trans_fixed_vstore<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
 /// &[...] allocates memory on the stack and writes the values into it, returning the vector (the
 /// caller must make the reference).  "..." is similar except that the memory can be statically
 /// allocated and we return a reference (strings are always by-ref).
-pub fn trans_slice_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                   slice_expr: &ast::Expr,
-                                   content_expr: &ast::Expr)
-                                   -> DatumBlock<'blk, 'tcx, Expr> {
-    let fcx = bcx.fcx;
-    let ccx = fcx.ccx;
-    let mut bcx = bcx;
+pub fn trans_slice_vec<'r, 'blk, 'tcx>
+    (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>,
+     slice_expr: &ast::Expr,
+     content_expr: &ast::Expr)
+     -> DatumBlock<'blk, 'tcx, Expr> {
+    let mut bcx = &mut bl.with_fcx(fcx);
+    let ccx = bcx.fcx.ccx;

     debug!("trans_slice_vec(slice_expr={})",
            bcx.expr_to_string(slice_expr));
@@ -94,11 +94,11 @@ pub fn trans_slice_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     if let ast::ExprLit(ref lit) = content_expr.node {
         if let ast::LitStr(ref s, _) = lit.node {
             let scratch = rvalue_scratch_datum(bcx, vec_ty, "");
-            bcx = trans_lit_str(bcx,
-                                content_expr,
-                                s.clone(),
-                                SaveIn(scratch.val));
-            return DatumBlock::new(bcx, scratch.to_expr_datum());
+            bcx.bl = trans_lit_str(bcx,
+                                   content_expr,
+                                   s.clone(),
+                                   SaveIn(scratch.val));
+            return DatumBlock::new(bcx.bl, scratch.to_expr_datum());
         }
     }

@@ -119,14 +119,15 @@ pub fn trans_slice_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     if count > 0 {
         // Arrange for the backing array to be cleaned up.
         let cleanup_scope = cleanup::temporary_scope(bcx.tcx(), content_expr.id);
-        fcx.schedule_lifetime_end(cleanup_scope, llfixed);
-        fcx.schedule_drop_mem(cleanup_scope, llfixed, fixed_ty);
+        bcx.fcx.schedule_lifetime_end(cleanup_scope, llfixed);
+        bcx.fcx.schedule_drop_mem(cleanup_scope, llfixed, fixed_ty);

         // Generate the content into the backing array.
         // llfixed has type *[T x N], but we want the type *T,
         // so use GEP to convert
-        bcx = write_content(bcx, &vt, slice_expr, content_expr,
-                            SaveIn(GEPi(bcx, llfixed, &[0, 0])));
+        let fp = GEPi(bcx, llfixed, &[0, 0]);
+        bcx.bl = write_content(bcx, &vt, slice_expr, content_expr,
+                               SaveIn(fp));
     };

     immediate_rvalue_bcx(bcx, llfixed, vec_ty).to_expr_datumblock()
@@ -134,38 +135,40 @@ pub fn trans_slice_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,

 /// Literal strings translate to slices into static memory.  This is different from
 /// trans_slice_vstore() above because it doesn't need to copy the content anywhere.
-pub fn trans_lit_str<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                 lit_expr: &ast::Expr,
-                                 str_lit: InternedString,
-                                 dest: Dest)
-                                 -> Block<'blk, 'tcx> {
+pub fn trans_lit_str<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                     lit_expr: &ast::Expr,
+                                     str_lit: InternedString,
+                                     dest: Dest)
+                                     -> &'blk Block {
     debug!("trans_lit_str(lit_expr={}, dest={})",
            bcx.expr_to_string(lit_expr),
            dest.to_string(bcx.ccx()));

     match dest {
-        Ignore => bcx,
+        Ignore => bcx.bl,
         SaveIn(lldest) => {
             let bytes = str_lit.len();
             let llbytes = C_uint(bcx.ccx(), bytes);
             let llcstr = C_cstr(bcx.ccx(), str_lit, false);
             let llcstr = consts::ptrcast(llcstr, Type::i8p(bcx.ccx()));
-            Store(bcx, llcstr, GEPi(bcx, lldest, &[0, abi::FAT_PTR_ADDR]));
-            Store(bcx, llbytes, GEPi(bcx, lldest, &[0, abi::FAT_PTR_EXTRA]));
-            bcx
+            let fpa = GEPi(bcx, lldest, &[0, abi::FAT_PTR_ADDR]);
+            Store(bcx, llcstr, fpa);
+            let fpe = GEPi(bcx, lldest, &[0, abi::FAT_PTR_EXTRA]);
+            Store(bcx, llbytes, fpe);
+            bcx.bl
         }
     }
 }

-fn write_content<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                             vt: &VecTypes<'tcx>,
-                             vstore_expr: &ast::Expr,
-                             content_expr: &ast::Expr,
-                             dest: Dest)
-                             -> Block<'blk, 'tcx> {
+fn write_content<'r, 'blk, 'tcx>
+    (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>,
+     vt: &VecTypes<'tcx>,
+     vstore_expr: &ast::Expr,
+     content_expr: &ast::Expr,
+     dest: Dest)
+     -> &'blk Block {
     let _icx = push_ctxt("tvec::write_content");
-    let fcx = bcx.fcx;
-    let mut bcx = bcx;
+    let mut bcx = &mut bl.with_fcx(fcx);

     debug!("write_content(vt={}, dest={}, vstore_expr={})",
            vt.to_string(bcx.ccx()),
@@ -177,7 +180,7 @@ fn write_content<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
             match lit.node {
                 ast::LitStr(ref s, _) => {
                     match dest {
-                        Ignore => return bcx,
+                        Ignore => return bcx.bl,
                         SaveIn(lldest) => {
                             let bytes = s.len();
                             let llbytes = C_uint(bcx.ccx(), bytes);
@@ -187,7 +190,7 @@ fn write_content<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                            llcstr,
                                            llbytes,
                                            1);
-                            return bcx;
+                            return bcx.bl;
                         }
                     }
                 }
@@ -201,26 +204,26 @@ fn write_content<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
             match dest {
                 Ignore => {
                     for element in elements {
-                        bcx = expr::trans_into(bcx, &**element, Ignore);
+                        bcx.bl = expr::trans_into(bcx, &**element, Ignore);
                     }
                 }

                 SaveIn(lldest) => {
-                    let temp_scope = fcx.push_custom_cleanup_scope();
+                    let temp_scope = bcx.fcx.push_custom_cleanup_scope();
                     for (i, element) in elements.iter().enumerate() {
                         let lleltptr = GEPi(bcx, lldest, &[i]);
                         debug!("writing index {} with lleltptr={}",
                                i, bcx.val_to_string(lleltptr));
-                        bcx = expr::trans_into(bcx, &**element,
-                                               SaveIn(lleltptr));
+                        bcx.bl = expr::trans_into(bcx, &**element,
+                                                  SaveIn(lleltptr));
                         let scope = cleanup::CustomScope(temp_scope);
-                        fcx.schedule_lifetime_end(scope, lleltptr);
-                        fcx.schedule_drop_mem(scope, lleltptr, vt.unit_ty);
+                        bcx.fcx.schedule_lifetime_end(scope, lleltptr);
+                        bcx.fcx.schedule_drop_mem(scope, lleltptr, vt.unit_ty);
                     }
-                    fcx.pop_custom_cleanup_scope(temp_scope);
+                    bcx.fcx.pop_custom_cleanup_scope(temp_scope);
                 }
             }
-            return bcx;
+            return bcx.bl;
         }
         ast::ExprRepeat(ref element, ref count_expr) => {
             match dest {
@@ -233,8 +236,9 @@ fn write_content<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                         1 => expr::trans_into(bcx, &**element, SaveIn(lldest)),
                         count => {
                             let elem = unpack_datum!(bcx, expr::trans(bcx, &**element));
+                            let ty = C_uint(bcx.ccx(), count);
                             let bcx = iter_vec_loop(bcx, lldest, vt,
-                                                    C_uint(bcx.ccx(), count),
+                                                    ty,
                                                     |set_bcx, lleltptr, _| {
                                                         elem.shallow_copy(set_bcx, lleltptr)
                                                     });
@@ -251,21 +255,22 @@ fn write_content<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     }
 }

-fn vec_types_from_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, vec_expr: &ast::Expr)
-                                   -> VecTypes<'tcx> {
+fn vec_types_from_expr<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, vec_expr: &ast::Expr)
+                                       -> VecTypes<'tcx> {
     let vec_ty = node_id_type(bcx, vec_expr.id);
-    vec_types(bcx, ty::sequence_element_type(bcx.tcx(), vec_ty))
+    let ty = ty::sequence_element_type(bcx.tcx(), vec_ty);
+    vec_types(bcx, ty)
 }

-fn vec_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, unit_ty: Ty<'tcx>)
-                         -> VecTypes<'tcx> {
+fn vec_types<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>, unit_ty: Ty<'tcx>)
+                             -> VecTypes<'tcx> {
     VecTypes {
         unit_ty: unit_ty,
         llunit_ty: type_of::type_of(bcx.ccx(), unit_ty)
     }
 }

-fn elements_required(bcx: Block, content_expr: &ast::Expr) -> usize {
+fn elements_required(bcx: &mut BlockContext, content_expr: &ast::Expr) -> usize {
     //! Figure out the number of elements we need to store this content

     match content_expr.node {
@@ -289,7 +294,7 @@ fn elements_required(bcx: Block, content_expr: &ast::Expr) -> usize {

 /// Converts a fixed-length vector into the slice pair. The vector should be stored in `llval`
 /// which should be by ref.
-pub fn get_fixed_base_and_len(bcx: Block,
+pub fn get_fixed_base_and_len(bcx: &mut BlockContext,
                               llval: ValueRef,
                               vec_length: usize)
                               -> (ValueRef, ValueRef) {
@@ -303,17 +308,19 @@ pub fn get_fixed_base_and_len(bcx: Block,
 /// Converts a vector into the slice pair. The vector should be stored in `llval` which should be
 /// by-reference.  If you have a datum, you would probably prefer to call
 /// `Datum::get_base_and_len()` which will handle any conversions for you.
-pub fn get_base_and_len<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                                    llval: ValueRef,
-                                    vec_ty: Ty<'tcx>)
-                                    -> (ValueRef, ValueRef) {
+pub fn get_base_and_len<'r, 'blk, 'tcx>(bcx: &mut BlockContext<'r, 'blk, 'tcx>,
+                                        llval: ValueRef,
+                                        vec_ty: Ty<'tcx>)
+                                        -> (ValueRef, ValueRef) {
     let ccx = bcx.ccx();

     match vec_ty.sty {
         ty::ty_vec(_, Some(n)) => get_fixed_base_and_len(bcx, llval, n),
         ty::ty_vec(_, None) | ty::ty_str => {
-            let base = Load(bcx, expr::get_dataptr(bcx, llval));
-            let len = Load(bcx, expr::get_len(bcx, llval));
+            let dp = expr::get_dataptr(bcx, llval);
+            let base = Load(bcx, dp);
+            let dl = expr::get_len(bcx, llval);
+            let len = Load(bcx, dl);
             (base, len)
         }

@@ -330,39 +337,43 @@ pub fn get_base_and_len<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     }
 }

-fn iter_vec_loop<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
-                                data_ptr: ValueRef,
-                                vt: &VecTypes<'tcx>,
-                                count: ValueRef,
-                                f: F)
-                                -> Block<'blk, 'tcx> where
-    F: FnOnce(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
+fn iter_vec_loop<'r, 'blk, 'tcx, F>
+    (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>,
+     data_ptr: ValueRef,
+     vt: &VecTypes<'tcx>,
+     count: ValueRef,
+     f: F)
+     -> &'blk Block where
+    F: for<'a> FnOnce(&mut BlockContext<'a, 'blk, 'tcx>, ValueRef, Ty<'tcx>) -> &'blk Block
 {
     let _icx = push_ctxt("tvec::iter_vec_loop");

-    if bcx.unreachable.get() {
-        return bcx;
+    if bl.unreachable.get() {
+        return bl;
     }

-    let fcx = bcx.fcx;
     let loop_bcx = fcx.new_temp_block("expr_repeat");
     let next_bcx = fcx.new_temp_block("expr_repeat: next");

+    let mut bcx = &mut bl.with_fcx(fcx);
+
     Br(bcx, loop_bcx.llbb, DebugLoc::None);

-    let loop_counter = Phi(loop_bcx, bcx.ccx().int_type(),
-                           &[C_uint(bcx.ccx(), 0 as usize)], &[bcx.llbb]);
+    let ty = bcx.ccx().int_type();
+    let v = [C_uint(bcx.ccx(), 0 as usize)];
+    let bb = [bcx.bl.llbb];
+    let loop_counter = Phi(&mut loop_bcx.with_fcx(bcx.fcx), ty, &v, &bb);

-    let bcx = loop_bcx;
+    let bcx = &mut loop_bcx.with_fcx(bcx.fcx);

     let lleltptr = if llsize_of_alloc(bcx.ccx(), vt.llunit_ty) == 0 {
         data_ptr
     } else {
         InBoundsGEP(bcx, data_ptr, &[loop_counter])
     };
-    let bcx = f(bcx, lleltptr, vt.unit_ty);
-    let plusone = Add(bcx, loop_counter, C_uint(bcx.ccx(), 1usize), DebugLoc::None);
-    AddIncomingToPhi(loop_counter, plusone, bcx.llbb);
+    let bcx = &mut f(bcx, lleltptr, vt.unit_ty).with_fcx(bcx.fcx);
+    let one = C_uint(bcx.ccx(), 1usize);
+    let plusone = Add(bcx, loop_counter, one, DebugLoc::None);
+    AddIncomingToPhi(loop_counter, plusone, bcx.bl.llbb);

     let cond_val = ICmp(bcx, llvm::IntULT, plusone, count, DebugLoc::None);
     CondBr(bcx, cond_val, loop_bcx.llbb, next_bcx.llbb, DebugLoc::None);
@@ -370,17 +381,18 @@ fn iter_vec_loop<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
     next_bcx
 }

-pub fn iter_vec_raw<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
-                                   data_ptr: ValueRef,
-                                   unit_ty: Ty<'tcx>,
-                                   len: ValueRef,
-                                   f: F)
-                                   -> Block<'blk, 'tcx> where
-    F: FnOnce(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
+pub fn iter_vec_raw<'r, 'blk, 'tcx, F>
+    (&mut BlockContext { bl, ref mut fcx }: &mut BlockContext<'r, 'blk, 'tcx>,
+     data_ptr: ValueRef,
+     unit_ty: Ty<'tcx>,
+     len: ValueRef,
+     f: F)
+     -> &'blk Block where
+    F: for<'a> FnOnce(&mut BlockContext<'a, 'blk, 'tcx>, ValueRef, Ty<'tcx>) -> &'blk Block,
 {
     let _icx = push_ctxt("tvec::iter_vec_raw");
-    let fcx = bcx.fcx;
+    let mut bcx = &mut bl.with_fcx(fcx);

     let vt = vec_types(bcx, unit_ty);

     if llsize_of_alloc(bcx.ccx(), vt.llunit_ty) == 0 {
@@ -391,20 +403,23 @@ pub fn iter_vec_raw<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
         let data_end_ptr = InBoundsGEP(bcx, data_ptr, &[len]);

         // Now perform the iteration.
-        let header_bcx = fcx.new_temp_block("iter_vec_loop_header");
+        let header_bcx = bcx.fcx.new_temp_block("iter_vec_loop_header");
         Br(bcx, header_bcx.llbb, DebugLoc::None);
         let data_ptr =
-            Phi(header_bcx, val_ty(data_ptr), &[data_ptr], &[bcx.llbb]);
+            Phi(&mut header_bcx.with_fcx(bcx.fcx), val_ty(data_ptr), &[data_ptr], &[bcx.bl.llbb]);
         let not_yet_at_end =
-            ICmp(header_bcx, llvm::IntULT, data_ptr, data_end_ptr, DebugLoc::None);
-        let body_bcx = fcx.new_temp_block("iter_vec_loop_body");
-        let next_bcx = fcx.new_temp_block("iter_vec_next");
-        CondBr(header_bcx, not_yet_at_end, body_bcx.llbb, next_bcx.llbb, DebugLoc::None);
-        let body_bcx = f(body_bcx, data_ptr, unit_ty);
-        AddIncomingToPhi(data_ptr, InBoundsGEP(body_bcx, data_ptr,
-                                               &[C_int(bcx.ccx(), 1)]),
+            ICmp(&mut header_bcx.with_fcx(bcx.fcx),
+                 llvm::IntULT, data_ptr, data_end_ptr, DebugLoc::None);
+        let body_bcx = bcx.fcx.new_temp_block("iter_vec_loop_body");
+        let next_bcx = bcx.fcx.new_temp_block("iter_vec_next");
+        CondBr(&mut header_bcx.with_fcx(bcx.fcx), not_yet_at_end,
+               body_bcx.llbb, next_bcx.llbb, DebugLoc::None);
+        let body_bcx = f(&mut body_bcx.with_fcx(bcx.fcx), data_ptr, unit_ty);
+        let one = [C_int(bcx.ccx(), 1)];
+        AddIncomingToPhi(data_ptr, InBoundsGEP(&mut body_bcx.with_fcx(bcx.fcx), data_ptr,
+                                               &one),
                          body_bcx.llbb);
-        Br(body_bcx, header_bcx.llbb, DebugLoc::None);
+        Br(&mut body_bcx.with_fcx(bcx.fcx), header_bcx.llbb, DebugLoc::None);
         next_bcx
     }
 }
diff --git a/src/librustc_trans/trans/value.rs b/src/librustc_trans/trans/value.rs
index bc71278c15743..0af2de1570044 100644
--- a/src/librustc_trans/trans/value.rs
+++ b/src/librustc_trans/trans/value.rs
@@ -11,7 +11,7 @@
 use llvm;
 use llvm::{UseRef, ValueRef};
 use trans::basic_block::BasicBlock;
-use trans::common::Block;
+use trans::common::BlockContext;
 use libc::c_uint;

 #[derive(Copy, Clone)]
@@ -54,11 +54,11 @@ impl Value {
     /// This only performs a search for a trivially dominating store. The store
     /// must be the only user of this value, and there must not be any conditional
     /// branches between the store and the given block.
-    pub fn get_dominating_store(self, bcx: Block) -> Option<Value> {
+    pub fn get_dominating_store(self, bcx: &mut BlockContext) -> Option<Value> {
         match self.get_single_user().and_then(|user| user.as_store_inst()) {
             Some(store) => {
                 store.get_parent().and_then(|store_bb| {
-                    let mut bb = BasicBlock(bcx.llbb);
+                    let mut bb = BasicBlock(bcx.bl.llbb);
                     let mut ret = Some(store);
                     while bb.get() != store_bb.get() {
                         match bb.get_single_predecessor() {